Content

Conjugate Gradient Method

We show a simple generic implementation of the conjugate gradient (cg) method. This implementation works for any symmetric matrix type that implements the matrix-vector product. In this example we use it for a sparse symmetric system matrix.

Conjugate Gradient (cg) Method

The implementation only requires that the underlying BLAS routines are available. So if you want to play around with different matrix types, make sure that the corresponding matrix-vector product is implemented. Recall that FLENS maps the overloaded operator notation to the underlying BLAS calls for you. For your own matrix type you just have to provide an appropriate blas::mv function, that's it.

In another example we will reuse this generic implementation for a simple user-defined matrix type.
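
To give an idea of what such an overload involves, here is a minimal sketch for a hypothetical symmetric matrix type MySyMatrix that keeps a dense symmetric matrix in a plain std::vector. The class, its storage scheme, and the exact blas::mv signature shown here are illustrative assumptions (modeled on the BLAS symv operation y = beta*y + alpha*A*x); see the user-defined matrix example for the authoritative version.

#include <vector>
#include <flens/flens.cxx>

// Hypothetical matrix type: a dense symmetric matrix of order n whose
// values are kept in a plain std::vector (row-major, both triangles stored).
struct MySyMatrix
    : public flens::SymmetricMatrix<MySyMatrix>
{
    typedef double  ElementType;
    typedef int     IndexType;

    explicit
    MySyMatrix(int n)
        : n(n), values(n*n, 0.0)
    {
    }

    // 1-based element access
    double &
    operator()(int i, int j)
    {
        return values[(i-1)*n + (j-1)];
    }

    double
    operator()(int i, int j) const
    {
        return values[(i-1)*n + (j-1)];
    }

    int                  n;
    std::vector<double>  values;
};

namespace flens { namespace blas {

// Matrix-vector product y = beta*y + alpha*A*x, the only operation cg()
// needs.  NOTE: the signature is an assumption modeled on the BLAS symv
// routine; compare with the user-defined matrix example for the exact
// form FLENS expects for symmetric matrix types.
template <typename ALPHA, typename VX, typename BETA, typename VY>
void
mv(const ALPHA &alpha, const MySyMatrix &A, const DenseVector<VX> &x,
   const BETA &beta, DenseVector<VY> &y)
{
    if (y.length()!=A.n) {
        y.resize(A.n);
    }
    for (int i=1; i<=A.n; ++i) {
        double sum = 0;
        for (int j=1; j<=A.n; ++j) {
            sum += A(i,j)*x(j);
        }
        y(i) = beta*y(i) + alpha*sum;
    }
}

} } // namespace blas, flens

With such an overload in place, the generic cg function below can be used unchanged.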

#ifndef FLENS_EXAMPLES_CG_H
#define FLENS_EXAMPLES_CG_H 1

#include <cmath>
#include <flens/flens.h>
#include <limits>

template <typename MA, typename VX, typename VB>
int
cg(const MA &A, const VB &b, VX &&x,
   double tol = std::numeric_limits<double>::epsilon(),
   int    maxIterations = std::numeric_limits<int>::max())
{
    typedef typename VB::ElementType  ElementType;
    typedef typename VB::IndexType    IndexType;
    typedef typename VB::NoView       VectorType;

    ElementType  alpha, beta, rNormSquare, rNormSquarePrev;
    VectorType   Ap, r, p;

    // Initial residual and search direction
    r = b - A*x;
    p = r;
    rNormSquare = r*r;
    for (int k=1; k<=maxIterations; ++k) {
        // Converged: return the number of iterations needed
        if (std::sqrt(rNormSquare)<=tol) {
            return k-1;
        }
        // Step length along the current search direction
        Ap = A*p;
        alpha = rNormSquare/(p * Ap);
        x += alpha*p;
        r -= alpha*Ap;

        // Update the search direction
        rNormSquarePrev = rNormSquare;
        rNormSquare = r*r;
        beta = rNormSquare/rNormSquarePrev;
        p = beta*p + r;
    }
    return maxIterations;
}

#endif // FLENS_EXAMPLES_CG_H
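
For reference, each pass through the loop realizes the standard cg update

\[
    \alpha_k = \frac{r_k^T r_k}{p_k^T A p_k}, \quad
    x_{k+1}  = x_k + \alpha_k p_k, \quad
    r_{k+1}  = r_k - \alpha_k A p_k, \quad
    \beta_k  = \frac{r_{k+1}^T r_{k+1}}{r_k^T r_k}, \quad
    p_{k+1}  = r_{k+1} + \beta_k p_k,
\]

and the iteration stops as soon as \(\|r_k\|_2 \le \text{tol}\).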

Example Code

#include <cmath>
#include <iostream>
#include <flens/flens.cxx>
#include <flens/examples/cg.h>

using namespace flens;
using namespace std;

int
main()
{
    const int n = 5;

    SyCoordMatrix<CoordStorage<double> >    A(n, Upper);
    for (int i=1; i<=n; ++i) {
        A(i,i) += 2;
        if (i<n) {
            A(i,i+1) -= 1;
        }
    }

    SyCRSMatrix<CRS<double> >   B = A;

    DenseVector<Array<double> > x(n), z(n), b(n);
    z = 1;
    b = B*z;

    cg(B, b, x);
    cout << "x = " << x << endl;

    x -= z;
    cout << "max abs error = " << abs(x(blas::iamax(x))) << std::endl;
}

Some Notes

Set up A such that its diagonal entries equal 2 and the entries on the first off-diagonal equal -1. Since A is symmetric, only the upper triangular part gets stored.

    SyCoordMatrix<CoordStorage<double> >    A(n, Upper);
    for (int i=1; i<=n; ++i) {
        A(i,i) += 2;
        if (i<n) {
            A(i,i+1) -= 1;
        }
    }
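
For n = 5 this yields the symmetric tridiagonal matrix

\[
    A = \begin{pmatrix}
         2 & -1 &    &    &    \\
        -1 &  2 & -1 &    &    \\
           & -1 &  2 & -1 &    \\
           &    & -1 &  2 & -1 \\
           &    &    & -1 &  2
        \end{pmatrix},
\]

of which only the upper triangular part is actually stored.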

Convert A to compressed row storage and store it in B. The compressed format is better suited for the repeated matrix-vector products in the cg iteration.

    SyCRSMatrix<CRS<double> >   B = A;

Vector z serves as the reference solution from which we compute the right-hand side b.

    DenseVector<Array<double> > x(n), z(n), b(n);
    z = 1;
    b = B*z;
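
With \(z = (1, \dots, 1)^T\) the product with the tridiagonal matrix from above gives \(b = (1, 0, 0, 0, 1)^T\): in every interior row the entries cancel (\(-1 + 2 - 1 = 0\)), so only the first and last row contribute a 1.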

Solve \(Bx=b\)

    cg(B, b, x);
    cout << "x = " << x << endl;

Compute the maximum norm of the error: blas::iamax returns the index of the element of x with the largest absolute value.

    x -= z;
    cout << "max abs error = " << abs(x(blas::iamax(x))) << std::endl;

Compile

$shell> cd flens/examples                                                      
$shell> g++ -Wall -std=c++11 -I../.. -o sycrs-cg sycrs-cg.cc                   

Run

$shell> cd flens/examples                                                      
$shell> ./sycrs-cg                                                             
x = 
            1              1              1              1              1 
max abs error = 0