Restriction and prolongation operator

Content

We saw the smoothing property of a single Jacobi iteration in the last session. Application of a Jacobi iteration is denoted with the $$S_h$$ operator. For example, applying a Jacobi iteration on the grid function $$u_h^{(k)}$$:

$u_h^{(k+1)} = S_h u_h^{(k)}$

We will implement the last two ingredients for the multigrid solver:

• The restriction operator is used to restrict a grid function on $$\Omega_h$$ to $$\Omega_{2h}$$. We will formally denote it by $$R_h^{2h}$$. In the multigrid method the restricted residual on $$\Omega_h$$ becomes the right-hand side on $$\Omega_{2h}$$:

$\begin{array}{lcl}r_h & = & f_h - A_h u_h \\f_{2h} & = & R_h^{2h} r_h \\\end{array}$
• The prolongation operator is used to update a grid function on $$\Omega_h$$ with a coarse grid function on $$\Omega_{2h}$$. We denote the update operation by $$P_{2h}^h$$. We will use it to update the approximate solution on $$\Omega_h$$ with an approximate solution on $$\Omega_{2h}$$:

$\begin{array}{lcl}u_h & = & u_h + P_{2h}^h u_{2h} \\\end{array}$

Coarse grid correction step

We will use the so called coarse grid correction to test our ingredients. The coarse grid correction is the simplest case of the multigrid method. It only uses the two grids $$\Omega_h$$ and $$\Omega_{2h}$$ and the algorithm reads as follows:

$\begin{array}{lcll} u_h & \leftarrow & u_h^0 & \text{ (Initial guess satisfying the boundary condition) }\\ u_h & \leftarrow & S_h u_h & \text{ (Smooth error) } \\ f_{2h} & \leftarrow & R_{h}^{2h} (f_h - A_h u_h) & \text{ (Restrict residual) } \\ u_{2h} & \leftarrow & A_{2h}^{-1} f_{2h} & \text{ (Direct solver on coarse grid) } \\ u_{h} & \leftarrow & u_{h} + P_{2h}^h u_{2h} & \text{ (Update with prolongation of coarse solution) } \\ u_h & \leftarrow & S_h u_h & \text{ (Smooth error) } \\ \end{array}$

Notes on plotting data records with gnuplot

We will store different records with function data in a single file. The format used is

"Title for record 0"
x1 y1
x2 y2
.  .
.  .
xn yn

"Title for record 1"
x1 y1
x2 y2
.  .
.  .
xn yn

"Title for record 2"
x1 y1
x2 y2
.  .
.  .
xn yn

...


You can plot a single record by specifying its index, e.g.:

plot "filename.data" index 2 using 1:2 with linespoints lt 2 lw 2


plots the record with index 2.

Code for the exercise

#include <cmath>
#include <memory>
#include <vector>

#include <hpc/matvec/axpy.hpp>
#include <hpc/matvec/copy.hpp>
#include <hpc/matvec/densevector.hpp>
#include <hpc/matvec/iterators.hpp>
#include <hpc/matvec/traits.hpp>
#include <hpc/matvec/print.hpp>

//------------------------------------------------------------------------------

using namespace hpc;
using namespace hpc::matvec;

// Checks that both arguments compare equal and hands back the common value.
// Used to assert that paired grid vectors have matching lengths while still
// yielding that length as an expression.
template <typename T>
const T &
assertEqual(const T &a, const T &b)
{
    assert(a == b);
    return a;
}

//------------------------------------------------------------------------------

// Discretizes the 1D boundary value problem on a uniform grid with N interior
// points: samples the right-hand side f at every grid point x_i = i*h into
// f_h, and stores the Dirichlet values g(0), g(1) in the boundary entries of
// u_h.  Both vectors must have the same length (N+2 entries).
template <
typename F, typename G,
typename T, template<typename> class VectorF,
template<typename> class VectorU,
Require< Dense<VectorF<T>>,
Dense<VectorU<T>> > = true
>
void
discretizeProblem(const F &f, const G &g, VectorF<T> &f_h, VectorU<T> &u_h)
{
    auto N = assertEqual(f_h.length(), u_h.length()) - 2;
    auto h = T(1)/(N+1);

    // Dirichlet boundary conditions
    u_h(0)   = g(0);
    u_h(N+1) = g(1);

    // sample f on the whole grid, boundary entries included
    for (std::size_t i = 0; i < f_h.length(); ++i) {
        f_h(i) = f(i*h);
    }
}

//------------------------------------------------------------------------------

// Evaluates the pointwise error e_h(i) = u(x_i) - u_h(i) against the exact
// solution u at every grid point x_i = i*h (boundary points included).
template <
typename U,
typename T, template<typename> class VectorU,
template<typename> class VectorE,
Require< Dense<VectorU<T>>,
Dense<VectorE<T>> > = true
>
void
evalError(const U &u, const VectorU<T> &u_h, VectorE<T> &e_h)
{
    auto N = assertEqual(u_h.length(), e_h.length()) - 2;
    auto h = T(1)/(N+1);

    for (std::size_t i = 0; i <= N+1; ++i) {
        e_h(i) = u(i*h) - u_h(i);
    }
}

//------------------------------------------------------------------------------

// Computes the residual r_h = f_h - A_h u_h for the 1D Poisson problem with
// the standard three-point stencil (-u_{i-1} + 2 u_i - u_{i+1})/h^2.  The
// residual is zero on the Dirichlet boundary by construction.
template <
typename T, template<typename> class VectorU,
template<typename> class VectorF,
template<typename> class VectorR,
Require< Dense<VectorF<T>>,
Dense<VectorU<T>>,
Dense<VectorR<T>> > = true
>
void
evalResidual(const VectorU<T> &u_h, const VectorF<T> &f_h, VectorR<T> &r_h)
{
    auto N  = assertEqual(u_h.length(), r_h.length()) - 2;
    auto h  = T(1)/(N+1);
    auto h2 = h*h;

    // residual vanishes at the boundary
    r_h(0)   = 0;
    r_h(N+1) = 0;

    for (std::size_t i = 1; i <= N; ++i) {
        auto Au_i = 1./h2 * (2*u_h(i) - u_h(i-1) - u_h(i+1));
        r_h(i)    = f_h(i) - Au_i;
    }
}

//------------------------------------------------------------------------------

// Restriction R_h^{2h} by full weighting: each coarse interior value is the
// weighted average (1/4, 1/2, 1/4) of the three fine-grid neighbours,
//   r_2h(i) = 0.25 r_h(2i-1) + 0.5 r_h(2i) + 0.25 r_h(2i+1).
// The grids must be nested, i.e. the fine grid has 2(n+1) intervals.
template <
typename T, template<typename> class VectorR1,
template<typename> class VectorR2,
Require< Dense<VectorR1<T>>,
Dense<VectorR2<T>> > = true
>
void
restriction(const VectorR1<T> &r_h, VectorR2<T> &r_2h)
{
    auto N = r_h.length()  - 2;
    auto n = r_2h.length() - 2;

    // nested grids: h -> 2h
    assert(2*(n+1) == N+1);

    r_2h(0)   = 0;
    r_2h(n+1) = 0;

    for (std::size_t j = 1; j <= n; ++j) {
        auto k  = 2*j;                   // matching fine-grid index
        r_2h(j) = 0.25*r_h(k-1) + 0.5*r_h(k) + 0.25*r_h(k+1);
    }
}

//------------------------------------------------------------------------------

// Prolongation update u_h += P_{2h}^h u_2h by linear interpolation: each
// coarse value is added in full at the coinciding fine point and with weight
// 1/2 at the two fine neighbours.  Boundary entries are left untouched.
template <
typename T, template<typename> class VectorU1,
template<typename> class VectorU2,
Require< Dense<VectorU1<T>>,
Dense<VectorU2<T>> > = true
>
void
updateProlongation(const VectorU1<T> &u_2h, VectorU2<T> &u_h)
{
    auto N = u_h.length()  - 2;
    auto n = u_2h.length() - 2;

    // nested grids: 2h -> h
    assert(2*(n+1) == N+1);

    for (std::size_t j = 1; j <= n; ++j) {
        u_h(2*j-1) += 0.5 * u_2h(j);
        u_h(2*j  ) += 1.0 * u_2h(j);
        u_h(2*j+1) += 0.5 * u_2h(j);
    }
}

//------------------------------------------------------------------------------

// NOTE(review): the signature line of this function was lost in the source
// (only `void` and the body survived, leaving `k` and `x_h` undeclared, so
// the file did not compile).  The name and parameter list below are
// reconstructed from the body — confirm against the original exercise code.
//
// Superimposes the k-th sine mode sin(k*pi*x) on the interior grid points of
// x_h; useful for seeding an initial guess with error modes of a chosen
// frequency.  Boundary entries stay untouched (sin(0) = sin(k*pi) = 0 on the
// grid anyway).
template <
typename T, template<typename> class Vector,
Require< Dense<Vector<T>> > = true
>
void
addSineMode(std::size_t k, Vector<T> &x_h)
{
    auto N = x_h.length() - 2;
    auto h = T(1)/(N+1);

    for (std::size_t i = 1; i <= N; ++i) {
        x_h(i) += std::sin(M_PI*k*i*h);
    }
}

//------------------------------------------------------------------------------

// Maximum norm ||x_h||_inf = max_i |x_h(i)| over all grid points.
template <
typename T, template<typename> class Vector,
Require< Dense<Vector<T>> > = true
>
T
lInfNorm(const Vector<T> &x_h)
{
    T res = 0;

    for (auto [i, xi] : x_h) {
        (void) i;   // index unused; was a confusing self-assignment `i=i;`
        if (std::abs(xi) > res) {
            res = std::abs(xi);
        }
    }
    return res;
}

// Maximum norm of the difference, ||x1_h - x2_h||_inf, taken over all
// entries of the two (equally long) vectors.
template <
typename T, template<typename> class Vector1,
template<typename> class Vector2,
Require< Dense<Vector1<T>>,
Dense<Vector2<T>> > = true
>
T
lInfNorm(const Vector1<T> &x1_h, const Vector2<T> &x2_h)
{
    auto N = assertEqual(x1_h.length(), x2_h.length());

    T res = 0;

    for (std::size_t i = 0; i < N; ++i) {
        auto diff = std::abs(x1_h(i) - x2_h(i));
        if (diff > res) {
            res = diff;
        }
    }
    return res;
}

//------------------------------------------------------------------------------

// Discrete L1 norm of x_h via the composite trapezoidal rule on [0, 1]:
//   h * sum_{i=1}^{N} (|x_h(i-1)| + |x_h(i)|) / 2
// NOTE(review): the sum stops at i = N, so the last interval [x_N, x_{N+1}]
// is not accumulated — confirm this is intended (it is exact only when the
// boundary values vanish).
template <
typename T, template<typename> class Vector,
Require< Dense<Vector<T>> > = true
>
T
l1Norm(const Vector<T> &x_h)
{
    auto N = x_h.length() - 2;
    auto h = T(1)/(N+1);

    T res = 0;

    for (std::size_t i = 1; i <= N; ++i) {
        // (removed a leftover dead statement `i=i;` here)
        res += 0.5*(std::abs(x_h(i-1)) + std::abs(x_h(i)));
    }
    return h*res;
}

//------------------------------------------------------------------------------

// One damped Jacobi sweep for the 1D Poisson problem:
//   s_h(i) = (1 - omega) * u_h(i) + omega * (u_h(i-1) + u_h(i+1) + h^2 f_h(i)) / 2
// for the interior points; the Dirichlet boundary values are carried over
// from u_h unchanged.  omega is the damping (smoothing) parameter.
template <
typename T, template<typename> class VectorF,
template<typename> class VectorU,
template<typename> class VectorS,
Require< Dense<VectorF<T>>,
Dense<VectorU<T>>,
Dense<VectorS<T>> > = true
>
void
jacobi_step(double omega, const VectorF<T> &f_h, const VectorU<T> &u_h,
VectorS<T> &s_h)
{
    auto N  = assertEqual(f_h.length(), u_h.length()) - 2;
    auto h  = T(1)/(N+1);
    auto h2 = h*h;

    // boundary values pass through untouched
    s_h(0)   = u_h(0);
    s_h(N+1) = u_h(N+1);

    for (std::size_t i = 1; i <= N; ++i) {
        s_h(i) = (1-omega)*u_h(i) + omega*0.5*(u_h(i-1) +u_h(i+1)+h2*f_h(i));
    }
}

//------------------------------------------------------------------------------

// Direct solver for the 1D Poisson system A_h u_h = f_h with the standard
// three-point stencil (-1, 2, -1)/h^2 and Dirichlet data already stored in
// u_h(0) and u_h(N+1).  Solves the tridiagonal system in place by a
// specialized Gaussian elimination: for this constant-coefficient matrix the
// eliminated pivots appear to follow the closed form (k+1)/k (matching the
// k/(k+1) factors below), so no extra workspace is required.
// NOTE(review): assumes N >= 2 interior points; the statement order in both
// sweeps is significant and must not be rearranged.
template <
typename T, template<typename> class VectorF,
template<typename> class VectorU,
Require< Dense<VectorF<T>>,
Dense<VectorU<T>> > = true
>
void
direct_solver(const VectorF<T> &f_h, VectorU<T> &u_h)
{
auto N  = assertEqual(f_h.length(), u_h.length()) - 2;
auto h  = T(1)/(N+1);
auto h2 = h*h;

// forward substitution
// (the left boundary value u_h(0) enters the first equation, the right
// boundary value u_h(N+1) the last one)
u_h(1) = h2*f_h(1) + u_h(0);
for (std::size_t k=2; k<N; ++k) {
u_h(k) = h2*f_h(k) + u_h(k-1)*(k-1)/k;
}
u_h(N) = h2*f_h(N) + u_h(N+1) + u_h(N-1)*(N-1)/N;

// backward substitution
// (scales by the eliminated pivot and back-inserts u_h(k+1); the size_t
// loop terminates correctly because `--k` yields 0 before the test fails)
u_h(N) = u_h(N)*N/(N+1);
for (std::size_t k=N-1; k>=1; --k) {
u_h(k) = (u_h(k)+u_h(k+1))*k/(k+1);
}
}

//------------------------------------------------------------------------------

// Sets every entry of u_h (boundary included) to zero.
template <typename T, template<typename> class Vector>
void
init_zero(Vector<T> &u_h)
{
    for (auto [i, ui] : u_h) {
        (void) i;   // index unused; was a confusing self-assignment `i = i;`
        ui = T(0);
    }
}

//------------------------------------------------------------------------------

// Writes one gnuplot data record to stdout: a quoted title line followed by
// "x y" pairs for every grid point (x_i = i/(N+1)), terminated by two blank
// lines, which gnuplot treats as the record separator for `index`.
template <typename T, template<typename> class Vector>
void
print_record(const Vector<T> &r_h, const char *descr)
{
    auto N = r_h.length() - 2;

    fmt::printf("\"%s\"\n", descr);
    for (std::size_t i = 0; i < r_h.length(); ++i) {
        fmt::printf("%f %f\n", double(i)/(N+1), r_h(i));
    }
    fmt::printf("\n\n");
}

int
main()
{
auto f = [](double x) { return 0; };
auto g = [](double x) { return 0; };

std::size_t N0 = 512;
std::size_t N1 = 256;
std::size_t N2 = 128;
std::size_t N3 = 64;

DenseVector<double>  f_h(N0+1), u_h(N0+1), tmp_h(N0+1), r_h(N0+1);
DenseVector<double>  f_2h(N1+1), u_2h(N1+1);

discretizeProblem(f, g, f_h, u_h);

// First initialize u_h with exact solution
direct_solver(f_h, u_h);

double omega = 0.5;     // smoothing parameter

// On Omega_{h}
// ------------
print_record(r_h, "Resifual r_h of initial guess");

print_record(r_h, "r_h after one jacobi iteration");

// On Omega_{2h}
// -------------

// On Omega_{h}
// ------------
print_record(r_h, "r_{h} after updating with prolonagtion of u_{2h}");

print_record(r_h, "r_{h} after one jacobi iteration");
}


Exercise

Implement the restriction and prolongation operator. Create and have a look at the following plots:

• The residual of the initial guess $$r_h$$

• The residual $$r_h$$ after smoothing.

• The residual $$r_h$$ after updating $$u_h$$ with the coarse solution.

• The residual $$r_h$$ after smoothing again.

Note that you can compute $$r_h$$ with function evalResidual.