Sample solution

Content of scatter-gather2.cpp:

#include <mpi.h>
#include <hpc/matvec/gematrix.hpp>
#include <hpc/matvec/iterators.hpp>
#include <hpc/matvec/print.hpp>
#include <hpc/mpi/matrix.hpp>

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);

   int nof_processes; MPI_Comm_size(MPI_COMM_WORLD, &nof_processes);
   int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   using namespace hpc::matvec;
   using namespace hpc::mpi;

   using Matrix = GeMatrix<double>;
   int share = 3;
   int num_rows = nof_processes * share;
   int num_cols = 5;

   Matrix B(share, num_cols, Order::RowMajor); /* individual share */
   MPI_Datatype rowtype_B = get_row_type(B);

   if (rank == 0) {
      Matrix A(num_rows, num_cols); /* entire matrix */
      for (auto [i, j, Aij]: A) {
	 Aij = i * 100 + j;
      }
      MPI_Datatype rowtype_A = get_row_type(A);
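      /* distribute share consecutive rows of A to every process, rank 0 included */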
      MPI_Scatter(&A(0, 0), share, rowtype_A,
	 &B(0, 0), share, rowtype_B,
	 0, MPI_COMM_WORLD);
      for (auto [i, j, Bij]: B) {
	 Bij += 10000 * (rank + 1);
	 (void) i; (void) j; // suppress gcc warning
      }
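      /* collect the updated shares back into A in rank order */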
      MPI_Gather(&B(0, 0), share, rowtype_B,
	 &A(0, 0), share, rowtype_A,
	 0, MPI_COMM_WORLD);
      print(A, " %6g");
   } else {
      MPI_Scatter(nullptr, 0, MPI_DATATYPE_NULL, /* ignored on non-root ranks */
	 &B(0, 0), share, rowtype_B,
	 0, MPI_COMM_WORLD);
      for (auto [i, j, Bij]: B) {
	 Bij += 10000 * (rank + 1);
	 (void) i; (void) j; // suppress gcc warning
      }
      MPI_Gather(&B(0, 0), share, rowtype_B,
	 nullptr, 0, MPI_DATATYPE_NULL, /* ignored on non-root ranks */
	 0, MPI_COMM_WORLD);
   }
   MPI_Finalize();
}
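
The helper get_row_type from <hpc/mpi/matrix.hpp> is not shown here; it is expected to return a committed MPI datatype whose extent covers exactly one matrix row, so that a count of share in MPI_Scatter and MPI_Gather selects share consecutive rows. The following is a minimal sketch of how such a type could be constructed with plain MPI calls, assuming that A uses the library's default column-major layout; the function names make_row_type_colmajor and make_row_type_rowmajor are hypothetical and not part of hpc/mpi/matrix.hpp.

#include <mpi.h>

/* hypothetical sketch for a column-major num_rows x num_cols matrix of
   double: one row consists of num_cols elements spaced num_rows apart */
MPI_Datatype make_row_type_colmajor(int num_rows, int num_cols) {
   MPI_Datatype vec, rowtype;
   MPI_Type_vector(num_cols, 1, num_rows, MPI_DOUBLE, &vec);
   /* shrink the extent to one double so that consecutive elements of
      rowtype start at consecutive rows, which lie one element apart
      in column-major storage */
   MPI_Type_create_resized(vec, 0, (MPI_Aint) sizeof(double), &rowtype);
   MPI_Type_commit(&rowtype);
   MPI_Type_free(&vec); /* rowtype keeps its own reference */
   return rowtype;
}

/* hypothetical sketch for a row-major matrix like B: rows are stored
   contiguously, so a contiguous type already has the right extent */
MPI_Datatype make_row_type_rowmajor(int num_cols) {
   MPI_Datatype rowtype;
   MPI_Type_contiguous(num_cols, MPI_DOUBLE, &rowtype);
   MPI_Type_commit(&rowtype);
   return rowtype;
}

The actual get_row_type presumably derives these parameters from the matrix object it receives and dispatches on its storage order and element type.
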
theon$ mpic++ -g -std=c++17 -I/home/numerik/pub/pp/ss19/lib -o scatter-gather2 scatter-gather2.cpp
theon$ mpirun -np 4 scatter-gather2
  10000  10001  10002  10003  10004
  10100  10101  10102  10103  10104
  10200  10201  10202  10203  10204
  20300  20301  20302  20303  20304
  20400  20401  20402  20403  20404
  20500  20501  20502  20503  20504
  30600  30601  30602  30603  30604
  30700  30701  30702  30703  30704
  30800  30801  30802  30803  30804
  40900  40901  40902  40903  40904
  41000  41001  41002  41003  41004
  41100  41101  41102  41103  41104
theon$ 
heim$ OMPI_CXX=g++-8.3 mpic++ -g -std=c++17 -I/home/numerik/pub/pp/ss19/lib -o scatter-gather2 scatter-gather2.cpp -Wno-literal-suffix
/usr/local/libexec/gcc/x86_64-pc-linux-gnu/8.3.0/cc1plus: error while loading shared libraries: libmpfr.so.4: cannot open shared object file: No such file or directory
heim$ mpirun -np 4 scatter-gather2
--------------------------------------------------------------------------
Open MPI tried to fork a new process via the "execve" system call but
failed.  Open MPI checks many things before attempting to launch a
child process, but nothing is perfect. This error may be indicative
of another problem on the target host, or even something as silly as
having specified a directory for your application. Your job will now
abort.

  Local host:        heim
  Working dir:       /home/numerik/pp/ss19/sessions/session07
  Application name:  /home/borchert/pp/ss19/sessions/session07/scatter-gather2
  Error:             No such file or directory
--------------------------------------------------------------------------
--------------------------------------------------------------------------
mpirun was unable to start the specified application as it encountered an
error:

Error code: 1
Error name: (null)
Node: heim

when attempting to start process rank 0.
--------------------------------------------------------------------------
[heim:02400] 3 more processes have sent help message help-orte-odls-default.txt / execve error
[heim:02400] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
4 total processes failed to start
heim$