#ifndef HPC_MPI_MATRIX_HPP
#define HPC_MPI_MATRIX_HPP 1

#include <cassert>
#include <memory>
#include <type_traits>
#include <vector>
#include <mpi.h>
#include <hpc/aux/slices.hpp>
#include <hpc/matvec/gematrix.hpp>
#include <hpc/mpi/fundamental.hpp>

namespace hpc { namespace mpi {

/* construct an MPI data type for a row of matrix A
   where the extent is adapted such that consecutive
   rows can be combined even if their memory regions
   overlap (as they do for column-major storage) */
template<typename T, template<typename> class Matrix,
   Require<Ge<Matrix<T>>> = true>
MPI_Datatype get_row_type(const Matrix<T>& A) {
   assert(A.numRows() > 0 && A.numCols() > 0); /* A(0, 0) is accessed below */
   MPI_Datatype rowtype;
   MPI_Type_vector(
      /* count = */ A.numCols(),
      /* blocklength = */ 1,
      /* stride = */ A.incCol(),
      /* element type = */ get_type(A(0, 0)),
      /* newly created type = */ &rowtype);

   /* if the rows are stored contiguously (row-major
      without gaps), the extent already fits and we
      are finished */
   if (A.incRow() == A.numCols()) {
      MPI_Type_commit(&rowtype);
      return rowtype;
   }

   /* the extent of the MPI data type does not match
      the offset of subsequent rows -- this is a problem
      whenever we want to handle more than one row;
      to fix this we need to use the resize function
      which allows us to adapt the extent to A.incRow() */
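   /* worked example (illustrative numbers, not taken from the
      interface): for a row-major 4x3 double view embedded in a
      4x5 matrix we have incRow() == 5, while the extent of rowtype
      covers just 3 doubles; resizing the extent to 5 * sizeof(double)
      lets row i start at i * incRow() elements past the base address */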
   MPI_Datatype resized_rowtype;
   MPI_Type_create_resized(rowtype, 0, /* lb remains unchanged */
      A.incRow() * sizeof(T), &resized_rowtype);
   MPI_Type_commit(&resized_rowtype);
   MPI_Type_free(&rowtype);
   return resized_rowtype;
}
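
/* illustrative sketch (not part of this header's interface): sending
   row i of A with the type returned by get_row_type; dest, tag, and
   comm are assumed to be defined by the caller:

      MPI_Datatype rowtype = get_row_type(A);
      MPI_Send(&A(i, 0), 1, rowtype, dest, tag, comm);
      MPI_Type_free(&rowtype);
*/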

/* create MPI data type for matrix A */
template<typename T, template<typename> class Matrix,
   Require<Ge<Matrix<T>>> = true>
MPI_Datatype get_type(const Matrix<T>& A) {
   MPI_Datatype datatype;
   if (A.incCol() == 1) {
      MPI_Type_vector(
         /* count = */ A.numRows(),
         /* blocklength = */ A.numCols(),
         /* stride = */ A.incRow(),
         /* element type = */ get_type(A(0, 0)),
         /* newly created type = */ &datatype);
   } else {
      /* vector of row vectors */
      MPI_Datatype rowtype = get_row_type(A);
      MPI_Type_contiguous(A.numRows(), rowtype, &datatype);
      MPI_Type_free(&rowtype);
   }
   MPI_Type_commit(&datatype);
   return datatype;
}
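
/* illustrative sketch (not part of this header's interface):
   transferring a whole matrix view in one message; dest, tag, and
   comm are assumed to be defined by the caller:

      MPI_Datatype datatype = get_type(A);
      MPI_Send(&A(0, 0), 1, datatype, dest, tag, comm);
      MPI_Type_free(&datatype);
*/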

template<typename T,
   template<typename> class MA,
   template<typename> class MB,
   Require<Ge<MA<T>>> = true, Require<Ge<MB<T>>> = true>
int scatter_by_row(const MA<T>& A, MB<T>& B, int root, MPI_Comm comm) {

   int nof_processes; MPI_Comm_size(comm, &nof_processes);
   int rank; MPI_Comm_rank(comm, &rank);

   MPI_Datatype rowtype_B = get_row_type(B);
   int rval;
   if (rank == root) {
      assert(A.numCols() == B.numCols());

      hpc::aux::UniformSlices<int> slices(nof_processes, A.numRows());
      std::vector<int> counts(nof_processes);
      std::vector<int> offsets(nof_processes);

      MPI_Datatype rowtype_A = get_row_type(A);
      for (int i = 0; i < nof_processes; ++i) {
         if (i < A.numRows()) {
            counts[i] = slices.size(i);
            offsets[i] = slices.offset(i);
         } else {
            counts[i] = 0; offsets[i] = 0;
         }
      }
      int recvcount = counts[rank];
      assert(B.numRows() == recvcount);

      /* aged OpenMPI implementations of Debian wheezy and jessie
         expect void* instead of const void*; hence we need to remove const */
      rval = MPI_Scatterv(
         const_cast<void*>(reinterpret_cast<const void*>(&A(0, 0))),
         &counts[0], &offsets[0], rowtype_A,
         &B(0, 0), recvcount, rowtype_B, root, comm);
      MPI_Type_free(&rowtype_A);
   } else {
      int recvcount = B.numRows();
      /* the send arguments are ignored on non-root ranks; we pass
         MPI_DATATYPE_NULL rather than nullptr for the send type since
         MPI_Datatype is an integral type in some MPI implementations */
      rval = MPI_Scatterv(nullptr, nullptr, nullptr, MPI_DATATYPE_NULL,
         &B(0, 0), recvcount, rowtype_B, root, comm);
   }
   MPI_Type_free(&rowtype_B);
   return rval;
}
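
/* illustrative usage sketch (assumptions: GeMatrix from
   hpc/matvec/gematrix.hpp, root rank 0, MPI_COMM_WORLD, a problem
   size N chosen by the caller, and nof_processes/rank obtained via
   MPI_Comm_size/MPI_Comm_rank):

      hpc::matvec::GeMatrix<double> A(N, N); // initialized at root
      hpc::aux::UniformSlices<int> slices(nof_processes, N);
      hpc::matvec::GeMatrix<double> B(slices.size(rank), N);
      scatter_by_row(A, B, 0, MPI_COMM_WORLD);
*/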

template<typename T,
   template<typename> class MA,
   template<typename> class MB,
   Require<Ge<MA<T>>> = true, Require<Ge<MB<T>>> = true>
int gather_by_row(const MA<T>& A, MB<T>& B, int root, MPI_Comm comm) {
   int nof_processes; MPI_Comm_size(comm, &nof_processes);
   int rank; MPI_Comm_rank(comm, &rank);

   MPI_Datatype rowtype_A = get_row_type(A);

   int rval;
   if (rank == root) {
      assert(A.numCols() == B.numCols());

      hpc::aux::UniformSlices<int> slices(nof_processes, B.numRows());
      std::vector<int> counts(nof_processes);
      std::vector<int> offsets(nof_processes);

      for (int i = 0; i < nof_processes; ++i) {
         if (i < B.numRows()) {
            counts[i] = slices.size(i);
            offsets[i] = slices.offset(i);
         } else {
            counts[i] = 0; offsets[i] = 0;
         }
      }
      int sendcount = counts[rank];
      assert(A.numRows() == sendcount);

      MPI_Datatype rowtype_B = get_row_type(B);

      /* aged OpenMPI implementations of Debian wheezy and jessie
         expect void* instead of const void*; hence we need to remove const */
      rval = MPI_Gatherv(
         const_cast<void*>(reinterpret_cast<const void*>(&A(0, 0))),
         sendcount, rowtype_A,
         &B(0, 0), &counts[0], &offsets[0], rowtype_B, root, comm);
      MPI_Type_free(&rowtype_B);
   } else {
      int sendcount = A.numRows();
      /* the receive arguments are ignored on non-root ranks; we pass
         MPI_DATATYPE_NULL rather than nullptr for the receive type since
         MPI_Datatype is an integral type in some MPI implementations */
      rval = MPI_Gatherv(
         const_cast<void*>(reinterpret_cast<const void*>(&A(0, 0))),
         sendcount, rowtype_A,
         nullptr, nullptr, nullptr, MPI_DATATYPE_NULL, root, comm);
   }
   MPI_Type_free(&rowtype_A);
   return rval;
}
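
/* illustrative round-trip sketch (same assumptions as above): scatter
   the rows of A, update the local slice B, and gather the results back
   into a matrix C allocated at the root:

      scatter_by_row(A, B, 0, MPI_COMM_WORLD);
      // ... operate on the local rows in B ...
      gather_by_row(B, C, 0, MPI_COMM_WORLD);
*/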

} } // namespaces mpi, hpc

#endif // HPC_MPI_MATRIX_HPP