#ifndef HPC_MPI_MATRIX_H
#define HPC_MPI_MATRIX_H 1

#include <cassert>
#include <memory>
#include <type_traits>
#include <vector>
#include <mpi.h>
#include <hpc/matvec/gematrix.h>
#include <hpc/matvec/isgematrix.h>
#include <hpc/mpi/fundamental.h>
#include <hpc/aux/slices.h>

namespace hpc { namespace mpi {

template<typename Matrix>
typename std::enable_if<hpc::matvec::IsGeMatrix<Matrix>::value,
   MPI_Datatype>::type
get_row_type(const Matrix& A) {
   /* Construct a committed MPI datatype describing one row of A.
      The returned type must eventually be released with MPI_Type_free
      by the caller. */
   using ElementType = typename Matrix::ElementType;
   MPI_Datatype rowtype;
   MPI_Type_vector(
      /* count = */ A.numCols,
      /* blocklength = */ 1,
      /* stride = */ A.incCol,
      /* element type = */ get_type(A(0, 0)),
      /* newly created type = */ &rowtype);

   /* the natural extent of the vector type spans
      (numCols-1) * incCol + 1 elements; if that already equals the
      offset between consecutive rows we are finished (this reduces to
      incRow == numCols in the common row-major case with incCol == 1) */
   if (A.incRow == (A.numCols - 1) * A.incCol + 1) {
      MPI_Type_commit(&rowtype);
      return rowtype;
   }

   /* the extent of the MPI data type does not match
      the offset of subsequent rows -- this is a problem
      whenever we want to handle more than one row;
      to fix this we need to use the resize function
      which allows us to adapt the extent to A.incRow */
   MPI_Datatype resized_rowtype;
   MPI_Type_create_resized(rowtype, 0, /* lb remains unchanged */
      A.incRow * sizeof(ElementType), &resized_rowtype);
   MPI_Type_commit(&resized_rowtype);
   /* the intermediate type is no longer needed; freeing it is safe
      as resized_rowtype keeps its own reference */
   MPI_Type_free(&rowtype);
   return resized_rowtype;
}

template<typename Matrix>
typename std::enable_if<hpc::matvec::IsGeMatrix<Matrix>::value,
   MPI_Datatype>::type
get_type(const Matrix& A) {
   /* Construct a committed MPI datatype describing the entire matrix A.
      The returned type must eventually be released with MPI_Type_free
      by the caller. */
   using ElementType = typename Matrix::ElementType;
   MPI_Datatype datatype;
   if (A.incCol == 1) {
      /* elements within a row are contiguous: one vector type with a
         whole row per block suffices */
      MPI_Type_vector(
         /* count = */ A.numRows,
         /* blocklength = */ A.numCols,
         /* stride = */ A.incRow,
         /* element type = */ get_type(A(0, 0)),
         /* newly created type = */ &datatype);
   } else {
      /* vector of row vectors */
      MPI_Datatype rowtype = get_row_type(A);
      MPI_Type_contiguous(A.numRows, rowtype, &datatype);
      /* the row type is no longer needed; the contiguous type keeps
         its own reference, so freeing it here avoids a leak */
      MPI_Type_free(&rowtype);
   }
   MPI_Type_commit(&datatype);
   return datatype;
}

template<typename MA, typename MB>
typename std::enable_if<
   hpc::matvec::IsGeMatrix<MA>::value && hpc::matvec::IsGeMatrix<MB>::value &&
      std::is_same<typename MA::ElementType, typename MB::ElementType>::value,
   int>::type
scatter_by_row(const MA& A, MB& B, int root, MPI_Comm comm) {
   /* Scatter the rows of A (significant at root only) across all
      processes of comm; each process receives its slice into B.
      Returns the MPI_Scatterv result code. */
   assert(A.numCols == B.numCols);

   int nof_processes; MPI_Comm_size(comm, &nof_processes);
   int rank; MPI_Comm_rank(comm, &rank);

   hpc::aux::Slices<int> slices(nof_processes, A.numRows);
   std::vector<int> counts(nof_processes);
   std::vector<int> offsets(nof_processes);

   MPI_Datatype rowtype_A = get_row_type(A);
   for (int i = 0; i < nof_processes; ++i) {
      if (i < A.numRows) {
         counts[i] = slices.size(i);
         offsets[i] = slices.offset(i);
      } else {
         /* more processes than rows: these receive nothing */
         counts[i] = 0; offsets[i] = 0;
      }
   }
   int recvcount = counts[rank];
   assert(B.numRows == recvcount);

   MPI_Datatype rowtype_B = get_row_type(B);

   /* some MPI implementations (e.g. OpenMPI of Debian wheezy) expect
      void* instead of const void*; hence we need to cast away const */
   int rval = MPI_Scatterv(
      const_cast<typename MA::ElementType*>(&A(0, 0)),
      &counts[0], &offsets[0], rowtype_A,
      &B(0, 0), recvcount, rowtype_B, root, comm);
   /* release the derived datatypes to avoid a resource leak */
   MPI_Type_free(&rowtype_A);
   MPI_Type_free(&rowtype_B);
   return rval;
}

template<typename MA, typename MB>
typename std::enable_if<
   hpc::matvec::IsGeMatrix<MA>::value && hpc::matvec::IsGeMatrix<MB>::value &&
      std::is_same<typename MA::ElementType, typename MB::ElementType>::value,
   int>::type
gather_by_row(const MA& A, MB& B, int root, MPI_Comm comm) {
   /* Gather the row slices A held by the individual processes into B
      (significant at root only); rows are distributed as in
      scatter_by_row. Returns the MPI_Gatherv result code. */
   assert(A.numCols == B.numCols);

   int nof_processes; MPI_Comm_size(comm, &nof_processes);
   int rank; MPI_Comm_rank(comm, &rank);

   hpc::aux::Slices<int> slices(nof_processes, B.numRows);
   std::vector<int> counts(nof_processes);
   std::vector<int> offsets(nof_processes);

   for (int i = 0; i < nof_processes; ++i) {
      if (i < B.numRows) {
         counts[i] = slices.size(i);
         offsets[i] = slices.offset(i);
      } else {
         /* more processes than rows: these contribute nothing */
         counts[i] = 0; offsets[i] = 0;
      }
   }
   int sendcount = counts[rank];
   assert(A.numRows == sendcount);

   MPI_Datatype rowtype_A = get_row_type(A);
   MPI_Datatype rowtype_B = get_row_type(B);

   /* some MPI implementations (e.g. OpenMPI of Debian wheezy) expect
      void* instead of const void*; hence we need to cast away const */
   int rval = MPI_Gatherv(
      const_cast<typename MA::ElementType*>(&A(0, 0)), sendcount, rowtype_A,
      &B(0, 0), &counts[0], &offsets[0], rowtype_B, root, comm);
   /* release the derived datatypes to avoid a resource leak */
   MPI_Type_free(&rowtype_A);
   MPI_Type_free(&rowtype_B);
   return rval;
}

} } // namespaces mpi, hpc

#endif // HPC_MPI_MATRIX_H