=============== Sample solution [TOC] ===============

---- CODE (type=cpp) ----------------------------------------------------------
template<typename T,
         template<typename> class MA,
         template<typename> class MB,
         Require<Ge<MA<T>>, Ge<MB<T>>> = true>
int scatter_by_block(const MA<T>& A, MB<T>& B, int root,
      MPI_Comm grid, unsigned int overlap = 0) {
   assert(overlap < A.numRows() && overlap < A.numCols());
   int nof_processes; MPI_Comm_size(grid, &nof_processes);
   int rank; MPI_Comm_rank(grid, &rank);
   int dims[2]; int coords[2]; int periods[2];
   MPI_Cart_get(grid, 2, dims, periods, coords);
   hpc::aux::UniformSlices<std::size_t> rows(dims[0],
      A.numRows() - 2*overlap);
   hpc::aux::UniformSlices<std::size_t> columns(dims[1],
      A.numCols() - 2*overlap);

   int rval = MPI_SUCCESS;
   if (rank == root) {
      MPI_Request requests[nof_processes-1]; int ri = 0;
      for (int i = 0; i < nof_processes; ++i) {
         MPI_Cart_coords(grid, i, 2, coords);
         auto A_ = A.block(rows.offset(coords[0]),
               columns.offset(coords[1])).dim(
            rows.size(coords[0]) + 2*overlap,
            columns.size(coords[1]) + 2*overlap);
         if (i == root) {
            hpc::matvec::copy(A_, B);
         } else {
            MPI_Isend(
               &A_(0, 0), 1, get_type(A_),
               i, 0, grid, &requests[ri++]);
         }
      }
      for (auto& request: requests) {
         MPI_Status status;
         MPI_Wait(&request, &status);
         if (status.MPI_ERROR != MPI_SUCCESS) {
            rval = status.MPI_ERROR;
         }
      }
   } else {
      MPI_Status status;
      rval = MPI_Recv(&B(0, 0), 1, get_type(B),
         root, 0, grid, &status);
   }
   return rval;
}

template<typename T,
         template<typename> typename MA,
         template<typename> typename MB,
         Require<Ge<MA<T>>, Ge<MB<T>>> = true>
int gather_by_block(const MA<T>& A, MB<T>& B, int root,
      MPI_Comm grid, unsigned int overlap = 0) {
   assert(overlap < A.numRows() && overlap < A.numCols());
   int nof_processes; MPI_Comm_size(grid, &nof_processes);
   int rank; MPI_Comm_rank(grid, &rank);
   int dims[2]; int coords[2]; int periods[2];
   MPI_Cart_get(grid, 2, dims, periods, coords);
   hpc::aux::UniformSlices<std::size_t> rows(dims[0],
      B.numRows() - 2*overlap);
   hpc::aux::UniformSlices<std::size_t> columns(dims[1],
      B.numCols() - 2*overlap);
   auto A_ = A.block(overlap, overlap).dim(
      A.numRows() - 2*overlap, A.numCols() - 2*overlap);

   int rval = MPI_SUCCESS;
   if (rank == root) {
      MPI_Request requests[nof_processes-1]; int ri = 0;
      for (int i = 0; i < nof_processes; ++i) {
         MPI_Cart_coords(grid, i, 2, coords);
         auto B_ = B.block(rows.offset(coords[0]) + overlap,
               columns.offset(coords[1]) + overlap).dim(
            rows.size(coords[0]), columns.size(coords[1]));
         if (i == root) {
            hpc::matvec::copy(A_, B_);
         } else {
            MPI_Irecv(&B_(0, 0), 1, get_type(B_),
               i, 0, grid, &requests[ri++]);
         }
      }
      for (auto& request: requests) {
         MPI_Status status;
         MPI_Wait(&request, &status);
         if (status.MPI_ERROR != MPI_SUCCESS) {
            rval = status.MPI_ERROR;
         }
      }
   } else {
      rval = MPI_Send(&A_(0, 0), 1, get_type(A_), root, 0, grid);
   }
   return rval;
}
-------------------------------------------------------------------------------

---- SHELL (path=session07) ---------------------------------------------------
mpic++ -g -std=c++17 -I/home/numerik/pub/pp/ss19/lib -o scatter-gather5 scatter-gather5.cpp
mpirun -np 4 scatter-gather5
-------------------------------------------------------------------------------

---- SHELL (path=session07,hostname=heim) -------------------------------------
OMPI_CXX=g++-8.3 mpic++ -g -std=c++17 -I/home/numerik/pub/pp/ss19/lib -o scatter-gather5 scatter-gather5.cpp -Wno-literal-suffix
mpirun -np 4 scatter-gather5
-------------------------------------------------------------------------------

:navigate: up -> doc:index
           back -> doc:session07/page09
           next -> doc:session07/page11
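The two functions above expect a driver that creates the two-dimensional Cartesian grid and allocates a matching local block on every process. The following sketch shows one possible way to invoke them; it is not part of the sample solution, and the header paths, the GeMatrix interface, and the initialization are assumptions modeled on the course library referenced by the compile line above. Only the calls to scatter_by_block and gather_by_block are taken from this page.

---- CODE (type=cpp) ----------------------------------------------------------
/* hypothetical driver sketch: header paths and the GeMatrix constructor
   are assumptions about the course library, not part of the solution;
   it also needs the definitions of scatter_by_block and gather_by_block */
#include <cstddef>
#include <mpi.h>
#include <hpc/aux/slices.hpp>      /* assumed: hpc::aux::UniformSlices */
#include <hpc/matvec/gematrix.hpp> /* assumed: hpc::matvec::GeMatrix */
#include <hpc/mpi/matrix.hpp>      /* assumed: get_type for matrix views */

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);
   int nof_processes; MPI_Comm_size(MPI_COMM_WORLD, &nof_processes);

   /* create a non-periodic two-dimensional process grid */
   int dims[2] = {0, 0}; int periods[2] = {0, 0};
   MPI_Dims_create(nof_processes, 2, dims);
   MPI_Comm grid;
   MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, /*reorder=*/ 1, &grid);
   int rank; MPI_Comm_rank(grid, &rank);
   int coords[2]; MPI_Cart_coords(grid, rank, 2, coords);

   constexpr std::size_t N = 10;       /* global problem size */
   constexpr unsigned int overlap = 1; /* width of the shared border */

   /* global matrix (relevant at the root only) and local block that
      includes the overlapping border rows and columns */
   hpc::matvec::GeMatrix<double> A(N, N);
   hpc::aux::UniformSlices<std::size_t> rows(dims[0], N - 2*overlap);
   hpc::aux::UniformSlices<std::size_t> columns(dims[1], N - 2*overlap);
   hpc::matvec::GeMatrix<double> B(rows.size(coords[0]) + 2*overlap,
      columns.size(coords[1]) + 2*overlap);

   scatter_by_block(A, B, 0, grid, overlap);
   /* ... update the interior of B and exchange the borders ... */
   gather_by_block(B, A, 0, grid, overlap);

   MPI_Finalize();
}
-------------------------------------------------------------------------------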