Sample solution
#ifndef HPC_MPI_VECTOR_H
#define HPC_MPI_VECTOR_H 1

#include <mpi.h>
#include <hpc/matvec/densevector.hpp>
#include <hpc/mpi/fundamental.hpp>

namespace hpc { namespace mpi {

template<typename T, template<typename> typename Vector,
   Require<Dense<Vector<T>>> = true>
MPI_Datatype get_type(const Vector<T>& vector) {
   MPI_Datatype datatype;
   MPI_Type_vector(
      /* count = */ vector.length(),
      /* blocklength = */ 1,
      /* stride = */ vector.inc(),
      /* element type = */ get_type(vector(0)),
      /* newly created type = */ &datatype);
   MPI_Type_commit(&datatype);
   return datatype;
}

} } // namespaces mpi, hpc

#endif // HPC_MPI_VECTOR_H
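As an illustration (not part of the sample solution), here is a minimal plain-MPI sketch of what get_type builds for a column of a row-major 3x7 matrix of double: length 3 and increment 7 map to count = 3, blocklength = 1 and stride = 7, whereas a row of the same matrix has increment 1 and hence yields an effectively contiguous type.

#include <mpi.h>

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);
   double A[3][7];                      /* row-major 3x7 matrix */
   for (int i = 0; i < 3; ++i) {
      for (int j = 0; j < 7; ++j) {
         A[i][j] = i * 100 + j;
      }
   }
   MPI_Datatype col_type;
   /* first column of A: 3 blocks of one double each, 7 doubles apart */
   MPI_Type_vector(/* count = */ 3, /* blocklength = */ 1,
      /* stride = */ 7, MPI_DOUBLE, &col_type);
   MPI_Type_commit(&col_type);
   /* &A[0][0] together with col_type describes A[0][0], A[1][0], A[2][0] */
   MPI_Type_free(&col_type);
   MPI_Finalize();
}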
#include <cassert>
#include <cstdlib>
#include <mpi.h>
#include <printf.hpp>
#include <hpc/matvec/gematrix.hpp>
#include <hpc/matvec/iterators.hpp>
#include <hpc/mpi/vector.hpp>

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);

   int nof_processes; MPI_Comm_size(MPI_COMM_WORLD, &nof_processes);
   int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   assert(nof_processes == 2);

   using namespace hpc::matvec;
   using namespace hpc::mpi;

   std::size_t nof_rows = 3;
   std::size_t nof_cols = 7;

   if (rank == 0) {
      GeMatrix<double> A(nof_rows, nof_cols, Order::RowMajor);
      for (auto [i, j, Aij]: A) {
         Aij = i * 100 + j;
      }
      auto row = A.row(2, 0);
      auto col = A.col(0, 0);
      MPI_Datatype row_type = get_type(row);
      MPI_Datatype col_type = get_type(col);
      MPI_Send(&row(0), 1, row_type, 1, 0, MPI_COMM_WORLD);
      MPI_Send(&col(0), 1, col_type, 1, 0, MPI_COMM_WORLD);

      /* receive it back for verification */
      DenseVector<double> vec1(nof_cols), vec2(nof_rows);
      MPI_Datatype vec1_type = get_type(vec1);
      MPI_Datatype vec2_type = get_type(vec2);
      MPI_Status status;
      MPI_Recv(&vec1(0), 1, vec1_type, 1, 0, MPI_COMM_WORLD, &status);
      MPI_Recv(&vec2(0), 1, vec2_type, 1, 0, MPI_COMM_WORLD, &status);

      /* verify it */
      for (auto [i, xi]: vec1) {
         if (vec1(i) != row(i)) {
            fmt::printf("verification failed for row(%d): %lg vs %lg\n",
               i, vec1(i), row(i));
         }
      }
      for (auto [i, xi]: vec2) {
         if (vec2(i) != col(i)) {
            fmt::printf("verification failed for col(%d): %lg vs %lg\n",
               i, vec2(i), col(i));
         }
      }
   } else {
      DenseVector<double> vec1(nof_cols), vec2(nof_rows);
      MPI_Datatype vec1_type = get_type(vec1);
      MPI_Datatype vec2_type = get_type(vec2);
      MPI_Status status;
      MPI_Recv(&vec1(0), 1, vec1_type, 0, 0, MPI_COMM_WORLD, &status);
      MPI_Recv(&vec2(0), 1, vec2_type, 0, 0, MPI_COMM_WORLD, &status);

      /* send it back for verification */
      MPI_Send(&vec1(0), 1, vec1_type, 0, 0, MPI_COMM_WORLD);
      MPI_Send(&vec2(0), 1, vec2_type, 0, 0, MPI_COMM_WORLD);
   }
   MPI_Finalize();
}
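Two details are worth noting. First, the messages are matched by type signature, not by memory layout: rank 0 sends the column as one element of a strided derived type (3 doubles, 7 apart), while the other rank receives it into a contiguous DenseVector whose derived type has stride 1; both signatures consist of 3 doubles, so the transfer is legal. Second, every call to get_type commits a fresh datatype that is never released; for this short test that is harmless, but a longer-running program would pair each committed type with a matching MPI_Type_free.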
theon$ mpic++ -g -std=c++17 -Istep01 -I/home/numerik/pub/pp/ss19/lib -o transfer_vectors2 transfer_vectors2.cpp
theon$ mpirun -np 2 transfer_vectors2
theon$
heim$ OMPI_CXX=g++-8.3 mpic++ -g -std=c++17 -Istep01 -I/home/numerik/pub/pp/ss19/lib -o transfer_vectors2 transfer_vectors2.cpp -Wno-literal-suffix
/usr/local/libexec/gcc/x86_64-pc-linux-gnu/8.3.0/cc1plus: error while loading shared libraries: libmpfr.so.4: cannot open shared object file: No such file or directory
heim$ mpirun -np 2 transfer_vectors2
--------------------------------------------------------------------------
Open MPI tried to fork a new process via the "execve" system call but
failed. Open MPI checks many things before attempting to launch a child
process, but nothing is perfect. This error may be indicative of another
problem on the target host, or even something as silly as having
specified a directory for your application. Your job will now abort.

  Local host:        heim
  Working dir:       /home/numerik/pp/ss19/sessions/session06
  Application name:  /home/borchert/pp/ss19/sessions/session06/transfer_vectors2
  Error:             No such file or directory
--------------------------------------------------------------------------
--------------------------------------------------------------------------
mpirun was unable to start the specified application as it encountered an
error:

Error code: -125
Error name: The specified application failed to start
Node: heim

when attempting to start process rank 0.
--------------------------------------------------------------------------
[heim:02319] 1 more process has sent help message help-orte-odls-default.txt / execve error
[heim:02319] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
2 total processes failed to start
heim$
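The failure on heim follows from the failed compile: cc1plus could not load libmpfr.so.4, so no transfer_vectors2 binary was produced, and mpirun consequently reports "No such file or directory" when trying to launch it.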