Sample solution

mpi-primes.cpp
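
// mpi-primes: search the interval from N1 to N2 for prime constellations
// p, p + n_1, ..., p + n_{k-1} (e.g. "1 1000 2 6" asks for triples
// p, p+2, p+6); process 0 acts as master and hands out subintervals of
// JOBSIZE numbers to the slave processes, which report the primes they find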

#include <mpi.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include "primes.hpp"
#include <hpc/gmp/integer.hpp>

using namespace std;
using namespace hpc::gmp;

// #define JOBSIZE (1<<20)
#define JOBSIZE 100

// message tags of the master/slave protocol
enum { NEXT_JOB, NEXT_RESULT, FINISH };

// send a GMP integer to dest: a FINISH tag is transmitted as an empty
// message; otherwise the value is flattened with ExportedInteger and sent
// as a header (sign and word count) followed by the words themselves
void send_integer(const Integer& value, int dest, int tag) {
   assert(dest >= 0);
   if (tag == FINISH) {
      MPI_Send(nullptr, 0, MPI_INT, dest, tag, MPI_COMM_WORLD);
   } else {
      ExportedInteger exp_value(value);
      int len = (int) exp_value.length();
      int header[2] = {exp_value.sgn(), len};
      MPI_Send(header, 2, MPI_INT, dest, tag, MPI_COMM_WORLD);
      MPI_Send(exp_value.words, len, MPI_INT, dest, tag, MPI_COMM_WORLD);
   }
}

void send_finish(int dest) {
   Integer dummy;
   send_integer(dummy, dest, FINISH);
}

// receive a GMP integer; source and tag may be wildcards and are updated
// to the actual sender and tag; returns false if a FINISH message arrived
bool receive_integer(Integer& value, int& source, int& tag) {
   MPI_Status status;
   int header[2];
   MPI_Recv(header, 2, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   tag = status.MPI_TAG;
   source = status.MPI_SOURCE;
   if (status.MPI_TAG == FINISH) return false;
   int sgn = header[0]; unsigned int len = header[1];
   ExportedInteger exp_value(sgn, len);
   MPI_Recv(exp_value.words, len, MPI_INT, source, tag, MPI_COMM_WORLD, &status);
   value = exp_value.get();
   return true;
}

// master: hand out subintervals of JOBSIZE numbers between start_interval
// and end_interval to the slaves and print the primes they report
static void
primes_master(Integer& start_interval, Integer& end_interval,
      unsigned int k, unsigned int offsets[], int nofslaves) {
   // broadcast parameters that are required by all slaves
   MPI_Bcast(&k, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
   MPI_Bcast(offsets, k-1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

   // send out initial tasks for all slaves
   struct Task {
      Integer start;
      Integer end;
      Task(const Integer& startval, unsigned long int intervallen,
	       const Integer& end_interval) :
	    start(startval), end(startval) {
	 end += intervallen;
	 if (end > end_interval) {
	    end = end_interval;
	 }
      }
   };
   int running_tasks = 0;
   Integer start(start_interval);
   for (int slave = 1; slave <= nofslaves; ++slave) {
      if (start < end_interval) {
	 Task task(start, JOBSIZE, end_interval); start += JOBSIZE;
	 send_integer(task.start, slave, NEXT_JOB);
	 send_integer(task.end, slave, NEXT_JOB);
	 ++running_tasks;
      } else {
         // there is no work left for this slave
	 send_finish(slave);
      }
   }

   // collect results and send out remaining tasks
   while (running_tasks > 0) {
      // receive result of a completed task
      int source = MPI_ANY_SOURCE; int tag = MPI_ANY_TAG;
      Integer result;
      if (receive_integer(result, source, tag)) {
         // NEXT_RESULT: print the reported prime; the slave keeps
         // searching its current subinterval
         cout << result << " (received from " << source << ")" << endl;
      } else if (start < end_interval) {
         // FINISH from a slave: its task is done, hand out the next subinterval
         Task task(start, JOBSIZE, end_interval); start += JOBSIZE;
         send_integer(task.start, source, NEXT_JOB);
         send_integer(task.end, source, NEXT_JOB);
      } else {
         // no work left: release this slave
         send_finish(source); --running_tasks;
      }
   }
}

// slave: receive subintervals from the master, report each prime
// constellation found (tag NEXT_RESULT), signal the end of every task
// with FINISH, and terminate as soon as the master sends FINISH
static void primes_slave() {
   unsigned int k;
   MPI_Bcast(&k, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
   unsigned int* offsets = new unsigned int[k-1];
   MPI_Bcast(offsets, k-1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

   // receive tasks and process them
   for(;;) {
      int source = 0;
      int tag = MPI_ANY_TAG;
      Integer start;
      Integer end;
      if (!receive_integer(start, source, tag)) break;
      if (!receive_integer(end, source, tag)) break;
      Integer prime;
      while (search_prime_constellation(start, end, k, offsets, prime)) {
	 send_integer(prime, 0, NEXT_RESULT);
	 start = prime;
	 start += 1;
      }
      send_finish(0);
   }
   // release allocated memory
   delete[] offsets;
}

char* progname;

void usage() {
   cerr << "Usage: " << progname << " N1 N2 {n_i} " << endl;
   exit(1);
}

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);

   int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   int nofslaves; MPI_Comm_size(MPI_COMM_WORLD, &nofslaves);
   --nofslaves; assert(nofslaves > 0);

   if (rank == 0) {
      progname = *argv++; --argc;
      if (argc < 3) usage();
      Integer start(*argv++); --argc;
      Integer end(*argv++); --argc;

      int k = argc + 1;
      unsigned int* offsets = new unsigned int[k-1];
      for (int i = 0; i < k-1; ++i) {
	 offsets[i] = atoi(*argv++); --argc;
      }

      primes_master(start, end, k, offsets, nofslaves);
      delete[] offsets; // release allocated memory
   } else {
      primes_slave();
   }

   MPI_Finalize();
}
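
The helper class ExportedInteger from <hpc/gmp/integer.hpp> is not shown above. Judging from its use in send_integer and receive_integer, it flattens a GMP integer into a sign, a word count, and an array of words that can be shipped with plain MPI messages. A minimal sketch of this technique using gmpxx directly, with 32-bit words and the made-up names flatten and restore (not the API of the course library), could look like this:

#include <gmpxx.h>
#include <vector>
#include <cstdint>

// flatten the magnitude of value into 32-bit words (least significant first);
// the sign is recorded separately
std::vector<std::uint32_t> flatten(const mpz_class& value, int& sign) {
   sign = mpz_sgn(value.get_mpz_t());
   std::vector<std::uint32_t> words(
      (mpz_sizeinbase(value.get_mpz_t(), 2) + 31) / 32);
   size_t count = 0;
   if (sign != 0) {
      mpz_export(words.data(), &count, /* order = */ -1,
         sizeof(std::uint32_t), /* endian = */ 0, /* nails = */ 0,
         value.get_mpz_t());
   }
   words.resize(count);
   return words;
}

// rebuild the integer from the word array and the sign
mpz_class restore(const std::vector<std::uint32_t>& words, int sign) {
   mpz_class value; // defaults to 0
   if (!words.empty()) {
      mpz_import(value.get_mpz_t(), words.size(), /* order = */ -1,
         sizeof(std::uint32_t), /* endian = */ 0, /* nails = */ 0,
         words.data());
   }
   if (sign < 0) {
      value = -value;
   }
   return value;
}

Likewise, search_prime_constellation() is declared in primes.hpp and implemented in primes.cpp, which is compiled in the transcript below but not listed here. A straightforward variant based on GMP's mpz_nextprime and mpz_probab_prime_p could look as follows; the use of mpz_class instead of hpc::gmp::Integer and the exact boundary handling (here the half-open range (start, end], which matches the output below) are assumptions, not necessarily what primes.cpp does:

#include <gmpxx.h>

// find the smallest prime p with start < p <= end such that
// p + offsets[0], ..., p + offsets[k-2] are (probable) primes as well;
// on success p is stored in prime and true is returned
bool search_prime_constellation(const mpz_class& start, const mpz_class& end,
      unsigned int k, unsigned int offsets[], mpz_class& prime) {
   mpz_class p = start;
   for (;;) {
      // advance p to the next prime strictly greater than its current value
      mpz_nextprime(p.get_mpz_t(), p.get_mpz_t());
      if (p > end) return false;
      // check the remaining members of the constellation
      bool found = true;
      for (unsigned int i = 0; i + 1 < k; ++i) {
         mpz_class candidate = p + offsets[i];
         if (mpz_probab_prime_p(candidate.get_mpz_t(), 25) == 0) {
            found = false;
            break;
         }
      }
      if (found) {
         prime = p;
         return true;
      }
   }
}
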
theon$ g++ -g -Wall -std=c++17 -I/home/numerik/pub/pp/ss19/lib -c primes.cpp
theon$ mpiCC -g -Wall -std=c++17 -I/home/numerik/pub/pp/ss19/lib -c mpi-primes.cpp
theon$ mpiCC -o mpi-primes mpi-primes.o primes.o -lgmpxx -lgmp
theon$ mpirun -np 4 mpi-primes 1 1000 2 6
107 (received from 2)
191 (received from 2)
311 (received from 2)
227 (received from 3)
347 (received from 2)
5 (received from 1)
461 (received from 3)
641 (received from 2)
821 (received from 3)
857 (received from 3)
881 (received from 3)
11 (received from 1)
17 (received from 1)
41 (received from 1)
101 (received from 1)
--------------------------------------------------------------------------
A system call failed during shared memory initialization that should
not have.  It is likely that your MPI job will now either abort or
experience performance degradation.

  Local host:  theon
  System call: unlink(2) /tmp/ompi.theon.120/pid.22776/1/vader_segment.theon.120.10b30001.1
  Error:       No such file or directory (errno 2)
--------------------------------------------------------------------------
theon$ 
heim$ g++-8.3 -g -Wall -std=c++17 -I/home/numerik/pub/pp/ss19/lib -c primes.cpp
/usr/local/libexec/gcc/x86_64-pc-linux-gnu/8.3.0/cc1plus: error while loading shared libraries: libmpfr.so.4: cannot open shared object file: No such file or directory
heim$ OMPI_CXX=g++-8.3 mpiCC -g -Wall -std=c++17 -I/home/numerik/pub/pp/ss19/lib -c mpi-primes.cpp -Wno-literal-suffix
/usr/local/libexec/gcc/x86_64-pc-linux-gnu/8.3.0/cc1plus: error while loading shared libraries: libmpfr.so.4: cannot open shared object file: No such file or directory
heim$ OMPI_CXX=g++-8.3 mpiCC -o mpi-primes mpi-primes.o primes.o -lgmpxx -lgmp
/usr/bin/ld: /usr/bin/ld: DWARF error: can't find .debug_ranges section.
mpi-primes.o: in function `send_integer(__gmp_expr<__mpz_struct [1], __mpz_struct [1]> const&, int, int)':
mpi-primes.cpp:(.text+0x3f): undefined reference to `__assert_c99'
/usr/bin/ld: mpi-primes.o: in function `main':
mpi-primes.cpp:(.text+0x931): undefined reference to `__assert_c99'
collect2: error: ld returned 1 exit status
heim$ mpirun -np 4 mpi-primes 1 1000 2 6
--------------------------------------------------------------------------
mpirun was unable to find the specified executable file, and therefore
did not launch the job.  This error was first reported for process
rank 0; it may have occurred for other processes as well.

NOTE: A common cause for this error is misspelling a mpirun command
      line parameter option (remember that mpirun interprets the first
      unrecognized command line token as the executable).

Node:       heim
Executable: mpi-primes
--------------------------------------------------------------------------
4 total processes failed to start
heim$