/* 
   Copyright (c) 2015, Andreas F. Borchert
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
*/

/*
   This header-only C++ package provides hpc::mt::thread_pool which
   assigns incoming tasks to a fixed number of threads.  The template
   parameter determines the return type of a task.

   The following methods are supported:

    * ThreadPool(unsigned int nofthreads)
      construct thread pool with the given number of threads

    * ~ThreadPool()
      the destructor waits for all threads to finish

    * void join()
      wait for all threads to finish

    * void terminate()
      request a speedy termination, i.e. submitted but not yet assigned
      tasks remain undone; threads that wait for the corresponding futures
      will see broken promises

    * std::future<T> submit(F task_function)
      submit a task which is to be executed by one of the threads
      of the pool; the future objects allows to synchronize with
      the completion of the task and to receive the return value
      of the submitted task
*/

#ifndef HPC_MT_THREAD_POOL_H
#define HPC_MT_THREAD_POOL_H 1

#include <condition_variable>
#include <future>
#include <list>
#include <mutex>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

namespace hpc { namespace mt {

#if defined(__GNUC__) && __GNUC__ == 4 && (__GNUC_MINOR__ == 7 || __GNUC_MINOR__ == 8)
   /* GCC 4.7.x and early releases of 4.8.x have a serious bug:
      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53921
      the bug arises when lambda expressions are used within templated classes
      that refer to members of that class (implicitly through this);
      the workaround is to use "this->" explicitly
   */
   template<typename T>
   class ThreadPool {
      public:
         /* construct a thread pool with the given number of worker
            threads; each worker repeatedly fetches and executes
            submitted tasks until the pool is joined or terminated */
         ThreadPool(unsigned int nofthreads) :
               threads(nofthreads), active(0),
               joining(false), terminating(false) {
            for (auto& t: threads) {
               /* "this->" is used explicitly throughout the lambda as a
                  workaround for GCC bug 53921 (see comment above) */
               t = std::thread([this]() -> void {
                  for (;;) {
                     std::packaged_task<T()> task;
                     /* fetch next task, if there is any */
                     {
                        std::unique_lock<std::mutex> lock(this->mutex);
                        /* sleep until we terminate, a task arrives, or
                           join() was invoked and no task runs anymore */
                        while (!this->terminating &&
                              this->tasks.empty() &&
                              (this->active > 0 || !this->joining)) {
                           this->cv.wait(lock);
                        }
                        if (this->terminating || this->tasks.empty()) {
                           /* propagate shutdown to the other workers */
                           this->terminating = true;
                           break; /* all done */
                        }
                        task = std::move(this->tasks.front());
                        this->tasks.pop_front();
                        ++this->active;
                     }
                     /* process task without holding the lock */
                     task();
                     /* decrement number of active threads */
                     {
                        std::unique_lock<std::mutex> lock(this->mutex);
                        --this->active;
                     }
                  }
                  /* wake up the other workers so they can terminate, too */
                  this->cv.notify_all();
               });
            }
         }
         /* the destructor waits for all threads to finish */
         ~ThreadPool() {
            join();
         }

         /* wait for all threads to finish; all tasks submitted
            so far are still executed */
         void join() {
            {
               /* the flags must be read under the lock to avoid a
                  data race with the worker threads */
               std::unique_lock<std::mutex> lock(mutex);
               if (!joining && !terminating) {
                  joining = true;
               }
            }
            cv.notify_all();
            for (auto& t: threads) if (t.joinable()) t.join();
         }

         /* request a speedy termination, i.e. submitted but not yet
            assigned tasks remain undone; threads that wait for the
            corresponding futures will see broken promises */
         void terminate() {
            {
               std::unique_lock<std::mutex> lock(mutex);
               terminating = true;
               /* we make sure that all promises left are considered broken
                  by clearing the list of remaining tasks;
                  if we did not do it now, the waiting threads would
                  have to wait until this object is destructed
                  (note: empty() would just test, clear() actually drops) */
               tasks.clear();
            }
            cv.notify_all();
         }

         /* submit a task which is to be executed by one of the threads
            of the pool; the returned future allows to synchronize with
            the completion of the task and to retrieve its result */
         template<typename F>
         std::future<T> submit(F task_function) {
            std::packaged_task<T()> task(task_function);
            std::future<T> result = task.get_future();
            std::lock_guard<std::mutex> lock(mutex);
            tasks.push_back(std::move(task));
            cv.notify_one();
            return result;
         }
      private:
         std::vector<std::thread> threads;
         std::mutex mutex; /* protects tasks, active, joining, terminating */
         std::list<std::packaged_task<T()>> tasks;
         std::condition_variable cv;
         unsigned int active; /* number of threads currently running a task */
         bool joining;        /* join() has been invoked */
         bool terminating;    /* terminate() invoked or pool winding down */
   };
#else
   template<typename T>
   class ThreadPool {
      public:
         /* construct a thread pool with the given number of worker
            threads; each worker repeatedly fetches and executes
            submitted tasks until the pool is joined or terminated */
         ThreadPool(unsigned int nofthreads) :
               threads(nofthreads), active(0),
               joining(false), terminating(false) {
            for (auto& t: threads) {
               t = std::thread([this]() -> void {
                  for (;;) {
                     std::packaged_task<T()> task;
                     /* fetch next task, if there is any */
                     {
                        std::unique_lock<std::mutex> lock(mutex);
                        /* sleep until we terminate, a task arrives, or
                           join() was invoked and no task runs anymore */
                        while (!terminating &&
                              tasks.empty() &&
                              (active > 0 || !joining)) {
                           cv.wait(lock);
                        }
                        if (terminating || tasks.empty()) {
                           /* propagate shutdown to the other workers */
                           terminating = true;
                           break; /* all done */
                        }
                        task = std::move(tasks.front());
                        tasks.pop_front();
                        ++active;
                     }
                     /* process task without holding the lock */
                     task();
                     /* decrement number of active threads */
                     {
                        std::unique_lock<std::mutex> lock(mutex);
                        --active;
                     }
                  }
                  /* wake up the other workers so they can terminate, too */
                  cv.notify_all();
               });
            }
         }
         /* the destructor waits for all threads to finish */
         ~ThreadPool() {
            join();
         }

         /* wait for all threads to finish; all tasks submitted
            so far are still executed */
         void join() {
            {
               /* the flags must be read under the lock to avoid a
                  data race with the worker threads */
               std::unique_lock<std::mutex> lock(mutex);
               if (!joining && !terminating) {
                  joining = true;
               }
            }
            cv.notify_all();
            for (auto& t: threads) if (t.joinable()) t.join();
         }

         /* request a speedy termination, i.e. submitted but not yet
            assigned tasks remain undone; threads that wait for the
            corresponding futures will see broken promises */
         void terminate() {
            {
               std::unique_lock<std::mutex> lock(mutex);
               terminating = true;
               /* we make sure that all promises left are considered broken
                  by clearing the list of remaining tasks;
                  if we did not do it now, the waiting threads would
                  have to wait until this object is destructed
                  (note: empty() would just test, clear() actually drops) */
               tasks.clear();
            }
            cv.notify_all();
         }

         /* submit a task which is to be executed by one of the threads
            of the pool; the returned future allows to synchronize with
            the completion of the task and to retrieve its result */
         template<typename F>
         std::future<T> submit(F task_function) {
            std::packaged_task<T()> task(task_function);
            std::future<T> result = task.get_future();
            std::lock_guard<std::mutex> lock(mutex);
            tasks.push_back(std::move(task));
            cv.notify_one();
            return result;
         }
      private:
         std::vector<std::thread> threads;
         std::mutex mutex; /* protects tasks, active, joining, terminating */
         std::list<std::packaged_task<T()>> tasks;
         std::condition_variable cv;
         unsigned int active; /* number of threads currently running a task */
         bool joining;        /* join() has been invoked */
         bool terminating;    /* terminate() invoked or pool winding down */
   };
#endif

} } // namespaces mt and hpc

#endif