#ifndef TRIVIAL_AGGREGATE_HPP
#define TRIVIAL_AGGREGATE_HPP

#ifdef __CUDACC__

#include <cstddef>
#include <utility>
#include <hpc/cuda/check.hpp>
#include <hpc/cuda/gematrix.hpp>

namespace hpc { namespace cuda {

namespace trivial_aggregate_impl {

/* each block covers a square BLOCK_DIM x BLOCK_DIM tile;
   32 x 32 = 1024 threads is the maximum number of threads
   per block on current NVIDIA GPUs */
constexpr std::size_t BLOCK_DIM = 32;

/* kernel that aggregates the BLOCK_DIM x BLOCK_DIM tile of A covered by
   this block into a single value which is stored at
   B(blockIdx.x, blockIdx.y); the aggregator is expected to be an
   associative and commutative binary operation (e.g. sum or maximum) */
template<
   template<typename> class MatrixA,
   template<typename> class MatrixB,
   typename T,
   typename Aggregator,
   Require<
      DeviceGe<MatrixA<T>>, DeviceView<MatrixA<T>>,
      DeviceGe<MatrixB<T>>, DeviceView<MatrixB<T>>
   > = true
>
__global__ void aggregate2d(const MatrixA<T> A, MatrixB<T> B,
      Aggregator aggregator) {
   /* global row and column index of the element this thread covers;
      the x dimension runs along the rows, y along the columns */
   std::size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   std::size_t j = threadIdx.y + blockIdx.y * blockDim.y;

   /* shared memory for the tile: one cell per thread of the block */
   __shared__ T local_block[BLOCK_DIM][BLOCK_DIM];

   /* copy this block's tile of A into shared memory; threads that fall
      outside of A leave their entry uninitialized, which is harmless as
      the guarded reduction below never reads such entries */
   std::size_t me_i = threadIdx.x;
   std::size_t me_j = threadIdx.y;
   if (i < A.numRows() && j < A.numCols()) {
      local_block[me_i][me_j] = A(i, j);
   }

   /* number of active threads per dimension,
      halved in each step of the following reductions */
   std::size_t active_i = blockDim.x / 2;
   std::size_t active_j = blockDim.y / 2;

   /* first phase: fold the rows of the tile in log2(blockDim.x) steps;
      afterwards row 0 of local_block holds the column-wise aggregates */
   while (active_i > 0) {
      __syncthreads();
      if (me_i < active_i && i + active_i < A.numRows()) {
         local_block[me_i][me_j] = aggregator(local_block[me_i][me_j],
            local_block[me_i + active_i][me_j]);
      }
      active_i /= 2;
   }
   /* second phase: fold along the columns; afterwards local_block[0][0]
      holds the aggregate of the entire tile */
   while (active_j > 0) {
      __syncthreads();
      if (me_j < active_j && j + active_j < A.numCols()) {
         local_block[me_i][me_j] = aggregator(local_block[me_i][me_j],
            local_block[me_i][me_j + active_j]);
      }
      active_j /= 2;
   }
   /* the first thread of each block writes the per-tile result */
   if (me_i == 0 && me_j == 0) {
      B(blockIdx.x, blockIdx.y) = local_block[0][0];
   }
}
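
/* aggregate2d leaves one partial result per block in B; reducing these
   partial results to a single value is the job of the recursive host
   function aggregate below */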

inline constexpr std::size_t ceildiv(std::size_t x, std::size_t y) {
   /* note that we expect x > 0 && y > 0;
      not safe against overflows but we expect y to be small */
   return (x + y - 1) / y;
}

} // namespace trivial_aggregate_impl

/* aggregate all elements of A into a single value by repeatedly
   launching the aggregate2d kernel: each pass shrinks the matrix by a
   factor of up to BLOCK_DIM per dimension (e.g. 1000 x 1000 -> 32 x 32
   -> 1 x 1) until a single element remains, which is then copied back
   to the host */
template<
   template<typename> class Matrix,
   typename T,
   typename Aggregator,
   Require< DeviceGe<Matrix<T>> > = true
>
T aggregate(const Matrix<T>& A, Aggregator&& aggregator) {
   using namespace trivial_aggregate_impl;
   if (A.numRows() > 1 || A.numCols() > 1) {
      DeviceGeMatrix<T> B(ceildiv(A.numRows(), BLOCK_DIM),
         ceildiv(A.numCols(), BLOCK_DIM));
      auto target = B.view();
      const auto source = A.view();
      dim3 grid_dim(ceildiv(A.numRows(), BLOCK_DIM),
         ceildiv(A.numCols(), BLOCK_DIM));
      dim3 block_dim(BLOCK_DIM, BLOCK_DIM);
      aggregate2d<<<grid_dim, block_dim>>>(source, target, aggregator);
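      /* the launch is asynchronous; launch errors surface at the next
         synchronizing CUDA call, i.e. the cudaMemcpy that ends the
         recursion */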
      return aggregate(target, std::forward<Aggregator>(aggregator));
   } else {
      T result;
      CHECK_CUDA(cudaMemcpy, &result, A.data(), sizeof(T),
         cudaMemcpyDeviceToHost);
      return result;
   }
}
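
/* usage sketch (illustrative only, not part of this header): computing
   the maximum element of a device matrix with a __device__ lambda; the
   dimensions m and n are hypothetical, and nvcc needs the
   --extended-lambda option to accept device lambdas:

      DeviceGeMatrix<double> A(m, n);
      // ... fill A on the device ...
      double max_el = aggregate(A,
         [] __device__ (double x, double y) { return x > y ? x : y; });
*/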

} } // namespaces cuda, hpc

#else
#error This CUDA source must be compiled using nvcc
#endif

#endif