#ifndef HPC_CUDA_AGGREGATE_HPP
#define HPC_CUDA_AGGREGATE_HPP 1

#ifdef __CUDACC__

#include <hpc/cuda/check.hpp>
#include <hpc/cuda/gematrix.hpp>
#include <hpc/cuda/properties.hpp>

namespace hpc { namespace cuda {

namespace aggregate_impl {

/* we expect our GPU to support BLOCK_DIM x BLOCK_DIM threads per block
   (32 x 32 = 1024, the maximum number of threads per block on current
   CUDA devices); BLOCK_DIM must be a power of 2 and is best a multiple
   of the warp size */
constexpr std::size_t BLOCK_DIM = 32;

/* kernel that operates with a thread for each row
   of each block of matrix A */
template<
   template<typename> class MatrixA,
   template<typename> class MatrixB,
   typename T,
   typename Aggregator,
   Require<
      DeviceGe<MatrixA<T>>, DeviceView<MatrixA<T>>,
      DeviceGe<MatrixB<T>>, DeviceView<MatrixB<T>>
   > = true
>
__global__ void aggregate_row_wise(const MatrixA<T> A, MatrixB<T> B,
      Aggregator aggregator) {
   std::size_t i = threadIdx.x + blockIdx.x * BLOCK_DIM;
   std::size_t j = threadIdx.y + blockIdx.y * BLOCK_DIM;

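   /* each thread aggregates one row stripe of this block: the kernel
      is launched with blockDim = (BLOCK_DIM, 1), hence threadIdx.y is
      always 0 and j addresses the first column of this block */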
   if (i < A.numRows()) {
      T val = A(i, j);
      std::size_t maxoffset = BLOCK_DIM;
      if (j + maxoffset >= A.numCols()) {
	 maxoffset = A.numCols() - j;
      }
      for (std::size_t offset = 1; offset < maxoffset; ++offset) {
	 val = aggregator(val, A(i, j + offset));
      }
      B(i, blockIdx.y) = val;
   }
}

/* kernel that operates with a thread for each column
   of each block of matrix A */
template<
   template<typename> class MatrixA,
   template<typename> class MatrixB,
   typename T,
   typename Aggregator,
   Require<
      DeviceGe<MatrixA<T>>, DeviceView<MatrixA<T>>,
      DeviceGe<MatrixB<T>>, DeviceView<MatrixB<T>>
   > = true
>
__global__ void aggregate_col_wise(const MatrixA<T> A, MatrixB<T> B,
      Aggregator aggregator) {
   std::size_t i = threadIdx.x + blockIdx.x * BLOCK_DIM;
   std::size_t j = threadIdx.y + blockIdx.y * BLOCK_DIM;

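   /* each thread aggregates one column stripe of this block: the kernel
      is launched with blockDim = (1, BLOCK_DIM), hence threadIdx.x is
      always 0 and i addresses the first row of this block */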
   if (j < A.numCols()) {
      T val = A(i, j);
      std::size_t maxoffset = BLOCK_DIM;
      if (i + maxoffset >= A.numRows()) {
	 maxoffset = A.numRows() - i;
      }
      for (std::size_t offset = 1; offset < maxoffset; ++offset) {
	 val = aggregator(val, A(i + offset, j));
      }
      B(blockIdx.x, j) = val;
   }
}

/* kernel that operates with a thread for each element of matrix A;
   note that this kernel is expected to be launched with blockDim.x
   and blockDim.y being powers of 2 */
template<
   template<typename> class MatrixA,
   template<typename> class MatrixB,
   typename T,
   typename Aggregator,
   Require<
      DeviceGe<MatrixA<T>>, DeviceView<MatrixA<T>>,
      DeviceGe<MatrixB<T>>, DeviceView<MatrixB<T>>
   > = true
>
__global__ void aggregate2d(const MatrixA<T> A, MatrixB<T> B,
      Aggregator aggregator) {
   std::size_t i = threadIdx.x + blockIdx.x * blockDim.x;
   std::size_t j = threadIdx.y + blockIdx.y * blockDim.y;

   __shared__ T local_block[BLOCK_DIM][BLOCK_DIM];

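   /* each thread loads its element of A into the shared block;
      entries that fall outside of A remain uninitialized but are
      never read by the reduction steps below */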
   std::size_t me_i = threadIdx.x;
   std::size_t me_j = threadIdx.y;
   if (i < A.numRows() && j < A.numCols()) {
      local_block[me_i][me_j] = A(i, j);
   }

   std::size_t active_i = blockDim.x / 2;
   std::size_t active_j = blockDim.y / 2;

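   /* binary tree reduction within the block: in each step the upper
      half of the active rows is folded into the lower half, then the
      same is done along the columns; __syncthreads() makes the writes
      of the previous step visible to all threads of the block */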
   while (active_i > 0) {
      __syncthreads();
      if (me_i < active_i && i + active_i < A.numRows()) {
	 local_block[me_i][me_j] = aggregator(local_block[me_i][me_j],
	    local_block[me_i + active_i][me_j]);
      }
      active_i /= 2;
   }
   while (active_j > 0) {
      __syncthreads();
      if (me_j < active_j && j + active_j < A.numCols()) {
	 local_block[me_i][me_j] = aggregator(local_block[me_i][me_j],
	    local_block[me_i][me_j + active_j]);
      }
      active_j /= 2;
   }
   if (me_i == 0 && me_j == 0) {
      B(blockIdx.x, blockIdx.y) = local_block[0][0];
   }
}

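/* returns a constant view of the device matrix A with the roles of
   rows and columns interchanged; no elements are copied, just the
   dimensions and increments are swapped */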
template<
   template<typename> class Matrix,
   typename T,
   Require< DeviceGe<Matrix<T>> > = true
>
auto transposed_view(const Matrix<T>& A) {
   return DeviceGeMatrixConstView<T>(A.numCols(), A.numRows(), A.conj(),
      A.data(), A.incCol(), A.incRow());
}

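/* returns a view of A in row-major order, i.e. where
   incRow() >= incCol(); the transposed view is taken if necessary */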
template<
   template<typename> class Matrix,
   typename T,
   Require< DeviceGe<Matrix<T>> > = true
>
auto row_major_view(const Matrix<T>& A) {
   if (A.incRow() < A.incCol()) {
      return transposed_view(A);
   } else {
      return A.view();
   }
}

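/* returns a view of A in column-major order, i.e. where
   incCol() >= incRow(); the transposed view is taken if necessary */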
template<
   template<typename> class Matrix,
   typename T,
   Require< DeviceGe<Matrix<T>> > = true
>
auto col_major_view(const Matrix<T>& A) {
   if (A.incRow() > A.incCol()) {
      return transposed_view(A);
   } else {
      return A.view();
   }
}

inline constexpr std::size_t ceildiv(std::size_t x, std::size_t y) {
   /* note that we expect x > 0 && y > 0;
      not safe against overflows but we expect y to be small */
   return (x + y - 1) / y;
}

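/* returns the smallest power of 2 that is greater than or equal
   to val; used to configure the block dimensions for aggregate2d */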
template<typename T>
inline constexpr T align_to_power_of_2(T val) {
   T res = 1;
   while (res < val) {
      res *= 2;
   }
   return res;
}

} // namespace aggregate_impl

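/* aggregates all elements of the device matrix A into a single value
   using the binary operation of aggregator (e.g. sum or maximum),
   which is expected to be associative and commutative;
   the reduction proceeds recursively: each kernel invocation shrinks
   one dimension by a factor of BLOCK_DIM until just one element
   remains, which is finally copied back to the host */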
template<
   template<typename> class Matrix,
   typename T,
   typename Aggregator,
   Require< DeviceGe<Matrix<T>> > = true
>
T aggregate(const Matrix<T>& A, Aggregator&& aggregator) {
   using namespace aggregate_impl;
   if (A.numRows() > 1 || A.numCols() > 1) {
      if (A.numRows() <= BLOCK_DIM && A.numCols() <= BLOCK_DIM) {
	 /* most efficiently handled by a trivial 2d aggregator */
	 DeviceGeMatrix<T> B(1, 1);
	 const auto source = col_major_view(A);
	 dim3 block_dim(align_to_power_of_2(source.numRows()),
	    align_to_power_of_2(source.numCols()));
	 auto target = B.view();
	 aggregate2d<<<1, block_dim>>>(source, target, aggregator);
	 return aggregate(target, std::move(aggregator));
      } else {
	 /* aggregate column-wise if A has more rows than columns, and
	    row-wise otherwise, so that the larger dimension shrinks
	    by a factor of BLOCK_DIM per step */
	 if (A.numRows() > A.numCols()) {
	    auto source = row_major_view(A);
	    dim3 grid_dim(ceildiv(source.numRows(), BLOCK_DIM),
	       ceildiv(source.numCols(), BLOCK_DIM));
	    DeviceGeMatrix<T> B(ceildiv(source.numRows(), BLOCK_DIM),
	       source.numCols());
	    auto target = B.view();
	    dim3 block_dim(1, BLOCK_DIM);
	    aggregate_col_wise<<<grid_dim, block_dim>>>(source, target,
	       aggregator);
	    return aggregate(target, std::move(aggregator));
	 } else {
	    auto source = col_major_view(A);
	    dim3 grid_dim(ceildiv(source.numRows(), BLOCK_DIM),
	       ceildiv(source.numCols(), BLOCK_DIM));
	    DeviceGeMatrix<T> B(source.numRows(),
	       ceildiv(source.numCols(), BLOCK_DIM));
	    auto target = B.view();
	    dim3 block_dim(BLOCK_DIM, 1);
	    aggregate_row_wise<<<grid_dim, block_dim>>>(source, target,
	       aggregator);
	    return aggregate(target, std::move(aggregator));
	 }
      }
   } else {
      T result;
      CHECK_CUDA(cudaMemcpy, &result, A.data(), sizeof(T),
	 cudaMemcpyDeviceToHost);
      return result;
   }
}
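
/* usage sketch (illustrative, not part of this header): the aggregator
   just needs to be callable on the device; for example, assuming a
   DeviceGeMatrix<double> devA that has already been filled, the maximum
   could be computed with a small functor (the name MaxOp is made up):

      struct MaxOp {
	 __device__ double operator()(double x, double y) const {
	    return x > y? x: y;
	 }
      };

      double maxval = aggregate(devA, MaxOp{});
*/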

} } // namespaces cuda and hpc

#else
#	error This CUDA source must be compiled using nvcc
#endif

#endif