Thrust inside user-written kernels
As it was originally written, Thrust is purely a host-side abstraction. It cannot be used inside kernels. You can pass the device memory encapsulated inside a thrust::device_vector
to your own kernel like this:
thrust::device_vector< Foo > fooVector;
// Do something thrust-y with fooVector
Foo* fooArray = thrust::raw_pointer_cast( fooVector.data() );
// Pass raw array and its size to kernel
someKernelCall<<< x, y >>>( fooArray, fooVector.size() );
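For completeness, a minimal sketch of what the receiving kernel could look like (someKernelCall is just the placeholder name from the snippet above, and Foo is assumed to be a type usable in device code):

__global__ void someKernelCall(Foo* fooArray, size_t n)
{
    const size_t i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n) {
        // fooArray[i] is plain device memory here; no Thrust machinery is involved
        // ... operate on fooArray[i] ...
    }
}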
You can also use device memory not allocated by Thrust within Thrust algorithms by instantiating a thrust::device_ptr with the bare CUDA device memory pointer.
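For instance, a minimal sketch of wrapping a bare cudaMalloc'ed pointer for use with Thrust algorithms (raw and N are placeholder names, not from the code above):

#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>

// N is assumed to be a previously defined element count
float* raw;
cudaMalloc(&raw, N * sizeof(float));
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(raw);
thrust::fill(dev_ptr, dev_ptr + N, 1.0f);            // Thrust algorithm running on a non-Thrust allocation
float total = thrust::reduce(dev_ptr, dev_ptr + N);  // total == N after the fill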
Edited four and a half years later to add that, as per @JackOLantern's answer, Thrust 1.8 adds a sequential execution policy, which means you can run single-threaded versions of Thrust's algorithms on the device. Note that it still isn't possible to directly pass a thrust::device_vector to a kernel, and device vectors can't be directly used in device code.
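As a hedged illustration of that sequential policy (sortRows, d_data and rowLength are made-up names for this sketch), a single device thread can run a Thrust algorithm in-place without launching any child kernels:

#include <thrust/sort.h>
#include <thrust/execution_policy.h>

// one thread per row: each thread sorts its own row sequentially with thrust::seq
__global__ void sortRows(float* d_data, int rowLength)
{
    float* row = d_data + blockIdx.x * rowLength;
    thrust::sort(thrust::seq, row, row + rowLength);
}

// launched e.g. as sortRows<<<numRows, 1>>>(d_data, rowLength);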
Note that it is also possible to use the thrust::device execution policy in some cases to have parallel Thrust execution launched by a kernel as a child grid. This requires separate compilation/device linkage and hardware that supports dynamic parallelism. I am not certain whether this is actually supported in all Thrust algorithms or not, but it certainly works with some.
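For reference, such dynamic-parallelism builds generally need relocatable device code, the device runtime library, and a compute capability 3.5 or newer target; a typical nvcc invocation (file and output names are placeholders) looks roughly like:

nvcc -arch=sm_35 -rdc=true kernel_thrust.cu -lcudadevrt -o kernel_thrust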
This is an update to my previous answer.
Starting from Thrust 1.8.1, CUDA Thrust primitives can be combined with the thrust::device execution policy to run in parallel within a single CUDA thread, exploiting CUDA dynamic parallelism. An example is reported below.
#include <stdio.h>

#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

#include "TimingGPU.cuh"    // GPU timer class used below (TimingGPU)
#include "Utilities.cuh"    // error-checking (gpuErrchk) and grid-size (iDivUp) helpers
#define BLOCKSIZE_1D 256
#define BLOCKSIZE_2D_X 32
#define BLOCKSIZE_2D_Y 32
/*************************/
/* TEST KERNEL FUNCTIONS */
/*************************/
__global__ void test1(const float * __restrict__ d_data, float * __restrict__ d_results, const int Nrows, const int Ncols) {

    const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;

    // one thread per row: sequential Thrust reduction executed within the calling thread
    if (tid < Nrows) d_results[tid] = thrust::reduce(thrust::seq, d_data + tid * Ncols, d_data + (tid + 1) * Ncols);
}
__global__ void test2(const float * __restrict__ d_data, float * __restrict__ d_results, const int Nrows, const int Ncols) {

    const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;

    // one thread per row: the thrust::device policy launches the reduction as a child grid via dynamic parallelism
    if (tid < Nrows) d_results[tid] = thrust::reduce(thrust::device, d_data + tid * Ncols, d_data + (tid + 1) * Ncols);
}
/********/
/* MAIN */
/********/
int main() {

    const int Nrows = 64;
    const int Ncols = 2048;

    gpuErrchk(cudaFree(0));    // force CUDA context creation

    // size_t DevQueue;
    // gpuErrchk(cudaDeviceGetLimit(&DevQueue, cudaLimitDevRuntimePendingLaunchCount));
    // DevQueue *= 128;
    // gpuErrchk(cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, DevQueue));

    float *h_data     = (float *)malloc(Nrows * Ncols * sizeof(float));
    float *h_results  = (float *)malloc(Nrows * sizeof(float));
    float *h_results1 = (float *)malloc(Nrows * sizeof(float));
    float *h_results2 = (float *)malloc(Nrows * sizeof(float));

    // fill the matrix and compute the reference row sums on the host
    for (int i=0; i<Nrows; i++) {
        h_results[i] = 0.f;
        for (int j=0; j<Ncols; j++) {
            h_data[i*Ncols+j] = i;
            h_results[i] = h_results[i] + h_data[i*Ncols+j];
        }
    }

    TimingGPU timerGPU;

    float *d_data;     gpuErrchk(cudaMalloc((void**)&d_data,     Nrows * Ncols * sizeof(float)));
    float *d_results1; gpuErrchk(cudaMalloc((void**)&d_results1, Nrows * sizeof(float)));
    float *d_results2; gpuErrchk(cudaMalloc((void**)&d_results2, Nrows * sizeof(float)));

    gpuErrchk(cudaMemcpy(d_data, h_data, Nrows * Ncols * sizeof(float), cudaMemcpyHostToDevice));

    // approach nr. 1: thrust::seq inside the kernel
    timerGPU.StartCounter();
    test1<<<iDivUp(Nrows, BLOCKSIZE_1D), BLOCKSIZE_1D>>>(d_data, d_results1, Nrows, Ncols);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    printf("Timing approach nr. 1 = %f\n", timerGPU.GetCounter());

    gpuErrchk(cudaMemcpy(h_results1, d_results1, Nrows * sizeof(float), cudaMemcpyDeviceToHost));

    for (int i=0; i<Nrows; i++) {
        if (h_results1[i] != h_results[i]) {
            printf("Approach nr. 1; Error at i = %i; h_results1 = %f; h_results = %f\n", i, h_results1[i], h_results[i]);
            return 0;
        }
    }

    // approach nr. 2: thrust::device inside the kernel (dynamic parallelism)
    timerGPU.StartCounter();
    test2<<<iDivUp(Nrows, BLOCKSIZE_1D), BLOCKSIZE_1D>>>(d_data, d_results2, Nrows, Ncols);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    printf("Timing approach nr. 2 = %f\n", timerGPU.GetCounter());

    gpuErrchk(cudaMemcpy(h_results2, d_results2, Nrows * sizeof(float), cudaMemcpyDeviceToHost));

    for (int i=0; i<Nrows; i++) {
        if (h_results2[i] != h_results[i]) {
            printf("Approach nr. 2; Error at i = %i; h_results2 = %f; h_results = %f\n", i, h_results2[i], h_results[i]);
            return 0;
        }
    }

    printf("Test passed!\n");

    return 0;
}
The above example performs reductions of the rows of a matrix in the same sense as Reduce matrix rows with CUDA, but it is done differently from the above post, namely, by calling CUDA Thrust primitives directly from user-written kernels. The example also compares the performance of the same operation under the two execution policies, thrust::seq and thrust::device. Below are some graphs showing the difference in performance. The performance has been evaluated on a Kepler K20c and on a Maxwell GeForce GTX 850M.