[CUDA Learning Diary] 3.2 CUDA Execution Model -- The Parallel Reduction Problem
# 3.2.1 Resource Allocation
Guidelines for grid and thread block size. Following these guidelines helps an application run well on current and future devices (a minimal configuration sketch follows this list):
- Keep the number of threads per block a multiple of the warp size (32).
- Avoid blocks that are too small: each block should have at least 128 or 256 threads.
- Adjust the block size according to the kernel's resource requirements.
- Use far more blocks than there are SMs, so that enough parallelism is exposed to the device.
- Find the best execution configuration and resource usage through experimentation.
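As a concrete illustration of these guidelines, here is a minimal sketch of computing a 1D execution configuration whose block size is a multiple of the warp size and whose grid covers the whole input. The problem size `n` and the block size of 256 are assumptions chosen for the example, not values taken from this post:

```cuda
#include <cuda_runtime.h>
#include <stdio.h>

int main(void)
{
    unsigned int n = 1 << 24;                    // assumed problem size
    dim3 block(256, 1);                          // 256 threads = 8 warps per block
    dim3 grid((n + block.x - 1) / block.x, 1);   // round up so every element is covered

    // many more blocks than SMs -> enough parallelism exposed to the device
    printf("grid %u, block %u\n", grid.x, block.x);
    return 0;
}
```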
A serial implementation that sums an N-element integer array needs N-1 additions, i.e. N-1 steps:
int sum = 0;
for (int i = 0; i < N; i++) {
    sum += array[i];
}
A parallel addition works as follows:
1. Partition the input vector into smaller chunks.
2. Use one thread to compute the partial sum of each chunk.
3. Add the partial sums of all chunks to obtain the final result.
A commonly used method is the iterative pairwise implementation, in which each chunk holds just a pair of elements. This approach still performs N-1 additions, but finishes in log2(N) steps. Example with the array 1..8 (a host-side sketch of the scheme follows the example):
step 1: 1+2, 3+4, 5+6, 7+8  ->  3, 7, 11, 15
step 2: 3+7, 11+15          ->  10, 26
step 3: 10+26               ->  36
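The same scheme can also be written as a plain host-side loop. The sketch below is my illustration rather than code from this post, and it assumes the array length is a power of two, just like the CUDA kernels later in this section:

```cuda
#include <stdio.h>

// Iterative pairwise (neighbored-pair) reduction on the host.
// Assumes n is a power of two.
int pairwiseReduce(int *data, int n)
{
    for (int stride = 1; stride < n; stride *= 2)
    {
        // in each step, elements that are `stride` apart are combined in place
        for (int i = 0; i + stride < n; i += 2 * stride)
        {
            data[i] += data[i + stride];
        }
    }
    return data[0];
}

int main(void)
{
    int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("sum = %d\n", pairwiseReduce(a, 8));   // prints 36
    return 0;
}
```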
The __syncthreads() statement guarantees that all partial sums produced by every thread of the block in the current iteration have been written to global memory before any thread moves on to the next iteration.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <windows.h>
#include "../common/common.h"
// CPU recursive reduction: fold the second half onto the first half, then recurse
int recursiveReduce(int *data, int const size)
{
    // terminate when only one element remains
    if (size == 1) return data[0];

    int const stride = size / 2;
    for (int i = 0; i < stride; i++)
    {
        data[i] += data[i + stride];
    }
    return recursiveReduce(data, stride);
}
// warmup kernel: identical to reduceNeighbored, used to absorb startup overhead
__global__ void warmup(int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;

    // convert the global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;   // boundary check

    // in-place reduction in global memory
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();    // wait for all partial sums of this iteration
    }

    // thread 0 writes the result of this block to global memory
    if (tid == 0)
    {
        g_odata[blockIdx.x] = idata[0];
    }
}
// neighbored-pair reduction: g_idata is the global input, g_odata holds one partial sum per block
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;

    // local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;   // boundary check

    // in-place reduction in global memory
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }

    if (tid == 0) { g_odata[blockIdx.x] = idata[0]; }
}
int main(int argc, char **argv)
{
    printf("%s starting\n", argv[0]);

    int dev = 0;
    cudaDeviceProp deviceprop;
    CHECK(cudaGetDeviceProperties(&deviceprop, dev));
    printf("Using Device %d : %s\n", dev, deviceprop.name);

    int size = 1 << 24;
    int blocksize = 512;
    if (argc > 1)
    {
        blocksize = atoi(argv[1]);
    }

    dim3 block(blocksize, 1);   // 1D block
    dim3 grid((size + block.x - 1) / block.x, 1);

    size_t nBytes = size * sizeof(int);
    int *h_idata = (int *)malloc(nBytes);
    int *h_odata = (int *)malloc(grid.x * sizeof(int));   // one partial sum per block
    int *temp = (int *)malloc(nBytes);

    // initialize the array
    for (int i = 0; i < size; i++)
    {
        h_idata[i] = (int)(rand() & 0xff);
    }

    int sum = 0;
    for (int i = 0; i < size; i++)
    {
        sum += h_idata[i];
    }
    printf("sum value is : %d\n", sum);

    memcpy(temp, h_idata, nBytes);

    int gpu_sum = 0;
    int *d_idata = NULL;
    int *d_odata = NULL;
    cudaMalloc((void **)&d_idata, nBytes);
    cudaMalloc((void **)&d_odata, grid.x * sizeof(int));

    // cpu sum
    Timer timer;
    timer.start();
    int cpu_sum = recursiveReduce(temp, size);
    timer.stop();
    float elapsedTime = timer.elapsedms();
    printf("cpu reduce time: %f, sum: %d\n", elapsedTime, cpu_sum);

    // gpu sum: warmup
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    timer.start();
    warmup<<<grid, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    timer.stop();
    float elapsedTime1 = timer.elapsedms();
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++)
    {
        gpu_sum += h_odata[i];
    }
    printf("warm up reduce time: %f, sum: %d\n", elapsedTime1, gpu_sum);

    // gpu sum: reduceNeighbored
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    timer.start();
    reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    timer.stop();
    float elapsedTime2 = timer.elapsedms();
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++)
    {
        gpu_sum += h_odata[i];
    }
    printf("gpu reduce time: %f, sum: %d, grid, block (%d %d)\n", elapsedTime2, gpu_sum, grid.x, block.x);

    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaDeviceReset();

    free(h_idata);
    free(h_odata);
    free(temp);
    return 0;
}
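The Timer used above comes from the author's ../common/common.h, which is not shown in this post. A minimal stand-in with the same assumed interface (start(), stop(), elapsedms()) could look like the following; this is my guess at the interface, not the author's actual implementation. Because the listing calls cudaDeviceSynchronize() before timer.stop(), a simple wall-clock timer also captures kernel time:

```cuda
#include <chrono>

// Minimal stand-in for the Timer used in the listing above
// (assumed interface: start(), stop(), elapsedms()).
struct Timer
{
    std::chrono::steady_clock::time_point beg, end;

    void start() { beg = std::chrono::steady_clock::now(); }
    void stop()  { end = std::chrono::steady_clock::now(); }

    // elapsed time in milliseconds between start() and stop()
    float elapsedms()
    {
        return std::chrono::duration<float, std::milli>(end - beg).count();
    }
};
```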
Output:
sum value is : 2139095040
cpu reduce time: 11.190496, sum: 2139095040
warm up reduce time: 0.555072, sum: 2139095040
gpu reduce time: 0.228064, sum: 2139095040, grid, block (32768 512)
# 3.2.2 Improving Divergence in Parallel Reduction
The condition if ((tid % (2 * stride)) == 0) in the kernel is true only for even-numbered threads, so it causes very high warp divergence. For example:
In step 1, only threads with even IDs (0, 2, 4, 6, ...) execute the body of the condition, yet all threads still have to be scheduled.
In step 2, only threads 0, 4, 8, ... do the work.
Add the following kernel to the previous code and invoke it (a sketch of the host-side call is given after the kernel):
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;
    int *idata = g_idata + blockIdx.x * blockDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;   // boundary check

    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // map thread ID to the data index handled in this iteration
        int index = 2 * stride * tid;
        if (index < blockDim.x)
        {
            idata[index] += idata[index + stride];
        }
        __syncthreads();
    }

    if (tid == 0) { g_odata[blockIdx.x] = idata[0]; }
}
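The host-side call follows the same pattern as the reduceNeighbored launch in main. The fragment below sketches that pattern as a small helper; the helper name and its parameter list are mine, not part of the original code, and the timing lines are omitted for brevity:

```cuda
#include <cuda_runtime.h>

// Kernel defined above in this post.
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n);

// Illustrative helper: restore the input, launch the kernel, and finish the
// per-block partial sums on the host (name and signature are mine).
int runReduceNeighboredLess(int *d_idata, int *d_odata, int *h_idata, int *h_odata,
                            size_t nBytes, dim3 grid, dim3 block, int size)
{
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);  // kernels reduce in place
    cudaDeviceSynchronize();

    reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();

    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    int gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    return gpu_sum;
}
```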
Output:
sum value is : 2139095040
cpu reduce time: 11.256832, sum: 2139095040
warm up reduce time: 11.377376, sum: 2139095040
reduceNeighbored gpu reduce time: 0.233728, sum: 2139095040, grid, block (32768 512)
reduceNeighboredLess gpu reduce time: 0.175616, sum: 2139095040, grid, block (32768 512)
reduceNeighboredLess is about 1.33x faster than reduceNeighbored (0.233728 / 0.175616 ≈ 1.33).
The statements in the kernel
    int index = 2 * stride * tid;
    if (index < blockDim.x)
work as follows for a block of 512 threads:
step 1: the first 256 threads (8 x 32, i.e. 8 warps) perform the first round of the reduction, while the remaining 8 warps do nothing;
step 2: the first 128 threads (4 warps) do the work;
step 3: the first 64 threads (2 warps);
and so on. Hence there is no warp divergence in these rounds. Divergence only appears in the last 5 rounds, when the number of active threads drops below the warp size.
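To make the difference concrete, the host-side sketch below (my illustration, not code from the post) counts, for a 512-thread block, how many threads take the branch in each round under the two indexing schemes, and how many warps those threads occupy. With tid % (2 * stride) == 0 the active threads are scattered across all 16 warps in the early rounds, whereas with index = 2 * stride * tid they are packed into the first warps:

```cuda
#include <stdio.h>

// Count active threads per round and the warps they occupy,
// for reduceNeighbored (naive) vs reduceNeighboredLess (packed).
static void countRound(int blockDim, int stride)
{
    int activeNaive = 0, warpsNaive = 0;
    int activeLess  = 0, warpsLess  = 0;

    for (int warp = 0; warp < blockDim / 32; warp++)
    {
        int naiveInWarp = 0, lessInWarp = 0;
        for (int lane = 0; lane < 32; lane++)
        {
            int tid = warp * 32 + lane;
            if (tid % (2 * stride) == 0)     naiveInWarp++;  // reduceNeighbored condition
            if (2 * stride * tid < blockDim) lessInWarp++;   // reduceNeighboredLess condition
        }
        activeNaive += naiveInWarp;  warpsNaive += (naiveInWarp > 0);
        activeLess  += lessInWarp;   warpsLess  += (lessInWarp  > 0);
    }
    printf("stride %3d: naive %3d threads in %2d warps | packed %3d threads in %2d warps\n",
           stride, activeNaive, warpsNaive, activeLess, warpsLess);
}

int main(void)
{
    for (int stride = 1; stride < 512; stride *= 2)
        countRound(512, stride);
    return 0;
}
```

For stride = 1 this prints 256 active threads spread over all 16 warps for the naive scheme, versus 256 active threads packed into only 8 warps for the remapped scheme, which is why the first version diverges in every warp while the second does not.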
# 3.2.3 Reduction with Interleaved Pairs
__global__ void reduceInterleaved(int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;
    int *idata = g_idata + blockIdx.x * blockDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;   // boundary check

    // the stride starts at half the block size and is halved every round
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }

    if (tid == 0) { g_odata[blockIdx.x] = idata[0]; }
}
Output:
cpu reduce time: 11.505888, sum: 2139095040
warm up reduce time: 0.512352, sum: 2139095040
reduceNeighbored gpu reduce time: 0.323360, sum: 2139095040, grid, block (32768 512)
reduceNeighboredLess gpu reduce time: 0.177888, sum: 2139095040, grid, block (32768 512)
reduceInterleaved gpu reduce time: 0.116544, sum: 2139095040, grid, block (32768 512)
Compared with neighbored pairing, interleaved pairing changes the stride between the elements each thread adds. For example:
For the array 1..8, neighbored pairing computes 1+2, 3+4, 5+6, 7+8, while interleaved pairing computes 1+5, 2+6, 3+7, 4+8.
Compared with reduceNeighboredLess, the set of working threads is unchanged, but the global-memory locations each thread loads from and stores to are different.
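The following host-side sketch (my illustration, not code from the post; it assumes the length is a power of two) reproduces the interleaved access pattern on the 8-element example and prints which pairs are added at each stride:

```cuda
#include <stdio.h>

// Interleaved-pair reduction on the host; n must be a power of two.
int interleavedReduce(int *data, int n)
{
    for (int stride = n / 2; stride > 0; stride >>= 1)
    {
        for (int i = 0; i < stride; i++)
        {
            printf("stride %d: data[%d] += data[%d]\n", stride, i, i + stride);
            data[i] += data[i + stride];
        }
    }
    return data[0];
}

int main(void)
{
    int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("sum = %d\n", interleavedReduce(a, 8));   // 1+5, 2+6, 3+7, 4+8, ... -> 36
    return 0;
}
```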