▶ This chapter covers CUDA programming in a multi-device setting, and how a few special memory types affect computation speed.
● Comparing copy-plus-compute times: device memory versus zero-copy memory
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "D:\Code\CUDA\book\common\book.h"

#define imin(a,b) (a<b?a:b)
#define SIZE (33 * 1024 * 1024)

const int threadsPerBlock = 256;// must be a power of two for the reduction in dot()
const int blocksPerGrid = imin(32, (SIZE + threadsPerBlock - 1) / threadsPerBlock);

__global__ void dot(int size, float *a, float *b, float *c)// each block computes a partial dot product and writes it to global memory
{
    __shared__ float share[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < size)
    {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    share[cacheIndex] = temp;
    __syncthreads();

    // tree reduction over the shared-memory partial sums
    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (cacheIndex < i)
            share[cacheIndex] += share[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0)
        c[blockIdx.x] = share[0];
    return;
}

void malloc_test()// compute via device (global) memory, timing the copies plus the kernel
{
    cudaEvent_t start, stop;
    float *a, *b, *partial_c, c;
    float *dev_a, *dev_b, *dev_partial_c;
    float elapsedTime;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    a = (float*)malloc(SIZE * sizeof(float));
    b = (float*)malloc(SIZE * sizeof(float));
    partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
    cudaMalloc((void**)&dev_a, SIZE * sizeof(float));
    cudaMalloc((void**)&dev_b, SIZE * sizeof(float));
    cudaMalloc((void**)&dev_partial_c, blocksPerGrid * sizeof(float));

    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i * 2;
    }

    cudaEventRecord(start, 0);
    cudaMemcpy(dev_a, a, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid, threadsPerBlock>>>(SIZE, dev_a, dev_b, dev_partial_c);
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    c = 0;
    for (int i = 0; i < blocksPerGrid; c += partial_c[i], i++);// finish the sum on the CPU

    free(a);
    free(b);
    free(partial_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cudaMalloc, time:\t%3.1f ms,value:\t%f\n", elapsedTime, c);
    return;
}

void cuda_host_alloc_test()// compute via zero-copy (mapped host) memory
{
    cudaEvent_t start, stop;
    float *a, *b, *partial_c, c;
    float *dev_a, *dev_b, *dev_partial_c;
    float elapsedTime;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaHostAlloc((void**)&a, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped);
    cudaHostAlloc((void**)&b, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped);
    cudaHostAlloc((void**)&partial_c, blocksPerGrid * sizeof(float), cudaHostAllocMapped);
    cudaHostGetDevicePointer(&dev_a, a, 0);
    cudaHostGetDevicePointer(&dev_b, b, 0);
    cudaHostGetDevicePointer(&dev_partial_c, partial_c, 0);

    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i * 2;
    }

    cudaEventRecord(start, 0);
    dot<<<blocksPerGrid, threadsPerBlock>>>(SIZE, dev_a, dev_b, dev_partial_c);
    cudaThreadSynchronize();// deprecated; cudaDeviceSynchronize() in current CUDA
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    c = 0;
    for (int i = 0; i < blocksPerGrid; c += partial_c[i], i++);

    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(partial_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cudaHostAlloc, time:\t%3.1f ms,value:\t%f\n", elapsedTime, c);
    return;
}

int main(void)
{
    cudaSetDeviceFlags(cudaDeviceMapHost);
    malloc_test();
    cuda_host_alloc_test();
    getchar();
    return 0;
}
● Program output: the first line is the result computed from device memory, the second the result computed from zero-copy memory.
● How zero-copy memory is used:
float *a, *dev_a;
cudaSetDeviceFlags(cudaDeviceMapHost);// set the device flag saying we intend to map host memory
cudaHostAlloc((void**)&a, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped);
// flags: "write-combined" and "mapped" (i.e. GPU-accessible)
cudaHostGetDevicePointer(&dev_a, a, 0);// map the host address into the GPU address space; afterwards dev_a[n]-style indexing works in kernels
foo<<<blocksize, threadsize>>>(dev_a);
cudaThreadSynchronize();// make sure the kernel has finished before the host touches a[] again
cudaFreeHost(a);// free the memory
● Write-combining does not change the application's own performance, but it can significantly speed up the GPU's reads of that memory. If the CPU also needs to read the buffer, however, access is inefficient.
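As a rule of thumb, write-combined allocations fit buffers the CPU only writes and the GPU only reads. A minimal sketch of that pattern, under the same flags as the listing above; the kernel scale() and the helper are made up purely for illustration:
__global__ void scale(const float *in, float *out, int n)// illustrative kernel: one read of in[], one write of out[]
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
        out[i] = 2.0f * in[i];
}

void fill_and_scale(float *dev_out, int n)// assumes cudaSetDeviceFlags(cudaDeviceMapHost) was called at startup, as in main() above
{
    float *in, *dev_in;
    // CPU fills the buffer once, GPU reads it once: write-combined + mapped is the right fit
    cudaHostAlloc((void**)&in, n * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped);
    cudaHostGetDevicePointer(&dev_in, in, 0);
    for (int i = 0; i < n; i++)// sequential CPU *writes* to write-combined memory are cheap
        in[i] = (float)i;
    scale<<<(n + 255) / 256, 256>>>(dev_in, dev_out, n);
    cudaDeviceSynchronize();
    // avoid reading in[i] back here: CPU *reads* from write-combined memory are uncached and slow
    cudaFreeHost(in);
}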
● Zero-copy memory removes the explicit copies over PCIe, reducing read/write latency and raising effective access speed. But the GPU does not cache zero-copy memory, so data that is read or written repeatedly is handled more efficiently in device memory. The dot product here touches every element exactly once, which is why the speedup is so visible.
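Whether zero-copy is available at all is a device property, and it can be checked before relying on it. A small sketch (the helper name is mine; the cudaDeviceProp fields are the real ones):
// Returns nonzero if mapped (zero-copy) host memory can be used on this device.
int zero_copy_supported(int deviceID)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, deviceID);
    // prop.integrated additionally tells us the GPU shares physical memory
    // with the CPU, in which case zero-copy always avoids a real copy.
    return prop.canMapHostMemory;
}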
● Parallel computation across multiple devices (rewritten here as two host threads)
#include <stdio.h>
#include <windows.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "D:\Code\CUDA\book\common\book.h"

#define imin(a,b) (a<b?a:b)
#define SIZE (33*1024*1024)
#define THREADSIZE (256)
#define BLOCKSIZE (imin(32, (SIZE / 2 + THREADSIZE - 1) / THREADSIZE))

struct DataStruct
{
    int deviceID;
    int size;
    float *a;
    float *b;
    float returnValue;
};

__global__ void dot(int size, float *a, float *b, float *c)// each block computes a partial dot product and writes it to global memory
{
    __shared__ float share[THREADSIZE];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < size)
    {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    share[cacheIndex] = temp;
    __syncthreads();

    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (cacheIndex < i)
            share[cacheIndex] += share[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0)
        c[blockIdx.x] = share[0];
    return;
}

void* routine(void *pvoidData)// standard C callback shape: a void* function taking a void* argument, cast back to its real type
{
    DataStruct *data = (DataStruct*)pvoidData;
    cudaSetDevice(data->deviceID);

    int i;
    float *partial_c, c;
    float *dev_a, *dev_b, *dev_partial_c;

    partial_c = (float*)malloc(BLOCKSIZE * sizeof(float));
    cudaMalloc((void**)&dev_a, data->size * sizeof(float));
    cudaMalloc((void**)&dev_b, data->size * sizeof(float));
    cudaMalloc((void**)&dev_partial_c, BLOCKSIZE * sizeof(float));

    cudaMemcpy(dev_a, data->a, data->size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, data->b, data->size * sizeof(float), cudaMemcpyHostToDevice);
    dot<<<BLOCKSIZE, THREADSIZE>>>(data->size, dev_a, dev_b, dev_partial_c);
    cudaMemcpy(partial_c, dev_partial_c, BLOCKSIZE * sizeof(float), cudaMemcpyDeviceToHost);

    for (i = 0, c = 0; i < BLOCKSIZE; c += partial_c[i], i++);

    free(partial_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    data->returnValue = c;
    printf("\n\troutine finished!");
    return NULL;
}

int main(void)
{
    float *a = (float*)malloc(sizeof(float) * SIZE);// input arrays shared by both threads
    float *b = (float*)malloc(sizeof(float) * SIZE);
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i * 2;
    }

    DataStruct data[2];
    data[0].deviceID = 0;
    data[0].size = SIZE / 2;
    data[0].a = a;
    data[0].b = b;
    data[1].deviceID = 0;// the book's original code uses 1 here, running on a second device
    data[1].size = SIZE / 2;
    data[1].a = a + SIZE / 2;
    data[1].b = b + SIZE / 2;

    // spawn a new thread with CreateThread() to share the work
    HANDLE thread = CreateThread(NULL, 0, (PTHREAD_START_ROUTINE)routine, &(data[1]), 0, NULL);
    routine(&(data[0]));

    WaitForSingleObject(thread, INFINITE);// wait for the worker thread, then close its handle
    CloseHandle(thread);

    free(a);
    free(b);
    printf("\n\tValue calculated: %f\n", data[0].returnValue + data[1].returnValue);
    getchar();
    return 0;
}
● On creating and destroying threads:
HANDLE thread = CreateThread(NULL, 0, (PTHREAD_START_ROUTINE)routine, &(data[1]), 0, NULL);
WaitForSingleObject(thread, INFINITE);
CloseHandle(thread);

HANDLE CreateThread(
    LPSECURITY_ATTRIBUTES lpsa,// security attributes; usually NULL
    DWORD dwStackSize,// stack size; 0 means the default
    LPTHREAD_START_ROUTINE pfnThreadProc,// thread procedure
    void* pvParam,// argument passed to the thread
    DWORD dwCreationFlags,// creation flags, 0 or CREATE_SUSPENDED
    DWORD* pdwThreadId// address of a DWORD that receives the new thread's ID
) throw();// the function does not throw

WaitForSingleObject(thread, INFINITE);
// 1st parameter: thread handle
// 2nd parameter: maximum time to wait; INFINITE blocks until the thread finishes

HANDLE list[N];
WaitForMultipleObjects(N, list, TRUE, INFINITE);
// 1st parameter: number of handles
// 2nd parameter: array of handles
// 3rd parameter: bWaitAll — TRUE waits until all handles are signaled, FALSE returns as soon as any one is signaled
// 4th parameter: maximum time to wait
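For more than two workers the same pattern generalizes: one CreateThread() per DataStruct, one WaitForMultipleObjects() to join them all. A sketch reusing the DataStruct from the listing above; worker() is a hypothetical stand-in for routine(), written with the signature CreateThread actually expects so no cast is needed:
#include <windows.h>

#define N 2// number of worker threads; illustrative

DWORD WINAPI worker(LPVOID pvParam)// proper Win32 thread-procedure signature
{
    DataStruct *data = (DataStruct*)pvParam;// cast back, exactly as routine() does
    /* ...per-thread work as in routine()... */
    return 0;
}

void run_workers(DataStruct *data)
{
    HANDLE list[N];
    for (int i = 0; i < N; i++)
        list[i] = CreateThread(NULL, 0, worker, &data[i], 0, NULL);
    WaitForMultipleObjects(N, list, TRUE, INFINITE);// TRUE: block until every worker has finished
    for (int i = 0; i < N; i++)
        CloseHandle(list[i]);
}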
● Multi-device parallel computation with portable pinned memory (again rewritten as two host threads)
#include <stdio.h>
#include <windows.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "D:\Code\CUDA\book\common\book.h"

#define imin(a,b) (a<b?a:b)
#define SIZE (33*1024*1024)
#define THREADSIZE (256)
#define BLOCKSIZE (imin(32, (SIZE / 2 + THREADSIZE - 1) / THREADSIZE))

struct DataStruct
{
    int deviceID;
    int size;
    float *a;
    float *b;
    float returnValue;
};

__global__ void dot(int size, float *a, float *b, float *c)// each block computes a partial dot product and writes it to global memory
{
    __shared__ float share[THREADSIZE];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < size)
    {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    share[cacheIndex] = temp;
    __syncthreads();

    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (cacheIndex < i)
            share[cacheIndex] += share[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0)
        c[blockIdx.x] = share[0];
    return;
}

void* routine(void *pvoidData)
{
    DataStruct *data = (DataStruct*)pvoidData;
    cudaSetDevice(data->deviceID);
    if (data->deviceID != 0)// a different device must set the map-host flag itself (never executed on my single-GPU machine)
        cudaSetDeviceFlags(cudaDeviceMapHost);

    int i;
    float *partial_c, c;
    float *dev_a, *dev_b, *dev_partial_c;

    partial_c = (float*)malloc(BLOCKSIZE * sizeof(float));
    cudaHostGetDevicePointer(&dev_a, data->a, 0);
    cudaHostGetDevicePointer(&dev_b, data->b, 0);
    cudaMalloc((void**)&dev_partial_c, BLOCKSIZE * sizeof(float));

    dot<<<BLOCKSIZE, THREADSIZE>>>(data->size, dev_a, dev_b, dev_partial_c);
    cudaMemcpy(partial_c, dev_partial_c, BLOCKSIZE * sizeof(float), cudaMemcpyDeviceToHost);

    for (i = 0, c = 0; i < BLOCKSIZE; c += partial_c[i], i++);

    free(partial_c);
    cudaFree(dev_partial_c);
    data->returnValue = c;
    printf("\n\tRoutine finished!");
    return NULL;
}

int main(void)
{
    float *a, *b;
    cudaSetDevice(0);
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaHostAlloc((void**)&a, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocPortable | cudaHostAllocMapped);
    cudaHostAlloc((void**)&b, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocPortable | cudaHostAllocMapped);

    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i * 2;
    }

    DataStruct data[2];
    data[0].deviceID = 0;
    data[0].size = SIZE / 2;
    data[0].a = a;
    data[0].b = b;
    data[1].deviceID = 0;// again 1 in the book's original, i.e. the second GPU
    data[1].size = SIZE / 2;
    data[1].a = a + SIZE / 2;
    data[1].b = b + SIZE / 2;

    HANDLE thread = CreateThread(NULL, 0, (PTHREAD_START_ROUTINE)routine, &(data[1]), 0, NULL);
    routine(&(data[0]));
    WaitForSingleObject(thread, INFINITE);
    CloseHandle(thread);

    cudaFreeHost(a);
    cudaFreeHost(b);
    printf("\n\tValue calculated: %f\n", data[0].returnValue + data[1].returnValue);
    getchar();
    return 0;
}
● Portable pinned memory: "pinned" is a per-thread property. Every thread other than the one that allocated the pinned buffer sees it as ordinary pageable memory. Adding the "portable" attribute to the allocation makes all threads treat it as pinned.
● Using portable memory:
float *a;
cudaSetDeviceFlags(cudaDeviceMapHost);// set the device flag saying we intend to map host memory
cudaHostAlloc((void**)&a, SIZE * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocPortable | cudaHostAllocMapped);
// the extra cudaHostAllocPortable flag marks the allocation as portable pinned memory

cudaSetDeviceFlags(cudaDeviceMapHost);
// any other thread using this memory on another device must set the flag again on that device

cudaFreeHost(a);
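Since both multi-device listings had to fall back to deviceID = 0 on a single-GPU machine, the device count can be queried up front so the second thread only targets GPU 1 when it actually exists. A sketch (the helper name is mine; cudaGetDeviceCount is the real API):
// Choose the device for the second worker thread: GPU 1 when present,
// otherwise share GPU 0 (the fallback used in the listings above).
int second_device_id(void)
{
    int count = 0;
    cudaGetDeviceCount(&count);
    return (count >= 2) ? 1 : 0;
}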