Previously we implemented vector addition. Today we implement a slightly more involved operation: matrix addition, i.e. adding the elements at corresponding positions of two matrices. It is essentially an upgraded version of vector addition. One thing to note is that when allocating with malloc we also need an array of row pointers, so that elements can be accessed as A[i][j].
CPU implementation
The tricky part of the C++ version is setting up the two-dimensional view: each matrix is allocated as one contiguous block, and a separate array of row pointers is pointed into that block, which is what enables A[i][j]-style access. The addition itself is just a double loop.
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include <math.h>

const int ROWS = 1024;
const int COLS = 1024;
using namespace std;

int main()
{
    struct timeval start, end;
    gettimeofday(&start, NULL);

    int *A, **A_ptr, *B, **B_ptr, *C, **C_ptr;
    int total_size = ROWS * COLS * sizeof(int);
    // each matrix is one contiguous block of ROWS*COLS ints
    A = (int*)malloc(total_size);
    B = (int*)malloc(total_size);
    C = (int*)malloc(total_size);
    // one pointer per row, to enable A_ptr[i][j]-style access
    A_ptr = (int**)malloc(ROWS * sizeof(int*));
    B_ptr = (int**)malloc(ROWS * sizeof(int*));
    C_ptr = (int**)malloc(ROWS * sizeof(int*));

    // initialize the flat 1-D arrays on the CPU
    for (int i = 0; i < ROWS * COLS; i++)
    {
        A[i] = 80;
        B[i] = 20;
    }
    // point each row pointer at the start of its row in the flat block
    for (int i = 0; i < ROWS; i++)
    {
        A_ptr[i] = A + COLS * i;
        B_ptr[i] = B + COLS * i;
        C_ptr[i] = C + COLS * i;
    }
    // element-wise addition: two nested loops over rows and columns
    for (int i = 0; i < ROWS; i++)
        for (int j = 0; j < COLS; j++)
        {
            C_ptr[i][j] = A_ptr[i][j] + B_ptr[i][j];
        }

    // check the result: every element should be 80 + 20 = 100,
    // so the accumulated absolute error should be 0
    int max_error = 0;
    for (int i = 0; i < ROWS * COLS; i++)
    {
        //cout << C[i] << endl;
        max_error += abs(100 - C[i]);
    }
    cout << "max_error is " << max_error << endl;

    gettimeofday(&end, NULL);
    int timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    cout << "total time is " << timeuse / 1000 << "ms" << endl;

    // release the data blocks and the row-pointer arrays
    free(A); free(B); free(C);
    free(A_ptr); free(B_ptr); free(C_ptr);
    return 0;
}
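As an aside, because each matrix is stored in one contiguous block, the row pointers are only a convenience for the A[i][j] syntax; the same addition can be written as a single flat loop over all ROWS*COLS elements, exactly like vector addition. A minimal sketch, reusing A, B, and C from the listing above:

// flat version: treat the matrices as plain vectors of ROWS*COLS ints
for (int i = 0; i < ROWS * COLS; i++)
{
    C[i] = A[i] + B[i];
}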
Results
The run takes 19 ms, which is already fast enough for a 1024*1024 matrix.
CUDA version
The CUDA version closely mirrors the CPU version, and the kernel is essentially the vector-addition kernel, except that the one-dimensional data has become two-dimensional.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#define Row 1024
#define Col 1024
__global__
void addKernel(int **C, int **A, int ** B)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
if (idx < Col && idy < Row) {
C[idy][idx] = A[idy][idx] + B[idy][idx];
}
}
int main()
{
struct timeval start, end;
gettimeofday( &start, NULL );
int **A = (int **)malloc(sizeof(int*) * Row);
int **B = (int **)malloc(sizeof(int*) * Row);
int **C = (int **)malloc(sizeof(int*) * Row);
int *dataA = (int *)malloc(sizeof(int) * Row * Col);
int *dataB = (int *)malloc(sizeof(int) * Row * Col);
int *dataC = (int *)malloc(sizeof(int) * Row * Col);
int **d_A;
int **d_B;
int **d_C;
int *d_dataA;
int *d_dataB;
int *d_dataC;
//malloc device memory
cudaMalloc((void**)&d_A, sizeof(int **) * Row);
cudaMalloc((void**)&d_B, sizeof(int **) * Row);
cudaMalloc((void**)&d_C, sizeof(int **) * Row);
cudaMalloc((void**)&d_dataA, sizeof(int) *Row*Col);
cudaMalloc((void**)&d_dataB, sizeof(int) *Row*Col);
cudaMalloc((void**)&d_dataC, sizeof(int) *Row*Col);
//set value
for (int i = 0; i < Row*Col; i++) {
dataA[i] = 90;
dataB[i] = 10;
}
//将主机指针A指向设备数据位置,目的是让设备二级指针能够指向设备数据一级指针
//A 和 dataA 都传到了设备上,但是二者还没有建立对应关系
for (int i = 0; i < Row; i++) {
A[i] = d_dataA + Col * i;
B[i] = d_dataB + Col * i;
C[i] = d_dataC + Col * i;
}
cudaMemcpy(d_A, A, sizeof(int*) * Row, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeof(int*) * Row, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, sizeof(int*) * Row, cudaMemcpyHostToDevice);
cudaMemcpy(d_dataA, dataA, sizeof(int) * Row * Col, cudaMemcpyHostToDevice);
cudaMemcpy(d_dataB, dataB, sizeof(int) * Row * Col, cudaMemcpyHostToDevice);
dim3 threadPerBlock(16, 16);
dim3 blockNumber( (Col + threadPerBlock.x - 1)/ threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y );
printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
addKernel << <blockNumber, threadPerBlock >> > (d_C, d_A, d_B);
//拷贝计算数据-一级数据指针
cudaMemcpy(dataC, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost);
int max_error = 0;
for(int i=0;i<Row*Col;i++)
{
//printf("%d\n", dataC[i]);
max_error += abs(100-dataC[i]);
}
//释放内存
free(A);
free(B);
free(C);
free(dataA);
free(dataB);
free(dataC);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_dataA);
cudaFree(d_dataB);
cudaFree(d_dataC);
printf("max_error is %d\n", max_error);
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("total time is %d ms\n", timeuse/1000);
return 0;
}
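One thing the listing above omits is error checking: a kernel launch is asynchronous, so a failed launch would go unnoticed and the result buffer would simply contain garbage. A minimal check that could be placed right after the launch (a sketch, using only standard CUDA runtime calls):

// check for launch-time errors (bad launch configuration, etc.)
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
// check for execution-time errors by waiting for the kernel to finish
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
    printf("kernel execution failed: %s\n", cudaGetErrorString(err));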
Note that dim3 threadPerBlock(16, 16) declares a two-dimensional block of threads, so the corresponding threadIdx is also two-dimensional (threadIdx.x and threadIdx.y).
dim3 is NVIDIA's built-in three-dimensional index type: CUDA models every grid and every block as three-dimensional, and any dimension you leave unspecified simply defaults to 1.
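For illustration, a few block shapes and the defaults they imply (a standalone sketch, not part of the listing above):

dim3 block1(256);      // 256 x 1 x 1: a 1-D block, as in vector addition
dim3 block2(16, 16);   // 16 x 16 x 1: the 2-D block used in this example
dim3 block3(8, 8, 4);  // 8 x 8 x 4: a fully 3-D block
// any dimension left out defaults to 1, so block1 is equivalent to dim3(256, 1, 1)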
Results
The CUDA version still runs slower than the CPU version. The reason, again, is that the kernel is too simple: each thread does a single addition, so thread scheduling and launch overhead, plus the host-device copies included in the timing, account for most of the total time.
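To confirm this, one can time the kernel alone with CUDA events instead of timing the whole program with gettimeofday; the allocations and copies then drop out of the measurement. A minimal sketch, reusing d_A, d_B, d_C, and the launch configuration from the listing above:

cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);                       // mark the start in the default stream
addKernel<<<blockNumber, threadPerBlock>>>(d_C, d_A, d_B);
cudaEventRecord(ev_stop);                        // mark the end
cudaEventSynchronize(ev_stop);                   // wait until the kernel has finished
float kernel_ms = 0.0f;
cudaEventElapsedTime(&kernel_ms, ev_start, ev_stop);
printf("kernel time: %f ms\n", kernel_ms);
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);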