The basic Numba CUDA workflow: copy data to the device, launch the kernel, copy the result back.

A = cuda.to_device(a)        # copy host array a onto the GPU
f[griddim, blockdim](A)      # launch kernel f with griddim blocks of blockdim threads each
b = A.copy_to_host()         # copy the result back to the host
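A minimal runnable sketch of this transfer/launch/copy-back pattern (the kernel double_kernel and the input array a are illustrative, not from the original notes):

from numba import cuda
import numpy as np

@cuda.jit
def double_kernel(A):
    i = cuda.grid(1)             # absolute index of this thread
    if i < A.size:               # guard: extra threads fall past the end
        A[i] = A[i] * 2          # double each element in place

a = np.arange(8, dtype=np.float64)
A = cuda.to_device(a)            # host -> device
double_kernel[1, 8](A)           # 1 block of 8 threads
b = A.copy_to_host()             # device -> host
print(b)                         # expected: 0, 2, 4, ..., 14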
Thread-indexing attributes available inside a kernel (each has .x, .y and .z components):

cuda.threadIdx    # index of the thread within its block
cuda.blockIdx     # index of the block within the grid
cuda.blockDim     # number of threads per block
cuda.gridDim      # number of blocks in the grid
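Combining these gives each thread a unique global index; for a 1-D launch the usual formula is shown below (cuda.grid(1) is shorthand for the same computation). The kernel name and output array are illustrative:

from numba import cuda

@cuda.jit
def write_index(out):
    # global index = blocks before this one * threads per block + offset within this block
    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    if i < out.size:
        out[i] = i               # equivalent: i = cuda.grid(1)

The full example below uses blockIdx.x as the row index and threadIdx.x as the column index to implement element-wise matrix addition and matrix multiplication.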
from numba import cuda
import numpy as np
@cuda.jit
def addGPU(A, B, C):
    # one block per row, one thread per column
    i = cuda.blockIdx.x
    j = cuda.threadIdx.x
    C[i, j] = A[i, j] + B[i, j]

@cuda.jit
def mulGPU(A, B, C, k):
    # each thread computes one output element,
    # accumulating over the shared dimension k
    i = cuda.blockIdx.x
    j = cuda.threadIdx.x
    C[i, j] = 0
    for k0 in range(k):
        C[i, j] += A[i, k0] * B[k0, j]

if __name__ == '__main__':
    print("start")
    N = 3
    A = np.array([
        [1, 0, 0],
        [0, 2, 0],
        [0, 0, 1]
    ])
    B = np.array([
        [1, 0, 0],
        [0, 1, 1],
        [0, 1, 1]
    ])
    # copy the inputs to the device and allocate the output there
    d_A = cuda.to_device(A)
    d_B = cuda.to_device(B)
    d_C = cuda.to_device(np.zeros((N, N)))
    print("A:\n", A, "\nB:\n", B)
    # launch N blocks of N threads: blockIdx.x -> row, threadIdx.x -> column
    addGPU[N, N](d_A, d_B, d_C)
    C = d_C.copy_to_host()
    print("add:\n", C)
    mulGPU[N, N](d_A, d_B, d_C, N)
    C = d_C.copy_to_host()
    print("mul:\n", C)
Reference: https://blog.csdn.net/sazass/article/details/108649728