Created on 2013-8-5
URL: http://blog.sina.com.cn/s/blog_a502f1a30101mjch.html
@author: zhxfl
Please credit the source when reposting.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
__global__ void matrixMulCUDA(int *A, int *B, int *C,
                              dim3 dimsA, dim3 dimsB, dim3 dimsC)
{
    // One block per row of C, one thread per column of C.
    int i = blockIdx.x;
    int j = threadIdx.x;
    for (int k = 0; k < dimsA.y; k++)
    {
        C[i * dimsC.y + j] += A[i * dimsA.y + k] * B[k * dimsB.y + j];
        // printf("id = %d %d %d A = %d B = %d C = %d\n", i, j, k,
        //        A[i * dimsA.y + k], B[k * dimsB.y + j], C[i * dimsC.y + j]);
    }
}

int* matrixMultiplyByGpu(int *h_A, int n1, int m1, int *h_B, int n2, int m2)
{
    int *d_A, *d_B, *d_C;
    int *h_C;
    dim3 dimsA(n1, m1);
    dim3 dimsB(n2, m2);
    dim3 dimsC(n1, m2);
    int mem_size_A = dimsA.x * dimsA.y * sizeof(int);
    int mem_size_B = dimsB.x * dimsB.y * sizeof(int);
    int mem_size_C = dimsC.x * dimsC.y * sizeof(int);
    cudaMalloc((void**)&d_A, mem_size_A);
    cudaMalloc((void**)&d_B, mem_size_B);
    cudaMalloc((void**)&d_C, mem_size_C);
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    // mem_size_C is already in bytes; no extra sizeof(int) factor needed.
    h_C = (int*)malloc(mem_size_C);
    for (int i = 0; i < dimsC.x * dimsC.y; i++) h_C[i] = 0;
    cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice);
    // dimsC.x blocks of dimsC.y threads: one thread per element of C.
    matrixMulCUDA<<<dimsC.x, dimsC.y>>>(d_A, d_B, d_C, dimsA, dimsB, dimsC);
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return h_C;
}

int* matrixMultiplyByCpu(int *h_A, int n1, int m1, int *h_B, int n2, int m2)
{
    int *h_C = new int[n1 * m2];
    for (int i = 0; i < n1 * m2; i++) h_C[i] = 0;
    for (int i = 0; i < n1; i++)
    {
        for (int j = 0; j < m2; j++)
        {
            for (int k = 0; k < m1; k++)
            {
                // h_C[i][j] += h_A[i][k] * h_B[k][j], flattened to 1D:
                h_C[i * m2 + j] += h_A[i * m1 + k] * h_B[k * m2 + j];
            }
        }
    }
    return h_C;
}

void outPutMatrix(char c, int *g, int n, int m)
{
    return;  // output disabled; remove this return to print the matrix
    printf("matrix %c [%3d %3d]\n", c, n, m);
    for (int i = 0; i < n * m; i++)
    {
        printf("%5d ", g[i]);
        if ((i + 1) % m == 0) printf("\n");
    }
}

const int base = 1000;  // matrix dimension n; the test below uses n = 1000
const int large = 10;   // upper bound for the random entries (assumed value)
int main()
{
    int n1 = base;
    int m1 = base + 1;
    int n2 = m1;
    int m2 = base;
    int *g1 = new int[n1 * m1];
    int *g2 = new int[n2 * m2];
    for (int i = 0; i < n1 * m1; i++) g1[i] = rand() % large;
    for (int i = 0; i < n2 * m2; i++) g2[i] = rand() % large;
    outPutMatrix('A', g1, n1, m1);
    outPutMatrix('B', g2, n2, m2);
    int *gg1, *gg2;
    clock_t start, finish;
    start = clock();
    gg1 = matrixMultiplyByGpu(g1, n1, m1, g2, n2, m2);
    finish = clock();
    printf("GPU time = %f\n", (double)(finish - start) / CLOCKS_PER_SEC);
    start = clock();
    gg2 = matrixMultiplyByCpu(g1, n1, m1, g2, n2, m2);
    finish = clock();
    printf("CPU time = %f\n", (double)(finish - start) / CLOCKS_PER_SEC);
    printf("check---");
    for (int i = 0; i < n1 * m2; i++)
    {
        if (gg1[i] != gg2[i])
        {
            printf("wrong ans\n");
            break;
        }
    }
    outPutMatrix('C', gg1, n1, m2);  // GPU result
    outPutMatrix('D', gg2, n1, m2);  // CPU result
    return 0;
}
Version 1

Version 1 analysis:
n is roughly maxThreadsPerBlock.
A matrix here takes about O(n^2) space, and multiplying two such matrices costs about O(n^3) operations. The GPU scheme launches n blocks of n threads each, for n^2-way concurrency, so each thread only does about O(n) work (with n = 1000, that is 10^6 threads each computing a 1000-term dot product).

Version 1 test:
n is roughly maxThreadsPerBlock.
Note that base + 1 must stay below min(maxThreadsPerBlock, maxGridSize[0]); otherwise the launch exceeds CUDA's limits and the results will be wrong.
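Rather than hard-coding these limits, they can be queried at runtime. A minimal sketch (assuming device 0):

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);  // properties of device 0
    printf("maxThreadsPerBlock = %d\n", prop.maxThreadsPerBlock);
    printf("maxGridSize[0]     = %d\n", prop.maxGridSize[0]);
    return 0;
}

An oversized launch does not crash the program; it fails silently unless checked. Calling cudaGetLastError() right after the <<<...>>> launch in matrixMultiplyByGpu would report it (typically "invalid configuration argument") instead of letting it surface as wrong answers.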
On my machine, with n = 1000, the running times are as follows; the computation is about 13.87x faster on the GPU.
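Note that clock() times all of matrixMultiplyByGpu, including the cudaMalloc and cudaMemcpy traffic, so this ratio understates the kernel itself. A sketch of kernel-only timing with CUDA events, reusing the device pointers and dims from the listing above:

cudaEvent_t ev_start, ev_stop;
float ms = 0.0f;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
matrixMulCUDA<<<dimsC.x, dimsC.y>>>(d_A, d_B, d_C, dimsA, dimsB, dimsC);
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);                  // wait for the kernel to finish
cudaEventElapsedTime(&ms, ev_start, ev_stop);   // elapsed time in milliseconds
printf("kernel time = %f ms\n", ms);
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);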
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
__global__ void matrixMulCUDA(float *A, float *B, float *C,
                              dim3 dimsA, dim3 dimsB, dim3 dimsC)
{
    // One block per row of C, one thread per column of C.
    int i = blockIdx.x;
    int j = threadIdx.x;
    for (int k = 0; k < dimsA.y; k++)
    {
        C[i * dimsC.y + j] += A[i * dimsA.y + k] * B[k * dimsB.y + j];
        // printf("id = %d %d %d A = %f B = %f C = %f\n", i, j, k,
        //        A[i * dimsA.y + k], B[k * dimsB.y + j], C[i * dimsC.y + j]);
    }
}

float* matrixMultiplyByGpu(float *h_A, int n1, int m1, float *h_B, int n2, int m2)
{
    float *d_A, *d_B, *d_C;
    float *h_C;
    dim3 dimsA(n1, m1);
    dim3 dimsB(n2, m2);
    dim3 dimsC(n1, m2);
    int mem_size_A = dimsA.x * dimsA.y * sizeof(float);
    int mem_size_B = dimsB.x * dimsB.y * sizeof(float);
    int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    cudaMalloc((void**)&d_A, mem_size_A);
    cudaMalloc((void**)&d_B, mem_size_B);
    cudaMalloc((void**)&d_C, mem_size_C);
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    // mem_size_C is already in bytes; no extra sizeof(float) factor needed.
    h_C = (float*)malloc(mem_size_C);
    for (int i = 0; i < dimsC.x * dimsC.y; i++) h_C[i] = 0;
    cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice);
    // dimsC.x blocks of dimsC.y threads: one thread per element of C.
    matrixMulCUDA<<<dimsC.x, dimsC.y>>>(d_A, d_B, d_C, dimsA, dimsB, dimsC);
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return h_C;
}

float* matrixMultiplyByCpu(float *h_A, int n1, int m1, float *h_B, int n2, int m2)
{
    float *h_C = new float[n1 * m2];
    for (int i = 0; i < n1 * m2; i++) h_C[i] = 0;
    for (int i = 0; i < n1; i++)
    {
        for (int j = 0; j < m2; j++)
        {
            for (int k = 0; k < m1; k++)
            {
                // h_C[i][j] += h_A[i][k] * h_B[k][j], flattened to 1D:
                h_C[i * m2 + j] += h_A[i * m1 + k] * h_B[k * m2 + j];
            }
        }
    }
    return h_C;
}

void outPutMatrix(char c, float *g, int n, int m)
{
    return;  // output disabled; remove this return to print the matrix
    printf("matrix %c [%3d %3d]\n", c, n, m);
    for (int i = 0; i < n * m; i++)
    {
        printf("%5f ", g[i]);
        if ((i + 1) % m == 0) printf("\n");
    }
}

const int base = 1000;  // matrix dimension n; the test uses n = 1000
const int large = 10;   // upper bound for the random entries (assumed value)
int main()
{
    int n1 = base;
    int m1 = base + 1;
    int n2 = m1;
    int m2 = base;
    float *g1 = new float[n1 * m1];
    float *g2 = new float[n2 * m2];
    for (int i = 0; i < n1 * m1; i++) g1[i] = rand() % large + 1.0f / 3.0f;
    for (int i = 0; i < n2 * m2; i++) g2[i] = rand() % large + 1.0f / 3.0f;
    outPutMatrix('A', g1, n1, m1);
    outPutMatrix('B', g2, n2, m2);
    float *gg1, *gg2;
    clock_t start, finish;
    start = clock();
    gg1 = matrixMultiplyByGpu(g1, n1, m1, g2, n2, m2);
    finish = clock();
    printf("GPU time = %f\n", (double)(finish - start) / CLOCKS_PER_SEC);
    start = clock();
    gg2 = matrixMultiplyByCpu(g1, n1, m1, g2, n2, m2);
    finish = clock();
    printf("CPU time = %f\n", (double)(finish - start) / CLOCKS_PER_SEC);
    printf("check---");
    for (int i = 0; i < n1 * m2; i++)
    {
        if (fabs(gg1[i] - gg2[i]) > 0.01)
        {
            printf("%f\n %f\nwrong ans\n", gg1[i], gg2[i]);
            break;
        }
    }
    outPutMatrix('C', gg1, n1, m2);  // GPU result
    outPutMatrix('D', gg2, n1, m2);  // CPU result
    return 0;
}
Version 2

Version 2 analysis:
Same as version 1, but converted to float arithmetic.

Version 2 test:
The results are as follows: not much difference from version 1. I had expected the GPU's floating-point throughput to beat the CPU by a wide margin, but here no clear gap shows up.
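One likely reason the gap is small: the kernel is memory-bound, since every thread re-reads its row of A and column of B from global memory, and the measured time also includes the host-device copies. The standard next step is a shared-memory tiled kernel. A minimal sketch, assuming TILE x TILE thread blocks and matrix dimensions that are multiples of TILE (the general case needs bounds checks):

#define TILE 16

__global__ void matrixMulTiled(float *A, float *B, float *C,
                               dim3 dimsA, dim3 dimsB, dim3 dimsC)
{
    __shared__ float As[TILE][TILE];
    __shared__ float Bs[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float sum = 0.0f;

    // Slide a pair of tiles along the shared dimension of A and B.
    for (int t = 0; t < dimsA.y / TILE; t++)
    {
        As[threadIdx.y][threadIdx.x] = A[row * dimsA.y + t * TILE + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * dimsB.y + col];
        __syncthreads();  // wait until both tiles are fully loaded

        for (int k = 0; k < TILE; k++)
            sum += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        __syncthreads();  // finish with the tiles before overwriting them
    }
    C[row * dimsC.y + col] = sum;
}

Launched as matrixMulTiled<<<dim3(m2 / TILE, n1 / TILE), dim3(TILE, TILE)>>>(d_A, d_B, d_C, dimsA, dimsB, dimsC), each element of A and B is read from global memory once per tile instead of once per output element, which is usually where the expected GPU advantage starts to appear.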