CUDA matrix multiplication locks up and displays a zero matrix

I'm attempting to write a simple matrix multiplication program that continually adds the product of two matrices to a third result matrix (I'm essentially giving the GPU a workout while I measure power consumption with a separate device).

My problem arises when I specify a large number of iterations. I've tried this with several combinations of BLOCK_SIZE and matrix dimension values, and I've noticed that the iteration count can be increased with smaller matrix dimensions, but BLOCK_SIZE must be the square root of the matrix dimensions (square matrices here).

The resulting failure in this case is a 39-second freeze (regardless of the iteration value, as long as it's "too many"), followed by an all-zero matrix being output. Interestingly, I ran it once with 20000 iterations and it worked fine. I ran it again and got the freezing error.

Any ideas? Thanks in advance!

Kernel:

//******************************************************************** 
// matrixMultiplication_kernel.cu 
// 
// Kernel for a basic CUDA matrix multiplication program. 
//******************************************************************** 

#ifndef MATRIXMULTIPLICATION_KERNEL 
#define MATRIXMULTIPLICATION_KERNEL 

#define BLOCK_SIZE 16 // Set thread block size 
#define colsA 256  // Set matrix A column dimension 
#define rowsA 256  // Set matrix A row dimension 
#define colsB 256  // Set matrix B column dimension 
#define rowsB colsA // Set matrix B row dimension 
#define colsC colsB // Set matrix C column dimension 
#define rowsC rowsA // Set matrix C row dimension 

//-------------------------------------------------------------------- 
// matrixMultiplication() - Multiplies matrixA and matrixB, storing 
//       the result in device memory for matrixC. 
// 
// PRE: matrixA, matrixB, and matrixC are float pointers; numColsA 
//  and numColsB are integers. 
// POST: The result of multiplying matrixA and matrixB is stored in 
//  matrixC. 
//-------------------------------------------------------------------- 
__global__ void matrixMultiplication(float * matrixA, float * matrixB, 
                                     float * matrixC, int numColsA, 
                                     int numColsB) { 

    /* Declare the matrix-multiplication accumulator outside of the 
       loops, initialized since it is only ever added to below */ 
    float val = 0.0f; 

    /* Set block and thread index positions */ 
    int blockX = blockIdx.x; 
    int blockY = blockIdx.y; 
    int threadX = threadIdx.x; 
    int threadY = threadIdx.y; 

    /* 
    Set starting and ending indices of the first sub-matrix of A 
    and sub-matrix size for matrix A 
    */ 
    int startA = numColsA * BLOCK_SIZE * blockY; 
    int endA = startA + numColsA - 1; 
    int subSizeA = BLOCK_SIZE; 

    /* 
    Set starting index of the first sub-matrix of B and sub-matrix 
    size for matrix B 
    */ 
    int startB = BLOCK_SIZE * blockX; 
    int subSizeB = BLOCK_SIZE * colsB; 

    /* Perform the matrix multiplication 20000 times */ 
    for (int iteration = 0; iteration < 20000; iteration++) { 

        /* Loop through matrix A's and matrix B's sub-matrices */ 
        for (int i = startA, j = startB; i <= endA; 
             i += subSizeA, j += subSizeB) { 

            /* 
             Declare shared memory arrays for the matrix A and B 
             sub-matrices 
            */ 
            __shared__ float subA[BLOCK_SIZE][BLOCK_SIZE]; 
            __shared__ float subB[BLOCK_SIZE][BLOCK_SIZE]; 

            /* Fill the sub-matrices */ 
            subA[threadY][threadX] = 
                matrixA[i + colsA * threadY + threadX]; 
            subB[threadY][threadX] = 
                matrixB[j + colsB * threadY + threadX]; 

            /* Ensure that the sub-matrices are loaded */ 
            __syncthreads(); 

            /* Loop through the block */ 
            for (int k = 0; k < BLOCK_SIZE; ++k) { 

                /* Accumulate the product of two matrix elements */ 
                val += subA[threadY][k] * subB[k][threadX]; 
            } 

            /* 
             Ensure completion before the next set of sub-matrices 
             begins computation 
            */ 
            __syncthreads(); 
        } 

        /* Store this thread's accumulated value in device memory */ 
        int position = colsB * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX; 
        matrixC[position + colsB * threadY + threadX] = val; 
    } 
} 

#endif 

Host:

//******************************************************************** 
// matrixMultiplication.cu 
// 
// A basic CUDA matrix multiplication program. 
//******************************************************************** 

/* Include necessary libraries and kernel */ 
#include <stdlib.h> 
#include <stdio.h> 
#include <math.h> 
#include <matrixMultiplication_kernel.cu> 

/* Function declarations */ 
void fillMatrix(float * matrix, int numIndices); 

//************* 
// Main Program 
//************* 
int main(int argc, char** argv) { 

    /* Declare device memory */ 
    float * deviceA; 
    float * deviceB; 
    float * deviceC; 

    srand(2013); // Set random seed 

    /* Determine total number of indices in each matrix */ 
    unsigned int numIndicesA = colsA * rowsA; 
    unsigned int numIndicesB = colsB * rowsB; 
    unsigned int numIndicesC = colsC * rowsC; 

    /* Determine memory size of each matrix */ 
    unsigned int memoryA = sizeof(float) * numIndicesA; 
    unsigned int memoryB = sizeof(float) * numIndicesB; 
    unsigned int memoryC = sizeof(float) * numIndicesC; 

    /* Allocate memory for each matrix */ 
    float * matrixA = (float *) malloc(memoryA); 
    float * matrixB = (float *) malloc(memoryB); 
    float * matrixC = (float *) malloc(memoryC); 

    /* Set contents of matrices A and B (matrix C is all zeros) */ 
    fillMatrix(matrixA, numIndicesA); 
    fillMatrix(matrixB, numIndicesB); 

    /* Allocate device memory for each matrix */ 
    cudaMalloc((void **) &deviceA, memoryA); 
    cudaMalloc((void **) &deviceB, memoryB); 
    cudaMalloc((void **) &deviceC, memoryC); 

    /* Copy host memory to device memory for matrices A and B */ 
    cudaMemcpy(deviceA, matrixA, memoryA, cudaMemcpyHostToDevice); 
    cudaMemcpy(deviceB, matrixB, memoryB, cudaMemcpyHostToDevice); 

    /* Set thread count to BLOCK_SIZE x BLOCK_SIZE */ 
    dim3 tCount(BLOCK_SIZE, BLOCK_SIZE); 

    /* Set thread block count */ 
    dim3 tbCount((colsC/tCount.x), (rowsC/tCount.y)); 

    /* Run kernel */ 
    matrixMultiplication <<< tbCount, tCount >>> (deviceA, deviceB, 
          deviceC, colsA, 
          colsB); 

    /* Copy device memory to host memory for matrix C */ 
    cudaMemcpy(matrixC, deviceC, memoryC, cudaMemcpyDeviceToHost); 

    /* Print the first 256 elements of matrix C */ 
    for (int i = 0; i < 256; i++) { 
        printf("%f ", matrixC[i]); 
    } 
    printf("\n"); 

    /* Free up host and device memory for each matrix */ 
    free(matrixA); 
    free(matrixB); 
    free(matrixC); 
    cudaFree(deviceA); 
    cudaFree(deviceB); 
    cudaFree(deviceC); 
} 

//-------------------------------------------------------------------- 
// fillMatrix - Assigns a random float value to each index of the 
//    matrix. 
// 
// PRE: matrix is a pointer to a block of bytes in memory; numIndices 
//  is the number of indices in the matrix being instantiated. 
// POST: Each index of the matrix has been filled with random float 
//  values. 
//-------------------------------------------------------------------- 
void fillMatrix(float * matrix, int numIndices) { 

    /* Loop through each index of the matrix */ 
    for (int i = 0; i < numIndices; ++i) { 

    /* 
     Assign a random float between 0 and 1 for this index of 
     the matrix 
    */ 
    matrix[i] = rand()/(float)RAND_MAX; 
    } 
} 
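
One thing worth noting about the host code above: none of the CUDA calls are checked, so a kernel that the driver kills (for example, via a watchdog timeout) shows up only as an all-zero result matrix. Below is a minimal sketch of the kind of status checking that would surface the failure; the checkCuda helper is an illustrative name, not part of the original program.

/* Hypothetical helper: print a readable message and exit on any 
   CUDA error */ 
static void checkCuda(cudaError_t status, const char * label) { 
    if (status != cudaSuccess) { 
        fprintf(stderr, "%s failed: %s\n", label, 
                cudaGetErrorString(status)); 
        exit(EXIT_FAILURE); 
    } 
} 

/* Usage, placed just after the kernel launch in main() */ 
checkCuda(cudaGetLastError(), "kernel launch"); 
checkCuda(cudaDeviceSynchronize(), "kernel execution"); 

With this in place, a watchdog reset typically reports an error such as "the launch timed out and was terminated" rather than silently returning zeros.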

Makefile:

GCC = nvcc 
CUDA_INSTALL_PATH := /usr/local/cuda 
INCLUDES := -I. -I$(CUDA_INSTALL_PATH)/include 
CUDA_LIBS := -L$(CUDA_INSTALL_PATH)/lib -lcudart 

matrixMultiplication.o:	matrixMultiplication.cu 
	$(GCC) $(INCLUDES) -c matrixMultiplication.cu -o $@ 

matrixMultiplication:	matrixMultiplication.o 
	$(GCC) -o $@ matrixMultiplication.o $(CUDA_LIBS) 

clean: 
     $(RM) *.o *~ 

Insufficient cooling? Since you already have a power meter hooked up, you can easily verify whether the square-matrix case is indeed the one that draws the most power. – tera 2013-03-26 01:09:45


I'm actually comparing CUDA and OpenCL; matrix multiplication is just a program common to both. – 2013-03-26 01:13:08


Are you running the computation on a dedicated GPU? If not, it may be the system timing out because the display driver has become unresponsive. (On Windows, the timeout is only 2 seconds.) – 2013-03-26 01:23:01
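
Whether the device is subject to such a display watchdog can be queried from the CUDA runtime via the kernelExecTimeoutEnabled field of cudaDeviceProp. A minimal standalone sketch, assuming device 0 is the GPU in question:

#include <stdio.h> 
#include <cuda_runtime.h> 

/* Report whether device 0 enforces a kernel run-time limit 
   (i.e. whether it is also driving a display) */ 
int main(void) { 
    cudaDeviceProp prop; 
    cudaGetDeviceProperties(&prop, 0); 
    printf("%s: kernel timeout %s\n", prop.name, 
           prop.kernelExecTimeoutEnabled ? "ENABLED" : "disabled"); 
    return 0; 
} 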

Answer


Problem solved! It was a system timeout issue caused by the long kernel duration. By switching terminal modes (so the GPU was no longer driving a display and the watchdog no longer applied), I was able to bypass the problem.

Thanks for all the help, guys!
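
An alternative to running without a display is to keep each individual launch short: move the 20000-iteration loop out of the kernel and into the host, so every launch finishes quickly and stays well under any watchdog limit. A rough sketch against the code above, with the kernel's outer iteration loop removed; note that matrixC then holds the product of a single pass rather than the 20000-fold accumulation, which is fine for a power-measurement workout:

/* Host-side workout loop: each launch performs one multiplication 
   pass, so no single kernel can trip the watchdog. */ 
for (int iteration = 0; iteration < 20000; iteration++) { 
    matrixMultiplication <<< tbCount, tCount >>> (deviceA, deviceB, 
                                                  deviceC, colsA, 
                                                  colsB); 
} 
cudaDeviceSynchronize(); /* wait once, after all launches are queued */ 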