
2D convolution produces incorrect results (CUDA constant memory)

I am struggling with the kernel code. I have updated this post to include the support files; those were provided to me and should be correct.

This is my first GPU program. I have spent several hours trying different things, but I cannot seem to get it right. It compiles and runs, but the results are incorrect.

I am mainly having trouble understanding what I need to do differently, because this kernel produces incorrect results. I am trying to load a tile of the input image into shared memory (Ns[][], which I believe I have done correctly) and then apply the filter to that tile (which is where I am struggling).

If someone with more experience could help me pinpoint where this goes wrong and show me how to fix it, I would be very grateful. If I have asked this question poorly, I apologize and appreciate your time.

main.cu:

#include <stdio.h> 
#include "support.h" 
#include "kernel.cu" 
#include <time.h> 

int main(int argc, char* argv[]){ 
Timer timer; 
time_t t; 


// Initialize host variables ---------------------------------------------- 

printf("\nSetting up the problem..."); fflush(stdout); 
startTime(&timer); 

Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image 
Matrix N_d, P_d; 
unsigned imageHeight, imageWidth; 
cudaError_t cuda_ret; 
dim3 dim_grid, dim_block; 

/* Read image dimensions */ 
if (argc == 1) { 
    imageHeight = 600; 
    imageWidth = 1000; 
} else if (argc == 2) { 
    imageHeight = atoi(argv[1]); 
    imageWidth = atoi(argv[1]); 
} else if (argc == 3) { 
    imageHeight = atoi(argv[1]); 
    imageWidth = atoi(argv[2]); 
} else { 
    printf("\n Invalid input parameters!" 
     "\n Usage: ./convolution   # Image is 600 x 1000" 
     "\n Usage: ./convolution <m>  # Image is m x m" 
     "\n Usage: ./convolution <m> <n> # Image is m x n" 
     "\n"); 
    exit(0); 
} 

/* Allocate host memory */ 
M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE); 
N_h = allocateMatrix(imageHeight, imageWidth); 
P_h = allocateMatrix(imageHeight, imageWidth); 

/* Initialize filter and images */ 
initMatrix(M_h); 
initMatrix(N_h); 

stopTime(&timer); printf("%f s\n", elapsedTime(timer)); 
printf(" Image: %u x %u\n", imageHeight, imageWidth); 
printf(" Mask: %u x %u\n", FILTER_SIZE, FILTER_SIZE); 

// Allocate device variables ---------------------------------------------- 

printf("Allocating device variables..."); fflush(stdout); 
startTime(&timer); 

N_d = allocateDeviceMatrix(imageHeight, imageWidth); 
P_d = allocateDeviceMatrix(imageHeight, imageWidth); 

cudaDeviceSynchronize(); 
stopTime(&timer); printf("%f s\n", elapsedTime(timer)); 

// Copy host variables to device ------------------------------------------ 

printf("Copying data from host to device..."); fflush(stdout); 
startTime(&timer); 

/* Copy image to device global memory */ 
copyToDeviceMatrix(N_d, N_h); 
cudaMemcpyToSymbol(M_h, M_c,FILTER_SIZE*sizeof(float)); 

dim_grid = dim3(((N_h.width/BLOCK_SIZE) + 1), ((N_h.height/BLOCK_SIZE) + 1)); 
dim_block = dim3(BLOCK_SIZE, BLOCK_SIZE); 


cudaDeviceSynchronize(); 
stopTime(&timer); printf("%f s\n", elapsedTime(timer)); 

// Launch kernel ---------------------------------------------------------- 
printf("Launching kernel..."); fflush(stdout); 
startTime(&timer); 


convolution<<<dim_grid, dim_block>>>(N_d, P_d); 

cuda_ret = cudaDeviceSynchronize(); 
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel"); 

cudaDeviceSynchronize(); 
stopTime(&timer); printf("%f s\n", elapsedTime(timer)); 

// Copy device variables back to host ------------------------------------- 

printf("Copying data from device to host..."); fflush(stdout); 
startTime(&timer); 

copyFromDeviceMatrix(P_h, P_d); 

cudaDeviceSynchronize(); 
stopTime(&timer); printf("%f s\n", elapsedTime(timer)); 

// Verify correctness ----------------------------------------------------- 

printf("Verifying results..."); fflush(stdout); 

verify(M_h, N_h, P_h); 

// Free memory ------------------------------------------------------------ 

freeMatrix(M_h); 
freeMatrix(N_h); 
freeMatrix(P_h); 
freeDeviceMatrix(N_d); 
freeDeviceMatrix(P_d); 

return 0; 
} 

kernel.cu:

__constant__ float M_c[FILTER_SIZE][FILTER_SIZE]; 
__global__ void convolution(Matrix N, Matrix P){ 

__shared__ float Ns[TILE_SIZE + 5 - 1][TILE_SIZE + 5 -1]; 
int i, j; 
float output = 0.0f; 
int tx = threadIdx.x; 
int ty = threadIdx.y; 
int row_o = blockIdx.y * TILE_SIZE + ty; 
int col_o = blockIdx.x * TILE_SIZE + tx; 
int row_i = row_o - 2; 
int col_i = col_o - 2;     
if((row_i >= 0) && (row_i < N.height) && (col_i >= 0) && (col_i < N.width)){ 
     Ns[ty][tx] = N.elements[row_i * N.width + col_i];   
} 
else{ 
     Ns[ty][tx] = 0.0f; 
} 
__syncthreads(); 
if(ty < TILE_SIZE && tx < TILE_SIZE){ 
     for(i = 0; i < 5; i++){ 
       for(j = 0; j < 5; j++){ 
       output += M_c[i][j] * Ns[i + ty][j + tx]; 
       } 
     } 
} 
if(row_o < P.height && col_o < P.width){ 
     P.elements[row_o * P.width + col_o] = output; 
} 
} 

support.h:

#ifndef __FILEH__ 
#define __FILEH__ 

#include <sys/time.h> 

typedef struct { 
    struct timeval startTime; 
    struct timeval endTime; 
} Timer; 

// Matrix Structure declaration 
typedef struct { 
    unsigned int width; 
    unsigned int height; 
    unsigned int pitch; 
    float* elements; 
} Matrix; 

#define FILTER_SIZE 5 
#define TILE_SIZE 12 
#define BLOCK_SIZE (TILE_SIZE + FILTER_SIZE - 1) 

Matrix allocateMatrix(unsigned height, unsigned width); 
void initMatrix(Matrix mat); 
Matrix allocateDeviceMatrix(unsigned height, unsigned width); 
void copyToDeviceMatrix(Matrix dst, Matrix src); 
void copyFromDeviceMatrix(Matrix dst, Matrix src); 
void verify(Matrix M, Matrix N, Matrix P); 
void freeMatrix(Matrix mat); 
void freeDeviceMatrix(Matrix mat); 
void startTime(Timer* timer); 
void stopTime(Timer* timer); 
float elapsedTime(Timer timer); 

#define FATAL(msg, ...) \ 
do {\ 
    fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\ 
    exit(-1);\ 
} while(0) 

#if __BYTE_ORDER != __LITTLE_ENDIAN 
# error "File I/O is not implemented for this system: wrong endianness." 
#endif 
#endif 

support.cu:

#include <stdlib.h> 
#include <stdio.h> 

#include "support.h" 

Matrix allocateMatrix(unsigned height, unsigned width) 
{ 
    Matrix mat; 
    mat.height = height; 
    mat.width = mat.pitch = width; 
    mat.elements = (float*)malloc(height*width*sizeof(float)); 
    if(mat.elements == NULL) FATAL("Unable to allocate host"); 

    return mat; 
} 

void initMatrix(Matrix mat) 
{ 
    for (unsigned int i=0; i < mat.height*mat.width; i++) { 
     mat.elements[i] = (rand()%100)/100.00; 
    } 
} 

Matrix allocateDeviceMatrix(unsigned height, unsigned width) 
{ 
    Matrix mat; 
    cudaError_t cuda_ret; 

    mat.height = height; 
    mat.width = mat.pitch = width; 
    cuda_ret = cudaMalloc((void**)&(mat.elements), height*width*sizeof(float)); 
    if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); 

    return mat; 
} 

void copyToDeviceMatrix(Matrix dst, Matrix src) 
{ 
    cudaError_t cuda_ret; 
    cuda_ret = cudaMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), cudaMemcpyHostToDevice); 
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy to device"); 
} 

void copyFromDeviceMatrix(Matrix dst, Matrix src) 
{ 
    cudaError_t cuda_ret; 
    cuda_ret = cudaMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), cudaMemcpyDeviceToHost); 
    if(cuda_ret != cudaSuccess) FATAL("Unable to copy from device"); 
} 

void verify(Matrix M, Matrix N, Matrix P) { 

    const float relativeTolerance = 1e-6; 

    for(int row = 0; row < N.height; ++row) { 
    for(int col = 0; col < N.width; ++col) { 
     float sum = 0.0f; 
     for(int i = 0; i < M.height; ++i) { 
     for(int j = 0; j < M.width; ++j) { 
      int iN = row - M.height/2 + i; 
      int jN = col - M.width/2 + j; 
      if(iN >= 0 && iN < N.height && jN >= 0 && jN < N.width) { 
       sum += M.elements[i*M.width + j]*N.elements[iN*N.width + jN]; 
      } 
     } 
     } 
     float relativeError = (sum - P.elements[row*P.width + col])/sum; 
     if (relativeError > relativeTolerance 
     || relativeError < -relativeTolerance) { 
     printf("TEST FAILED\n\n"); 
     exit(0); 
     } 
    } 
    } 
    printf("TEST PASSED\n\n"); 

} 

void freeMatrix(Matrix mat) 
{ 
    free(mat.elements); 
    mat.elements = NULL; 
} 

void freeDeviceMatrix(Matrix mat) 
{ 
    cudaFree(mat.elements); 
    mat.elements = NULL; 
} 

void startTime(Timer* timer) { 
    gettimeofday(&(timer->startTime), NULL); 
} 

void stopTime(Timer* timer) { 
    gettimeofday(&(timer->endTime), NULL); 
} 

float elapsedTime(Timer timer) { 
    return ((float) ((timer.endTime.tv_sec - timer.startTime.tv_sec) \ 
       + (timer.endTime.tv_usec - timer.startTime.tv_usec)/1.0e6)); 
} 

Posting incomplete, non-compilable code helps no one if your code is not producing correct results. The error is in code you chose not to post. Your code contains no API error checking. Are you sure no runtime errors are being generated? What happens when you run the program with cuda-memcheck? – talonmies


I have edited the original post to include that information. – GiH

Answer


One set of problems is here:

cudaMemcpyToSymbol(M_h, M_c,FILTER_SIZE*sizeof(float)); 

Running your code with cuda-memcheck would have pointed you right at this line as a problem.

  1. The first argument should be the destination symbol, i.e. M_c, and the second argument should be the host source pointer, i.e. M_h.

  2. Also, shouldn't the size be FILTER_SIZE*FILTER_SIZE? Isn't the amount of data you want to transfer equal to the dimension squared?

  3. Finally, M_h is not a valid source pointer. You should use M_h.elements.

So, something like this:

cudaMemcpyToSymbol(M_c, M_h.elements,FILTER_SIZE*FILTER_SIZE*sizeof(float)); 
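
You can also check the return status of this call. A minimal sketch, reusing the cuda_ret variable and the FATAL macro already in your code (the message text is just an example):

cuda_ret = cudaMemcpyToSymbol(M_c, M_h.elements, FILTER_SIZE*FILTER_SIZE*sizeof(float)); 
if(cuda_ret != cudaSuccess) FATAL("Unable to copy filter to constant memory"); 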

I am not claiming this fixes all of the problems in your code. To continue debugging, I would print out one element of the GPU result that does not match your verify routine, then work through the arithmetic for that single element. Use printf from device code if that helps.
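
For example, here is a minimal host-side sketch, adapted from your verify routine, that reports the location and values of the first mismatching element instead of just failing (the function name and message text are only illustrative):

void reportFirstMismatch(Matrix M, Matrix N, Matrix P) 
{ 
    const float relativeTolerance = 1e-6; 

    for(int row = 0; row < (int)N.height; ++row) { 
        for(int col = 0; col < (int)N.width; ++col) { 
            /* Recompute the reference result for this one output element */ 
            float sum = 0.0f; 
            for(int i = 0; i < (int)M.height; ++i) { 
                for(int j = 0; j < (int)M.width; ++j) { 
                    int iN = row - (int)M.height/2 + i; 
                    int jN = col - (int)M.width/2 + j; 
                    if(iN >= 0 && iN < (int)N.height && jN >= 0 && jN < (int)N.width) { 
                        sum += M.elements[i*M.width + j]*N.elements[iN*N.width + jN]; 
                    } 
                } 
            } 
            /* Compare against the value the GPU produced */ 
            float gpu = P.elements[row*P.width + col]; 
            float relativeError = (sum - gpu)/sum; 
            if(relativeError > relativeTolerance || relativeError < -relativeTolerance) { 
                printf("First mismatch at (%d, %d): expected %f, got %f\n", row, col, sum, gpu); 
                return; 
            } 
        } 
    } 
    printf("No mismatches found\n"); 
} 

Knowing the row and column of the first wrong element usually tells you whether the problem is at tile borders, at image borders, or everywhere.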

In the future, please run your code with cuda-memcheck (for example, cuda-memcheck ./convolution) before asking for help here. Even if you do not understand the output, it will be useful to those trying to help you.


I appreciate you taking the time to help. This is the first time I have heard of cuda-memcheck, so I will look into it and use it going forward. Thank you. – GiH