Copying an array from RAM to the GPU and from the GPU back to RAM

I'm trying to introduce some CUDA optimizations into one of my projects, but I think I'm doing something wrong here. I want to implement a simple matrix-vector multiplication (result = matrix * vector). But when I copy the result back to the host, an error occurs (cudaErrorLaunchFailure). Is there a bug in my kernel (matrixVectorMultiplicationKernel), or am I calling cudaMemcpy incorrectly? I haven't found any useful documentation on this error state. I think it corrupts the GPU state completely, because once it has occurred I cannot call any CUDA kernel without this error showing up again.
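Since the failure only surfaces later, it helps to check for errors directly after the kernel launch rather than waiting for the next API call (here the cudaMemcpy) to report it. A minimal sketch; the handleCUDAError used in the code below isn't shown in the question, so this stand-in is hypothetical:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for handleCUDAError (the actual implementation is not shown):
void handleCUDAError(cudaError_t err)
{
    if(err != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Collects both kinds of kernel errors: cudaGetLastError() reports problems
// with the launch itself (e.g. a bad configuration), while
// cudaDeviceSynchronize() surfaces errors raised while the kernel was running.
cudaError_t checkLastLaunch()
{
    cudaError_t err = cudaGetLastError();
    if(err == cudaSuccess)
        err = cudaDeviceSynchronize();
    return err;
}

Calling handleCUDAError(checkLastLaunch()); immediately after a launch pins the error to the kernel instead of to the later cudaMemcpy.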
Edit #1: Updated code, following leftaroundabout's advice.
// code
...
Eigen::MatrixXf matrix(M, N); // matrix.data() returns a pointer to the underlying float array
Eigen::VectorXf vector(N);    // likewise for vector.data()
Eigen::VectorXf result(M);
... // fill matrix and vector
float* matrixOnDevice = copyMatrixToDevice(matrix.data(), matrix.rows(), matrix.cols());
matrixVectorMultiplication(matrixOnDevice, vector.data(), result.data(), matrix.rows(), matrix.cols());
... // clean up
// helper functions
float* copyMatrixToDevice(const float* matrix, int mRows, int mCols)
{
    float* matrixOnDevice;
    const int length = mRows*mCols;
    const int size = length * sizeof(float);
    handleCUDAError(cudaMalloc((void**) &matrixOnDevice, size));
    handleCUDAError(cudaMemcpy(matrixOnDevice, matrix, size, cudaMemcpyHostToDevice));
    return matrixOnDevice;
}
void matrixVectorMultiplication(const float* matrixOnDevice, const float* vector, float* result, int mRows, int mCols)
{
    const int vectorSize = mCols*sizeof(float);
    const int resultSize = mRows*sizeof(float);
    const int matrixLength = mRows*mCols;
    float* deviceVector;
    float* deviceResult;
    handleCUDAError(cudaMalloc((void**) &deviceVector, vectorSize));
    handleCUDAError(cudaMalloc((void**) &deviceResult, resultSize));
    handleCUDAError(cudaMemset(deviceResult, 0, resultSize));
    handleCUDAError(cudaMemcpy(deviceVector, vector, vectorSize, cudaMemcpyHostToDevice));

    int threadsPerBlock = 256;
    int blocksPerGrid = (mRows + threadsPerBlock - 1)/threadsPerBlock;
    matrixVectorMultiplicationKernel<<<blocksPerGrid, threadsPerBlock>>>(matrixOnDevice, vector, result, mRows, mCols, matrixLength);
    // --- no errors yet ---
    handleCUDAError(cudaMemcpy(result, deviceResult, resultSize, cudaMemcpyDeviceToHost)); // cudaErrorLaunchFailure
    handleCUDAError(cudaFree(deviceVector)); // cudaErrorLaunchFailure
    handleCUDAError(cudaFree(deviceResult)); // cudaErrorLaunchFailure
}
__global__ void matrixVectorMultiplicationKernel(const float* matrix, const float* vector, float* result, int mRows, int mCols, int length)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < mRows)
    {
        for(int col = 0, mIdx = row*mCols; col < mCols; col++, mIdx++)
            result[row] += matrix[mIdx] * vector[col];
    }
}
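One thing worth flagging about the launch above: it passes the host pointers vector and result to the kernel, while the freshly allocated deviceVector and deviceResult are never used. Dereferencing host memory inside a kernel is exactly the kind of invalid access that gets reported as cudaErrorLaunchFailure at the next API call, which matches the failing cudaMemcpy and cudaFree lines. A corrected launch would presumably be:

matrixVectorMultiplicationKernel<<<blocksPerGrid, threadsPerBlock>>>(matrixOnDevice, deviceVector, deviceResult, mRows, mCols, matrixLength);
handleCUDAError(cudaGetLastError()); // report launch errors here instead of at the next cudaMemcpy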
It would be reasonable to use CUBLAS instead of writing a kernel like this yourself. – leftaroundabout 2012-04-16 16:40:15
I suppose I'll get to that soon. But CUBLAS seems complicated, and I wanted to start with something simple. – alfa 2012-04-16 16:55:03
In my opinion CUBLAS is the simpler option (but also the more restrictive one). – 2012-04-18 08:16:08
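For comparison, a minimal sketch of the same product via CUBLAS, assuming the v2 API (cublas_v2.h) and column-major storage, which is both Eigen's default layout and what CUBLAS expects; error checks on the cublas* calls are omitted for brevity:

#include <cublas_v2.h>

// Computes deviceResult = matrixOnDevice * deviceVector with SGEMV
// (y = alpha*A*x + beta*y); all three buffers live on the device.
void matrixVectorMultiplicationCublas(const float* matrixOnDevice, const float* deviceVector, float* deviceResult, int mRows, int mCols)
{
    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f;
    const float beta = 0.0f;
    // lda = mRows because the matrix is stored column-major
    cublasSgemv(handle, CUBLAS_OP_N, mRows, mCols, &alpha, matrixOnDevice, mRows, deviceVector, 1, &beta, deviceResult, 1);
    cublasDestroy(handle);
}

In real code the handle would be created once and reused across calls rather than per multiplication.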