2012-07-20 83 views
1

在我的程序中,我对每个在内核lauch中发布的块都有一个工作变量。每个块停留在一个循环中,在其deque中进行处理,处理它并将动态生成的工作推回。一组deque标志被维护,指示哪些deques是活动的,即有工作。如果一个deque是空的,内核会进入另一个循环,试图从另一个块的双端队列中窃取工作。当没有更多的deques被激活时,达到停止条件。CUDA调度问题或内核启动中的错误?

在测试中,我设置了所有以1个工作项开始的deques。我的问题是,一些块似乎根本没有运行。由于其中一些未运行,它们保持活动状态,我的程序进入无限循环。

现在的代码。核心和辅助pop和push功能:

bool __inline__ __device__ pushBottom(int *aiDequeBottoms , const int &iBid , const unsigned int &uiSize , 
unsigned int &uiPushStartIdx) 
{ 
int iOldBot = aiDequeBottoms[iBid]; 
uiPushStartIdx = iOldBot; 
iOldBot += uiSize; 
if(iOldBot < DEQUE_SIZE) 
{ 
    aiDequeBottoms[iBid] = iOldBot; 
    return true; 
} 
else 
{ 
    return false; 
} 
} 

bool __inline__ __device__ popTop(int *aiDequesBottoms , unsigned int *auiDequesAges , const int &iBid , 
int2 &popStartIdxAndSize) 
{ 
int index; 
unsigned int oldAge = auiDequesAges[iBid]; 
int localBot = aiDequesBottoms[iBid]; 
index = oldAge >> WORK_STEALING_TAG_NBITS; 
if(localBot < index + 2*WORK_STEALING_BATCH_SIZE) 
{ 
    return false; 
} 

int localTag = oldAge & WORK_STEALING_TAG_MASK; 
int size = min(WORK_STEALING_BATCH_SIZE , localBot - index); 
unsigned int newAge = (index+size << WORK_STEALING_TAG_NBITS)| localTag; 

if(oldAge == atomicCAS(&auiDequesAges[iBid] , oldAge , newAge)) 
{ 
    popStartIdxAndSize.x = index; 
    popStartIdxAndSize.y = size; 
    return true; 
} 
else 
{ 
    return false; 
} 
} 

bool __inline__ __device__ popBottom(int *aiDequesBottoms , unsigned int *auiDequesAges , const int &iBid , 
int2 &popStartIdxAndSize) 
{ 
int localBot = aiDequesBottoms[iBid]; 
if(localBot == 0) 
{ 
    return false; 
} 

int index = localBot; 
localBot = localBot - WORK_STEALING_BATCH_SIZE; 
aiDequesBottoms[iBid] = localBot; 
unsigned int oldAge = auiDequesAges[iBid]; 
int oldAgeTop = int(oldAge >> WORK_STEALING_TAG_NBITS); 
if(localBot > oldAgeTop) 
{ 
    popStartIdxAndSize.y = WORK_STEALING_BATCH_SIZE; 
    popStartIdxAndSize.x = index - WORK_STEALING_BATCH_SIZE; 
    return true; 
} 

aiDequesBottoms[iBid] = 0; 
unsigned int newAge = ((oldAge & WORK_STEALING_TAG_MASK) + 1) % (WORK_STEALING_TAG_MASK + 1); 
if(index > oldAgeTop) 
{ 
    if(oldAge == atomicCAS(&auiDequesAges[iBid] , oldAge , newAge)) 
    { 
     popStartIdxAndSize.y = index - oldAgeTop; 
     popStartIdxAndSize.x = index - popStartIdxAndSize.y; 
     return true; 
    } 
} 

auiDequesAges[iBid] = newAge; 
return false; 
} 

//---------------------------------------------------------------------------------------------------------------------------- 
// Function to pop work from deques. Each block try to pop from its own deque. If work isn't available, it try to steal from 
// other deques. 
//---------------------------------------------------------------------------------------------------------------------------- 
template <typename Work> 
bool __inline__ __device__ popWork(bool *abDequeFlags , int *aiDequesBottoms , unsigned int *auiDequesAges , 
const Work *aDeques , const int &iTid , const int &iBid , unsigned int &uiPopDequeIdx , int2 &popStartIdxAndSize , 
int &iLocalDequeCounter , bool &bPopFlag , unsigned int *uiActiveDeques , unsigned int &uiActiveDequesIdx , Work &work) 
{ 
if(iTid == 0) 
{ //Try to pop from block deque 
    iLocalDequeCounter = 0; 
    bPopFlag = popBottom(aiDequesBottoms , auiDequesAges , iBid , popStartIdxAndSize); 
    if(bPopFlag) 
    { 
     uiPopDequeIdx = iBid; 
    } 
    else 
    { 
     abDequeFlags[iBid] = false; 
    } 
} 
__syncthreads(); 

while(!bPopFlag) 
{ //No more work, try to steal some! 
    if(iTid == 0) 
    { 
     uiActiveDequesIdx = 0; 
    } 
    __syncthreads(); 

    if(iTid < NDEQUES) 
    { 
     if(abDequeFlags[iTid] == true) //assuming iTid >= NDEQUES 
     { //Set this deque for a work stealing atempt. 
      unsigned int uiIdx = atomicAdd(&uiActiveDequesIdx,1); 
      uiActiveDeques[uiIdx] = iTid; 
     } 
    } 
    __syncthreads(); 

    if(iTid == 0) 
    { //Try to steal until succeeds or there are no more deques left to search 
     bPopFlag = false; 
     for(uiPopDequeIdx = 0 ; uiPopDequeIdx < uiActiveDequesIdx && bPopFlag == false ; ++uiPopDequeIdx) 
     { 
      bPopFlag = popTop(aiDequesBottoms , auiDequesAges , uiPopDequeIdx , popStartIdxAndSize); 
     } 
    } 
    __syncthreads(); 

    if(uiActiveDequesIdx == 0) 
    { //No more work to steal. End. 
     return false; 
    } 
} 

//Get poped data 
if(iTid < popStartIdxAndSize.y) //assuming number of threads >= WORK_SIZE 
{ 
    work = aDeques[uiPopDequeIdx*DEQUE_SIZE + popStartIdxAndSize.x + iTid]; 
} 

return true; 
} 

//---------------------------------------------------------------------------------------------------------------------------- 
// Function to push work on deques. To achieve better coalescent global memory accesses the input data is assumed to be tight 
// packed in shared mem. 
//---------------------------------------------------------------------------------------------------------------------------- 
template <typename Work> 
bool __inline__ __device__ pushWork(int *aiDequesBottoms , Work *aDeques , const int &iTid , const int &iBid , 
const unsigned int &uiDequeOutputCounter , Work *aOutputLocalWork) 
{ 
//Transfer to global mem. 
unsigned int uiWorkLeft = uiDequeOutputCounter; 
unsigned int uiThreadOffset = iTid; 

while(uiWorkLeft > 0) 
{ 
    unsigned int uiWorkTransfered = min(WORK_STEALING_BATCH_SIZE , uiWorkLeft); 
    unsigned int uiPushStartIdx; 
    bool bPushFlag; 
    if(iTid == 0) 
    { 
     bPushFlag = pushBottom(aiDequesBottoms , iBid , uiWorkTransfered , uiPushStartIdx); 
    } 
    __syncthreads(); 
    if(!bPushFlag) 
    { 
     return false; 
    } 

    if(iTid < uiWorkTransfered) 
    { 
     aDeques[DEQUE_SIZE*iBid + uiPushStartIdx + uiThreadOffset] = aOutputLocalWork[uiThreadOffset]; 
    } 

    uiThreadOffset += WORK_STEALING_BATCH_SIZE; 
    uiWorkLeft -= uiWorkTransfered; 
} 

return true; 
} 

void __global__ workKernel(bool *abDequeFlags , int *aiDequesBottoms , unsigned int *auiDequesAges , int2 *aDeques , 
int2 *aOutput , unsigned int *puiOutputCounter) 
{ 
int iTid = threadIdx.x; 
int iBid = blockIdx.x; 

__shared__ int2 aOutputLocalWork[DEQUE_SHARED_SIZE]; 
__shared__ unsigned int uiPopDequeIdx; 
__shared__ int2 popStartIdxAndSize; 
__shared__ int iLocalDequeCounter; 
__shared__ bool bPopFlag; 
__shared__ unsigned int uiActiveDeques[NDEQUES]; //Contains indices for deques with useful work that can be stolen 
__shared__ unsigned int uiActiveDequesIdx; 
__shared__ unsigned int uiLastOutputCounter; 

int2 work; 
int iRun = 0; 

while(true) //Work loop will continue until cannot pop from bottom or cannot steal work from other deques 
{ 
    if(!popWork<int2>(abDequeFlags , aiDequesBottoms , auiDequesAges , aDeques , iTid , iBid , uiPopDequeIdx , 
     popStartIdxAndSize , iLocalDequeCounter , bPopFlag , uiActiveDeques , uiActiveDequesIdx , work)) 
    { //No more work 
     return; 
    } 

    //Useful work comes here. For now, just some dummy code for testing. 
    if(iRun < 5) 
    { //Just 5 iterations that generate more work 
     if(iTid < popStartIdxAndSize.y) 
     { 
      unsigned int uiNewWorkCounter = 1; 
      int iDequeOutputCounter = atomicAdd(&iLocalDequeCounter , uiNewWorkCounter); 
      work.x++; work.y++; 
      aOutputLocalWork[iDequeOutputCounter] = work; 
      __syncthreads(); 

      if(iTid == 0) 
      { 
       uiLastOutputCounter = atomicAdd(puiOutputCounter , iLocalDequeCounter); 
      } 
      __syncthreads(); 

      if(iTid < iLocalDequeCounter) //assuming iLocalDequeCounter <= blockDim.x 
      { 
       aOutput[uiLastOutputCounter + iTid] = aOutputLocalWork[iTid]; 
      } 
     } 
    } 

    //Push back to global mem 
    if(!pushWork<int2>(aiDequesBottoms , aDeques , iTid , iBid , iLocalDequeCounter , aOutputLocalWork)) 
    { //overflow 
     return; 
    } 
    ++iRun; 
} 
} 

这是测试:

#define NDEQUES 256 
#define DEQUE_SIZE 20000 

void workStealingWrap(bool *abDequeFlags , int *auiDequesBottoms , unsigned int *auiDequesAges , int2 *aDeques , 
int2 *aOutput , unsigned int *puiOutputCounter) 
{ 
workKernel<<<NDEQUES , WORK_STEALING_THREADS>>>(abDequeFlags , auiDequesBottoms , auiDequesAges , aDeques , aOutput , 
    puiOutputCounter); 
CUT_CHECK_ERROR("workKernel"); 
} 


//---------------------------------------------------------------------------------------------------------- 
// This entry point is for work stealing testing. 
//---------------------------------------------------------------------------------------------------------- 
int main(int argc, char* argv[]) 
{ 
//Test 0: All deques start with 1 work item. 
bool h_abDequeFlags[NDEQUES]; 
int h_aiDequesBottoms[NDEQUES]; 
unsigned int h_auiDequesAges[NDEQUES]; 
int2 *h_aDeques = (int2*) malloc(sizeof(int2)*NDEQUES*DEQUE_SIZE); 
unsigned int h_uiOutputCounter; 
int2 *h_aOutput = (int2*) malloc(sizeof(int2)*NDEQUES*DEQUE_SIZE); 

for(int i = 0 ; i < NDEQUES ; ++i) 
{ 
    h_abDequeFlags[i] = true; 
    h_aiDequesBottoms[i] = 1; 
    h_auiDequesAges[i] = 0; 
    int2 work; work.x = i ; work.y = i; 
    h_aDeques[DEQUE_SIZE*i] = work; 
} 

bool *d_abDequeFlags; 
int *d_auiDequesBottoms; 
unsigned int *d_auiDequesAges; 
int2 *d_aDeques; 
GPUMALLOC((void**)&d_abDequeFlags , sizeof(bool)*NDEQUES); 
GPUMALLOC((void**)&d_auiDequesBottoms , sizeof(int)*NDEQUES); 
GPUMALLOC((void**)&d_auiDequesAges , sizeof(unsigned int)*NDEQUES); 
GPUMALLOC((void**)&d_aDeques , sizeof(int2)*NDEQUES*DEQUE_SIZE); 

TOGPU(d_abDequeFlags , h_abDequeFlags , sizeof(bool)*NDEQUES); 
TOGPU(d_auiDequesBottoms , h_aiDequesBottoms , sizeof(int)*NDEQUES); 
TOGPU(d_auiDequesAges , h_auiDequesAges , sizeof(unsigned int)*NDEQUES); 
TOGPU(d_aDeques , h_aDeques , sizeof(int2)*NDEQUES*DEQUE_SIZE); 

int2 *d_aOutput; 
unsigned int *d_puiOutputCounter; 
GPUMALLOC((void**)&d_aOutput , sizeof(int2)*NDEQUES*DEQUE_SIZE); 
GPUMALLOC((void**)&d_puiOutputCounter , sizeof(unsigned int)); 
GPUMEMSET(d_aOutput , -1 , sizeof(int2)*NDEQUES*DEQUE_SIZE); 
GPUMEMSET(d_puiOutputCounter , 0 , sizeof(unsigned int)); 

workStealingWrap(d_abDequeFlags , d_auiDequesBottoms , d_auiDequesAges , d_aDeques , d_aOutput , d_puiOutputCounter); 

FROMGPU(h_aOutput , d_aOutput , sizeof(int2)*NDEQUES*DEQUE_SIZE); 
FROMGPU(&h_uiOutputCounter , d_puiOutputCounter , sizeof(unsigned int)); 
assert(h_uiOutputCounter == NDEQUES); 
for(int i = 0 ; i < NDEQUES*DEQUE_SIZE ; ++i) 
{ 
    int2 work = h_aOutput[i]; 
    if(i < NDEQUES) 
    { 
     assert(work.x >= 1 && work.x < NDEQUES*5 && work.y >= 1 && work.y < NDEQUES*5); 
    } 
    else 
    { 
     assert(work.x == -1 && work.y == -1); 
    } 
} 

GPUFREE(d_abDequeFlags); 
GPUFREE(d_auiDequesBottoms); 
GPUFREE(d_auiDequesAges); 
GPUFREE(d_aDeques); 
GPUFREE(d_aOutput); 
GPUFREE(d_puiOutputCounter); 

safeFree(h_aDeques); 
safeFree(h_aOutput); 
} 

使用NSight我已经验证,只是第8块正在运行调试此代码。我想知道这是否是一个计划问题,并且popWork轮询正在消耗所有资源,或者它只是我的程序中的一个错误。任何帮助将非常感激。

+1

您的GPU能够一次运行更多的块吗?您可以在任何GPU上安排1000个块,但这并不意味着它们将同时运行。GPU将尽可能多地运行,而后面的块只有在前一个块结束时才会执行。 – CygnusX1 2012-07-20 16:08:33

+0

我知道块不会同时运行。但我希望这个scheduller至少更聪明,不要让内核阻止饥饿。你有没有任何参考资料可以说后面的块只在早期块完成后才运行?如果是这样,我会降低发布的块数。我不记得阅读过如何在CUDA中完成warp调度。 – 2012-07-20 17:03:22

+1

块从未安排过。如果没有足够的资源来启动所有内容,只有旧的块结束时才能运行新的块。我相信编程指南提到了它,但我也做了我自己的测试(尽管在费米GPU之前),它确实证实了这一点(并且也发现了一些其他问题)。 – CygnusX1 2012-07-20 17:13:25

回答

1

我在代码中发现了一些问题。我没有运行它,所以我不知道它与什么相关。

缺少popTop的实施。我认为它可以成功或者可以失败,结果会影响bPopFlag。我假设你可以有非零值uiActiveDequesIdxbPopFlag仍然为零。

  • 在这种情况下,在popWork你有一个while(!bPopFlag)循环。设想两条经线到达循环中最后的__syncthreads()。现在,第一个warp将检查uiActiveDequesIdx(假设为非零),返回到循环的开始,iTid线程将uiActiveDequesIdx设置为0.现在第二个warp从最后的__syncthreads()恢复执行,检查uiActiveDequesIdx(现在是0)并退出循环。从此时起,你的经纱在__syncthreads()上发生分歧,并且会发生不好的事情。

  • pushWork中,bPushFlag是本地注册表。它只被iTid == 0线程改变,但是它被所有线程读取。您可以读取未初始化的值并在更多的__syncthreads()上获得分散的经纱。

可能还有更多的问题我还没有看到。特别是,我担心在全局内存中设置标志时可能跳过了__threadfence()(请查看编程指南中的内容)

另外,请仔细检查其他环路是否有类似于我报告的问题的同步问题以上 - 其中一些经线仍旧在循环的旧迭代中,而其他经线已经在新的迭代中。我认为一个好的通用方法是,如果你有一个非退化循环取决于某些共享值,那么总是在循环结束时输入一个__syncthreads(),并且只有当你对代码进行了一些晚期的细粒度优化时,才能尝试删除它。

+0

感谢您的提示。由于块调度问题,我没有正确测试代码。我会稍后纠正指出的错误以避免问题。由于您在我的问题的评论中指出了主要问题,因此我会将您的帖子标记为答案。 – 2012-07-20 17:50:29

+0

如果你想正确编译它,我使用低级别的推送和弹出功能编辑了这个问题。 – 2012-07-20 17:56:06