
I am trying to convert an old MATLAB exercise to OpenCV (converting MATLAB Gaussian derivatives to OpenCV). The code is below. I have not found the OpenCV functions I was looking for, possibly because they go by different names than I expected.

Below is the output when the maximum response at each position is taken as the label. Something is clearly wrong.

[Images: my OpenCV output / MATLAB output]

Here is the MATLAB code:

function responses = getBifResponsesEx(im, myEps, sigma, kernelSize) 

    if (nargin == 3) 
     if (sigma >= 1) 
      kernelSize = 6*sigma + 1; 
     else 
      kernelSize = 7; 
     end   
    end 

    responses = zeros(size(im,1), size(im,2), 7); 

    % 
    % Gaussian derivatives  
    % 
    kernVal = ceil(kernelSize/2) - 1; 
    x = (-kernVal:kernVal); 
    g = 1/(2*pi*sigma^2)*exp(-(x.^2./(2*(sigma^2)))); 
    g = g/sum(g); 
    dg = -2*x/(2*sigma^2).*g*sigma; 
    ddg = ((2*x/(2*sigma^2)).^2 - 1/(sigma^2)).*g*sigma; 

    % 
    % Gaussian convolution of the image 
    % 
    s00 = filter2(g, im); 
    s00 = filter2(g', s00); 

    s10 = filter2(g', im); 
    s10 = filter2(dg, s10); 

    s01 = filter2(g, im); 
    s01 = filter2(dg', s01); 

    s11 = filter2(dg, im); 
    s11 = filter2(dg', s11); 

    s20 = filter2(g', im); 
    s20 = filter2(g', s20); 
    s20 = filter2(ddg, s20); 

    s02 = filter2(g, im); 
    s02 = filter2(g, s02); 
    s02 = filter2(ddg', s02);  

    % 
    % Symmetry types - MISSING CODE!!!! 
    % 
    lam = sigma^2*(s20+s02); 
    gam = sigma^2*(sqrt((s20-s02).^2+4*s11.^2)); 

    responses(:,:,1) = myEps*s00; 
    responses(:,:,2) = 2*sigma*sqrt(s10.^2+s01.^2); 
    responses(:,:,3) = +lam; 
    responses(:,:,4) = -lam; 
    responses(:,:,5) = 2^-.5*(gam+lam); 
    responses(:,:,6) = 2^-.5*(gam-lam); 
    responses(:,:,7) = gam; 


end 
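For reference, this is my reading of the 1-D kernels and the second-order terms the MATLAB code above constructs, written in LaTeX notation (keeping the extra factor of sigma that the code multiplies into dg and ddg):

g(x) = \frac{1}{2\pi\sigma^{2}} \exp\!\left(-\frac{x^{2}}{2\sigma^{2}}\right), \quad \text{normalized so that } \sum_{x} g(x) = 1

dg(x) = -\frac{x}{\sigma^{2}}\, g(x)\, \sigma, \qquad
ddg(x) = \left(\frac{x^{2}}{\sigma^{4}} - \frac{1}{\sigma^{2}}\right) g(x)\, \sigma

\lambda = \sigma^{2}\,(s_{20} + s_{02}), \qquad
\gamma = \sigma^{2}\,\sqrt{(s_{20} - s_{02})^{2} + 4\, s_{11}^{2}}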

Here is my conversion attempt. From what I can tell, it goes wrong at the s20 and s02 responses. Can anyone tell me what to do?

void extract_bif_features(const cv::Mat & src, 
     std::vector<cv::Mat> & dst, BIFParams params) 
    { 

     float sigma = params.sigma; 
     float n=0; 

     int kernelSize; 
     if(sigma>=1) 
      kernelSize = 6*sigma + 1; 
     else 
      kernelSize = 7; 



     cv::Mat gray,p00,p10,p01,p11,p20,p02; 
     cv::cvtColor(src,gray,CV_BGR2GRAY); 


     auto kernVal = (int)ceil(kernelSize/2.0) - 1; 
     cv::Mat_<float> g(1,kernelSize);float*gp = g.ptr<float>(); 
     cv::Mat_<float> dg(1,kernelSize);float*dgp = dg.ptr<float>(); 
     cv::Mat_<float> ddg(1,kernelSize); float*ddgp = ddg.ptr<float>(); 
     cv::Mat_<float> X(1,kernelSize);float*xp = X.ptr<float>(); 
     auto gsum=0.0f; 
     for(int x = -kernVal;x<=kernVal;++x) 
     { 
      xp[x+kernVal] = x; 
      gp[x+kernVal] = 1/(2*CV_PI*sigma*sigma)*exp(-(x*x/(2*(sigma*sigma)))); 
      gsum += gp[x+kernVal]; 
     } 

     g = g/gsum; 


     cv::multiply((-2*X/(2*sigma*sigma)),g*sigma,dg); 

     cv::pow((2*X/(2*sigma*sigma)),2,ddg); 
     ddg -=1/(sigma*sigma); 
     cv::multiply(ddg,g*sigma,ddg); 
     std::cout << ddg<< std::endl; 
     std::cout << dg<< std::endl; 





      cv::sepFilter2D(gray,p00,CV_32FC1,g,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::sepFilter2D(gray,p01,CV_32FC1,dg,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::sepFilter2D(gray,p10,CV_32FC1,g,dg,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::sepFilter2D(gray,p11,CV_32FC1,dg,dg,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);

      //NOT SURE HERE
      cv::sepFilter2D(gray,p20,CV_32FC1,g,ddg,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      //cv::sepFilter2D(p20,p20,CV_32FC1,1,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::sepFilter2D(gray,p02,CV_32FC1,g,ddg,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      //cv::sepFilter2D(p02,p02,CV_32FC1,g,1,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);

      cv::filter2D(gray,p20,CV_32FC1,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::filter2D(p20,p20,CV_32FC1,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::filter2D(p20,p20,CV_32FC1,ddg,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::filter2D(gray,p02,CV_32FC1,g,cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::filter2D(p02,p02,CV_32FC1,g.t(),cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);
      cv::filter2D(p02,p02,CV_32FC1,ddg.t(),cv::Point(-1,-1),0.0,cv::BORDER_REPLICATE);




     dst.resize(6); 
     auto sigma_square = sigma*sigma; 

     cv::Mat Lam = sigma_square * (p20+p02); 
     cv::Mat Gam ; 
     cv::sqrt((((p20-p02)*(p20-p02))+4*p11*p11),Gam); 
     Gam *= sigma_square ; 

     cv::Mat test = p10*p10; 
     //slop 
     cv::sqrt(p10*p10 + p01*p01,dst[0]); 
     dst[0] = dst[0]*2*sigma;//slop 
     //blob 
     dst[1] = Lam; 
     dst[2] = -1*Lam; 
     //line 
     dst[3] = sqrt(2.0f)*(Gam+Lam); 
     dst[4] = sqrt(2.0f)*(Gam-Lam); 
     //saddle 
     dst[5] = Gam; 


    } 
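For reference, both MATLAB's filter2 and OpenCV's filter2D/sepFilter2D compute correlation (no kernel flip), so a 1xN row kernel filters along rows and its transpose filters along columns, just like g and g' in the MATLAB code; sepFilter2D applies kernelX along rows and kernelY along columns. Below is a minimal, self-contained check of that mapping; the toy random image and kernel values are only illustrative and not part of the original code:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat im(32, 32, CV_32F);
    cv::randu(im, cv::Scalar(0.f), cv::Scalar(1.f));   // toy test image

    // toy 1x3 smoothing and derivative kernels, row vectors like g and dg in MATLAB
    cv::Mat g  = (cv::Mat_<float>(1, 3) << 0.25f, 0.5f, 0.25f);
    cv::Mat dg = (cv::Mat_<float>(1, 3) << 0.5f, 0.0f, -0.5f);

    // MATLAB: s10 = filter2(dg, filter2(g', im))  ->  smooth along columns, differentiate along rows
    cv::Mat tmp, viaTwoPasses, viaSep;
    cv::filter2D(im, tmp, CV_32F, g.t());              // like filter2(g', im)
    cv::filter2D(tmp, viaTwoPasses, CV_32F, dg);       // like filter2(dg, ...)

    cv::sepFilter2D(im, viaSep, CV_32F, dg, g);        // kernelX = dg (rows), kernelY = g (columns)

    // away from the image border the two results agree up to float rounding
    cv::Rect interior(2, 2, 28, 28);
    std::cout << cv::norm(viaTwoPasses(interior), viaSep(interior)) << std::endl;  // ~0
    return 0;
}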

Answers


The answer, as it turned out, was the following:

cv::multiply((p20-p02),(p20-p02),Gam); is not the same as Gam = (p20-p02)*(p20-p02);
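In OpenCV, cv::multiply (or Mat::mul) computes the per-element product, which is what MATLAB's .* and .^ operations mean here, while operator* on two cv::Mat objects is true matrix multiplication. A tiny illustration with made-up 2x2 values, not taken from the code above:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat A = (cv::Mat_<float>(2, 2) << 1, 2,
                                          3, 4);

    cv::Mat perElement;
    cv::multiply(A, A, perElement);      // same as A.mul(A): [1 4; 9 16]

    cv::Mat matProduct = A * A;          // matrix multiplication: [7 10; 15 22]

    std::cout << "A.mul(A) =\n" << perElement << std::endl;
    std::cout << "A * A    =\n" << matProduct << std::endl;
    return 0;
}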

Full code, classifying by the highest response as in Griffin (2008); a sketch of the per-pixel labelling step is given after the code below:

RIDAR_API void extract_bif_features(const cv::Mat & src, 
    std::vector<cv::Mat> & dst, BIFParams params) 
{ 

    float sigma = params.sigma; 
    float eta = params.eta; 

    int kernelSize; 
    if(sigma>=1) 
     kernelSize = 4*sigma + 1; 
    else 
     kernelSize = 5; 

    auto kernVal = (int)ceil(kernelSize/2.0) - 1; 
    cv::Mat_<float> g(1,kernelSize);float*gp = g.ptr<float>(); 
    cv::Mat_<float> X(1,kernelSize);float*xp = X.ptr<float>(); 

    auto gsum=0.0f; 
    for(int x = -kernVal;x<=kernVal;++x) 
    { 
     xp[x+kernVal] = x; 
     gp[x+kernVal] = 1/(2*CV_PI*sigma*sigma)*exp(-(x*x/(2*(sigma*sigma)))); 
     gsum += gp[x+kernVal]; 
    }
    g = g/gsum;

    cv::Mat dg = -2*X.mul(g*sigma)/(2*sigma*sigma); 
    cv::Mat ddg = ((2*X/(2*sigma*sigma)).mul((2*X/(2*sigma*sigma))) - 1/(sigma*sigma)).mul(g*sigma); 



    cv::Mat gray,p00,p10,p01,p11,p20,p02; 
    cv::cvtColor(src,gray,CV_BGR2GRAY); 

    cv::filter2D(gray,p00,CV_32FC1,g); 
    cv::filter2D(p00,p00,CV_32FC1,g.t()); 

    cv::filter2D(gray,p10,CV_32FC1,g.t()); 
    cv::filter2D(p10,p10,CV_32FC1,dg); 

    cv::filter2D(gray,p01,CV_32FC1,g); 
    cv::filter2D(p01,p01,CV_32FC1,dg.t()); 

    cv::filter2D(gray,p11,CV_32FC1,dg); 
    cv::filter2D(p11,p11,CV_32FC1,dg.t()); 

    cv::filter2D(gray,p20,CV_32FC1,g.t()); 
    cv::filter2D(p20,p20,CV_32FC1,g.t()); 
    cv::filter2D(p20,p20,CV_32FC1,ddg); 

    cv::filter2D(gray,p02,CV_32FC1,g); 
    cv::filter2D(p02,p02,CV_32FC1,g); 
    cv::filter2D(p02,p02,CV_32FC1,ddg.t()); 


#ifdef DISPLAY_WHILE_RUNNING 
    double max,min; 
    cv::imshow("p00",p00/255); 
    // 
    cv::minMaxIdx(p01,&min,&max); 
    cv::imshow("p01",(p01-min)/(max-min)); 
    // 
    cv::minMaxIdx(p10,&min,&max); 
    cv::imshow("p10",(p10-min)/(max-min)); 

    cv::minMaxIdx(p11,&min,&max); 
    cv::imshow("p11",(p11-min)/(max-min)); 

    cv::minMaxIdx(p02,&min,&max); 
    cv::imshow("p02",(p02-min)/(max-min)); 

    cv::minMaxIdx(p20,&min,&max); 
    cv::imshow("p20",(p20-min)/(max-min)); 
    cv::waitKey(); 
#endif 


    dst.resize(7); 
    auto sigma_square = sigma*sigma; 
    auto p2d = p20-p02; 

    //LAM 
    dst[2] = sigma_square * (p20+p02); 
    //GAM 
    cv::sqrt((p2d).mul(p2d) + (4.0f * p11.mul(p11)) ,dst[6]); 
    dst[6] = dst[6] * sigma_square; 

    //FLAT 
    dst[0] = eta*p00; 

    //slop 
    cv::sqrt(p10.mul(p10)+p01.mul(p01),dst[1]); 
    dst[1] *= 2.0f*sigma; 

    //blob dst[2] 
    dst[3] = -dst[2]; 

    //line 
    dst[4] = pow(2.0,-0.5)*(dst[6]+dst[2]); 
    dst[5] = pow(2.0,-0.5)*(dst[6]-dst[2]); 

    //saddle dst[6] 

#ifdef DISPLAY_WHILE_RUNNING   
    // max and min are already declared in the first DISPLAY_WHILE_RUNNING block above
    cv::minMaxIdx(dst[0],&min,&max);// 
    cv::imshow("FLAT",(dst[0]-min)/(max-min)); 

    cv::minMaxIdx(dst[1],&min,&max);// 
    cv::imshow("SLOPE",(dst[1]-min)/(max-min)); 

    cv::minMaxIdx(dst[2],&min,&max); 
    cv::imshow("BLOB+",(dst[2]-min)/(max-min)); 

    cv::minMaxIdx(dst[3],&min,&max);// 
    cv::imshow("BLOB-",(dst[3]-min)/(max-min)); 

    cv::minMaxIdx(dst[4],&min,&max);// 
    cv::imshow("LINE+",(dst[4]-min)/(max-min)); 

    cv::minMaxIdx(dst[5],&min,&max); 
    cv::imshow("LINE-",(dst[5]-min)/(max-min)); 

    cv::minMaxIdx(dst[6],&min,&max); 
    cv::imshow("SADDLE",(dst[6]-min)/(max-min)); 

    cv::waitKey(); 
#endif 
} 
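To produce the labelled image shown in the question (the maximum response at each position), one option is a per-pixel argmax over the seven response planes returned in dst. A minimal sketch; the helper name classify_bif is my own and not part of the original code:

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical helper: label each pixel with the index (0..6) of its largest response.
cv::Mat classify_bif(const std::vector<cv::Mat> & responses)
{
    CV_Assert(!responses.empty());
    cv::Mat labels = cv::Mat::zeros(responses[0].size(), CV_8UC1);
    cv::Mat best   = responses[0].clone();
    for (int k = 1; k < (int)responses.size(); ++k)
    {
        cv::Mat mask = responses[k] > best;   // 255 where response k beats the current best
        labels.setTo(k, mask);
        responses[k].copyTo(best, mask);
    }
    return labels;                            // usage: cv::Mat labels = classify_bif(dst);
}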

Why don't you apply the mean to dg and ddg as well?

cv::filter2D(gray,p20,CV_32FC1,g.t()); 



cv::filter2D(p20,p20,CV_32FC1,g.t()); 

Why do you filter twice here?

//GAM
cv::sqrt((p2d).mul(p2d) + (4.0f * p11.mul(p11)), dst[6]);
dst[6] = dst[6] * sigma_square;

Where did you get this formula?