2009-08-21 107 views
4

我做了一个将图像转换为灰度的类。但它的工作方式太慢了。有没有办法让它工作得更快?转换为灰度 - 太慢

这里是我的类:

@implementation PixelProcessing 

SYNTHESIZE_SINGLETON_FOR_CLASS(PixelProcessing); 

#define bytesPerPixel 4 
#define bitsPerComponent 8 


-(UIImage*)scaleAndRotateImage: (UIImage*)img withMaxResolution: (int)kMaxResolution 
{ 
    CGImageRef imgRef = img.CGImage; 

    CGFloat width = CGImageGetWidth(imgRef); 
    CGFloat height = CGImageGetHeight(imgRef); 


    CGAffineTransform transform = CGAffineTransformIdentity; 
    CGRect bounds = CGRectMake(0, 0, width, height); 

    if ((kMaxResolution != 0) && (width > kMaxResolution || height > kMaxResolution)) { 
     CGFloat ratio = width/height; 
     if (ratio > 1) { 
      bounds.size.width = kMaxResolution; 
      bounds.size.height = bounds.size.width/ratio; 
     } 
     else { 
      bounds.size.height = kMaxResolution; 
      bounds.size.width = bounds.size.height * ratio; 
     } 
    } 

    CGFloat scaleRatio; 
    if (kMaxResolution != 0){ 
     scaleRatio = bounds.size.width/width; 
    } else 
    { 
     scaleRatio = 1.0f; 
    } 

    CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef)); 
    CGFloat boundHeight; 
    UIImageOrientation orient = img.imageOrientation; 
    switch(orient) { 

     case UIImageOrientationUp: //EXIF = 1 
      transform = CGAffineTransformIdentity; 
      break; 

     case UIImageOrientationUpMirrored: //EXIF = 2 
      transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0); 
      transform = CGAffineTransformScale(transform, -1.0, 1.0); 
      break; 

     case UIImageOrientationDown: //EXIF = 3 
      transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height); 
      transform = CGAffineTransformRotate(transform, M_PI); 
      break; 

     case UIImageOrientationDownMirrored: //EXIF = 4 
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.height); 
      transform = CGAffineTransformScale(transform, 1.0, -1.0); 
      break; 

     case UIImageOrientationLeftMirrored: //EXIF = 5 
      boundHeight = bounds.size.height; 
      bounds.size.height = bounds.size.width; 
      bounds.size.width = boundHeight; 
      transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width); 
      transform = CGAffineTransformScale(transform, -1.0, 1.0); 
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI/2.0); 
      break; 

     case UIImageOrientationLeft: //EXIF = 6 
      boundHeight = bounds.size.height; 
      bounds.size.height = bounds.size.width; 
      bounds.size.width = boundHeight; 
      transform = CGAffineTransformMakeTranslation(0.0, imageSize.width); 
      transform = CGAffineTransformRotate(transform, 3.0 * M_PI/2.0); 
      break; 

     case UIImageOrientationRightMirrored: //EXIF = 7 
      boundHeight = bounds.size.height; 
      bounds.size.height = bounds.size.width; 
      bounds.size.width = boundHeight; 
      transform = CGAffineTransformMakeScale(-1.0, 1.0); 
      transform = CGAffineTransformRotate(transform, M_PI/2.0); 
      break; 

     case UIImageOrientationRight: //EXIF = 8 
      boundHeight = bounds.size.height; 
      bounds.size.height = bounds.size.width; 
      bounds.size.width = boundHeight; 
      transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0); 
      transform = CGAffineTransformRotate(transform, M_PI/2.0); 
      break; 

     default: 
      [NSException raise:NSInternalInconsistencyException format: @"Invalid image orientation"]; 

    } 

    UIGraphicsBeginImageContext(bounds.size); 

    CGContextRef context = UIGraphicsGetCurrentContext(); 

    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) { 
     CGContextScaleCTM(context, -scaleRatio, scaleRatio); 
     CGContextTranslateCTM(context, -height, 0); 
    } 
    else { 
     CGContextScaleCTM(context, scaleRatio, -scaleRatio); 
     CGContextTranslateCTM(context, 0, -height); 
    } 

    CGContextConcatCTM(context, transform); 

    CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef); 
    UIImage *tempImage = UIGraphicsGetImageFromCurrentImageContext(); 
    UIGraphicsEndImageContext(); 

    return tempImage; 
} 


#pragma mark Getting Ans Writing Pixels 
-(float*) getColorForPixel: (NSUInteger)xCoordinate andForY: (NSUInteger)yCoordinate 
{ 
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel; 

    float *colorToReturn = malloc(3); 
    colorToReturn[0] = bitmap[byteIndex]/255.f; //Red 
    colorToReturn[1] = bitmap[byteIndex + 1]/255.f; //Green 
    colorToReturn[2] = bitmap[byteIndex + 2]/255.f; //Blue 

    return colorToReturn; 
} 

-(void) writeColor: (float*)colorToWrite forPixelAtX: (NSUInteger)xCoordinate andY: (NSUInteger)yCoordinate 
{ 
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel; 

    bitmap[byteIndex] = (unsigned char) (colorToWrite[0] * 255); 
    bitmap[byteIndex + 1] = (unsigned char) (colorToWrite[1] * 255); 
    bitmap[byteIndex + 2] = (unsigned char) (colorToWrite[2] * 255); 
} 

#pragma mark Bitmap 

-(float) getAverageBrightnessForImage: (UIImage*)img 
{ 
    UIImage *tempImage = [self scaleAndRotateImage: img withMaxResolution: 100]; 

    unsigned char *rawData = [self getBytesForImage: tempImage]; 

    double aBrightness = 0; 

    for(int y = 0; y < tempImage.size.height; y++) { 
     for(int x = 0; x < tempImage.size.width; x++) { 
      int byteIndex = ((tempImage.size.width * y) + x) * bytesPerPixel; 

      aBrightness += (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]); 


     } 
    } 

    free(rawData); 

    aBrightness /= 3.0f; 
    aBrightness /= 255.0f; 
    aBrightness /= tempImage.size.width * tempImage.size.height; 

    return aBrightness; 
} 

-(unsigned char*) getBytesForImage: (UIImage*)pImage 
{ 
    CGImageRef image = [pImage CGImage]; 
    NSUInteger width = CGImageGetWidth(image); 
    NSUInteger height = CGImageGetHeight(image); 

    bytesPerRow = bytesPerPixel * width; 

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 
    unsigned char *rawData = malloc(height * width * bytesPerPixel); 
    CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast); 
    CGColorSpaceRelease(colorSpace); 

    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image); 
    CGContextRelease(context); 

    return rawData; 
} 

-(void) loadWithImage: (UIImage*)img 
{ 
    averageBrightness = [self getAverageBrightnessForImage: img]; 
    currentImage = [self scaleAndRotateImage: img withMaxResolution: 0]; 

    imgWidth = currentImage.size.width; 
    imgHeight = currentImage.size.height; 

    bitmap = [self getBytesForImage: currentImage]; 

    bytesPerRow = bytesPerPixel * imgWidth; 
} 

-(void) processImage 
{ 
    // now convert to grayscale 

    for(int y = 0; y < imgHeight; y++) { 
     for(int x = 0; x < imgWidth; x++) { 
      float *currentColor = [self getColorForPixel: x andForY: y]; 

      //Grayscale 
      float averageColor = (currentColor[0] + currentColor[1] + currentColor[2])/3.0f; 

      averageColor += 0.5f - averageBrightness; 

      if (averageColor > 1.0f) averageColor = 1.0f; 

      currentColor[0] = averageColor; 
      currentColor[1] = averageColor; 
      currentColor[2] = averageColor; 

      [self writeColor: currentColor forPixelAtX: x andY: y]; 

      free(currentColor); 
     } 
    } 
} 

-(UIImage*) getProcessedImage 
{ 
    // create a UIImage 
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 
    CGContextRef context = CGBitmapContextCreate(bitmap, imgWidth, imgHeight, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast); 
    CGImageRef image = CGBitmapContextCreateImage(context); 
    CGContextRelease(context); 
    CGColorSpaceRelease(colorSpace); 
    UIImage *resultUIImage = [UIImage imageWithCGImage: image]; 
    CGImageRelease(image); 

    return resultUIImage; 
} 

-(void) releaseCurrentImage 
{ 
    free(bitmap); 
} 


@end 

我将图像转换成灰度以下列方式:

[ [PixelProcessing sharedPixelProcessing] loadWithImage: imageToDisplay.image]; 
    [ [PixelProcessing sharedPixelProcessing] processImage]; 
    imageToDisplay.image = [ [PixelProcessing sharedPixelProcessing] getProcessedImage]; 
    [ [PixelProcessing sharedPixelProcessing] releaseCurrentImage]; 

为什么工作这么慢?有没有办法获得像素的RGB颜色分量的浮点值?我怎样才能优化它?

谢谢。

+2

如果你关心alpha值,那么保存并传递它。也是一个更好的方法来实现灰度是不是做平均,但这: 灰度亮度= 0.3RED + 0.59GREEN + 0.11蓝色 – mahboudz 2009-08-31 19:32:00

回答

3

找出速度问题的方法是使用Shark进行配置文件。 (在Xcode中,运行 - >开始使用性能工具 - >鲨鱼。)但是,在这种情况下,我有理由确信,主要问题是每像素malloc/free,浮点运算和两个方法调用内部处理循环。

要避免的malloc /免费的,你想要做这样的事情,而不是:

- (void) getColorForPixelX:(NSUInteger)x y:(NSUInteger)y pixel:(float[3])pixel 
{ /* Write stuff to pixel[0], pixel[1], pixel[2] */ } 

// To call: 
float pixel[3]; 
for (each pixel) 
{ 
    [self getColorForPixelX:x y:y pixel:pixel]; 
    // Do stuff 
} 

放缓的第二个可能的来源是使用浮点的 - 或者更确切地说,转化为成本,从浮点。对于你正在编写的过滤器,使用整数数学很简单 - 添加整数像素值并除以255 * 3。 (顺便说一句,这是转换为灰度一个非常糟糕的方式。见http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale)。

方法调用的速度快于它们是什么,但相比滤波器的基本算法仍然相当缓慢。 (对于某些数字,请参阅this article。)消除方法调用的简单方法是用内联函数替换它们。

12

您可以让石英做灰度转换为您提供:

CGImageRef grayscaleCGImageFromCGImage(CGImageRef inputImage) { 
    size_t width = CGImageGetWidth(inputImage); 
    size_t height = CGImageGetHeight(inputImage); 

    // Create a gray scale context and render the input image into that 
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceGray(); 
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 
          4*width, colorspace, kCGBitmapByteOrderDefault); 
    CGContextDrawImage(context, CGRectMake(0,0, width,height), inputImage); 

    // Get an image representation of the grayscale context which the input 
    // was rendered into. 
    CGImageRef outputImage = CGBitmapContextCreateImage(context); 

    // Cleanup 
    CGContextRelease(context); 
    CGColorSpaceRelease(colorspace); 
    return (CGImageRef)[(id)outputImage autorelease]; 
} 
7

我最近已经解决了这个同样的问题,用下面的代码上来(它也保留了阿尔法):

@implementation UIImage (grayscale) 

typedef enum { 
    ALPHA = 0, 
    BLUE = 1, 
    GREEN = 2, 
    RED = 3 
} PIXELS; 

- (UIImage *)convertToGrayscale { 
    CGSize size = [self size]; 
    int width = size.width; 
    int height = size.height; 

    // the pixels will be painted to this array 
    uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t)); 

    // clear the pixels so any transparency is preserved 
    memset(pixels, 0, width * height * sizeof(uint32_t)); 

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

    // create a context with RGBA pixels 
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, 
               kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast); 

    // paint the bitmap to our context which will fill in the pixels array 
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]); 

    for(int y = 0; y < height; y++) { 
     for(int x = 0; x < width; x++) { 
      uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x]; 

      // convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale 
      uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE]; 

      // set the pixels to gray 
      rgbaPixel[RED] = gray; 
      rgbaPixel[GREEN] = gray; 
      rgbaPixel[BLUE] = gray; 
     } 
    } 

    // create a new CGImageRef from our context with the modified pixels 
    CGImageRef image = CGBitmapContextCreateImage(context); 

    // we're done with the context, color space, and pixels 
    CGContextRelease(context); 
    CGColorSpaceRelease(colorSpace); 
    free(pixels); 

    // make a new UIImage to return 
    UIImage *resultUIImage = [UIImage imageWithCGImage:image]; 

    // we're done with image now too 
    CGImageRelease(image); 

    return resultUIImage; 
} 

@end 
+0

这是什么对我有用。好答案! – Axeva 2010-11-26 15:48:25

+0

为了使这个工作与视网膜图像,还在开始时在宽度和高度添加* self.scale。 'int width = size.width * self.scale; int height = size.height * self.scale;' – vakio 2012-08-23 12:28:10

1

您是否尝试过使用亮度混合模式?用混合模式混合原始图像的白色图像似乎会产生灰度。 左边的右边和背景图像上这两个,前景图像:

alt text http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/both_images.jpg

兑入kCGBlendModeLuminosity结果在此:

alt text http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/luminosity_image.jpg

有关详细信息,请参阅:Drawing with Quartz 2D: Using Blend Modes With Images