This is what I use in my unit tests to compare images. Unlike other approaches (e.g., comparing the output of UIImagePNGRepresentation directly), it works even if the images have different color spaces (e.g., RGB and grayscale).

@implementation UIImage (HPIsEqualToImage)

- (BOOL)hp_isEqualToImage:(UIImage*)image
{
    NSData *data = [image hp_normalizedData];
    NSData *originalData = [self hp_normalizedData];
    return [originalData isEqualToData:data];
}

- (NSData*)hp_normalizedData
{
    // Redraw the image into a fresh bitmap context so that both images end
    // up in the same underlying format before PNG encoding.
    const CGSize pixelSize = CGSizeMake(self.size.width * self.scale, self.size.height * self.scale);
    UIGraphicsBeginImageContext(pixelSize);
    [self drawInRect:CGRectMake(0, 0, pixelSize.width, pixelSize.height)];
    UIImage *drawnImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return UIImagePNGRepresentation(drawnImage);
}

@end

It's not very efficient, so I would recommend against using it in production code unless performance is not an issue.
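
For example, here's a minimal sketch of how you might call it from an XCTest case; the asset name and rendering helper are hypothetical placeholders:

- (void)testRenderedImageMatchesReference
{
    UIImage *reference = [UIImage imageNamed:@"reference"]; // hypothetical test asset
    UIImage *rendered = [self renderSubjectView];           // hypothetical helper
    XCTAssertTrue([rendered hp_isEqualToImage:reference]);
}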


If you have two UIImages, get their CGImageRef Quartz representations from those objects. Then create two new bitmap contexts, each backed by a memory buffer that you allocate and pass in, one for each image, and use CGContextDrawImage to draw the images into them. The bytes of each image are now in its buffer, so you can loop through them manually or use memcmp to check for differences.

Apple's own detailed explanation and sample code for creating bitmap contexts and drawing into them are here:

https://developer.apple.com/library/content/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/dq_context/dq_context.html

The difference for you is that you're drawing an existing image into the context. Use CGContextDrawImage for this.
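
To make that concrete, here's a minimal sketch of the buffer-and-memcmp comparison (my own illustration, not Apple's sample code). It assumes both images can be redrawn as 8-bit premultiplied ARGB and bails out if the pixel dimensions differ:

// Draws both images into freshly allocated ARGB bitmap buffers and
// compares the raw bytes with memcmp.
static BOOL HPImagesHaveIdenticalPixels(CGImageRef a, CGImageRef b)
{
    size_t width = CGImageGetWidth(a);
    size_t height = CGImageGetHeight(a);
    if (width != CGImageGetWidth(b) || height != CGImageGetHeight(b))
        return NO; // different pixel sizes can't match byte-for-byte

    size_t bytesPerRow = width * 4; // 4 bytes per pixel: ARGB
    size_t byteCount = bytesPerRow * height;
    void *bufferA = calloc(byteCount, 1);
    void *bufferB = calloc(byteCount, 1);

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef contextA = CGBitmapContextCreate(bufferA, width, height, 8, bytesPerRow,
                                                  colorSpace, kCGImageAlphaPremultipliedFirst);
    CGContextRef contextB = CGBitmapContextCreate(bufferB, width, height, 8, bytesPerRow,
                                                  colorSpace, kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(colorSpace);

    BOOL identical = NO;
    if (bufferA && bufferB && contextA && contextB)
    {
        CGRect rect = CGRectMake(0, 0, width, height);
        CGContextDrawImage(contextA, rect, a);
        CGContextDrawImage(contextB, rect, b);
        identical = (memcmp(bufferA, bufferB, byteCount) == 0);
    }

    if (contextA) CGContextRelease(contextA);
    if (contextB) CGContextRelease(contextB);
    free(bufferA);
    free(bufferB);
    return identical;
}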


Here's a code sample:

- (NSMutableArray *)getImageBinary:(UIImage *)ImageToCompare
{
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;

    // Get the image width and height. We'll use the entire image.
    size_t pixelsWide = CGImageGetWidth(ImageToCompare.CGImage);
    size_t pixelsHigh = CGImageGetHeight(ImageToCompare.CGImage);

    // Each pixel in the bitmap is represented by 4 bytes:
    // 8 bits each of red, green, blue, and alpha.
    size_t bitmapBytesPerRow = pixelsWide * 4;

    NSMutableArray *firstImagearray = [[NSMutableArray alloc] init];

    // Use the generic RGB color space.
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return nil;
    }

    // Create the bitmap context. We want pre-multiplied ARGB, 8 bits
    // per component. Regardless of the source image format (CMYK,
    // grayscale, and so on), it will be converted to the format specified
    // here by CGBitmapContextCreate. Passing NULL as the data parameter
    // lets Quartz allocate the backing buffer for us.
    context = CGBitmapContextCreate(NULL,
                                    pixelsWide,
                                    pixelsHigh,
                                    8,      // bits per component
                                    bitmapBytesPerRow,
                                    colorSpace,
                                    kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(colorSpace);

    if (context == NULL)
    {
        fprintf(stderr, "Context not created!\n");
        return nil;
    }

    // Draw the image into the bitmap context. Once we draw, the memory
    // backing the context contains the raw image data in the specified
    // color space.
    CGRect rect = CGRectMake(0, 0, pixelsWide, pixelsHigh);
    CGContextDrawImage(context, rect, ImageToCompare.CGImage);

    unsigned char *data = CGBitmapContextGetData(context);
    if (data != NULL)
    {
        // Copy every byte (A, R, G, B for each pixel) into the array.
        size_t max = pixelsWide * pixelsHigh * 4;
        for (size_t i = 0; i < max; i += 4)
        {
            [firstImagearray addObject:[NSNumber numberWithInt:data[i + 0]]];
            [firstImagearray addObject:[NSNumber numberWithInt:data[i + 1]]];
            [firstImagearray addObject:[NSNumber numberWithInt:data[i + 2]]];
            [firstImagearray addObject:[NSNumber numberWithInt:data[i + 3]]];
        }
    }

    CGContextRelease(context);
    return firstImagearray;
}

- (BOOL)Compare:(UIImage *)ImageToCompare secondImage:(UIImage *)secondImage
{
    // Scale both images to the same size so their byte arrays line up.
    ImageToCompare = [ImageToCompare scaleToSize:CGSizeMake(self.appdelegate.ScreenWidth, self.appdelegate.ScreenHeigth)];
    secondImage = [secondImage scaleToSize:CGSizeMake(self.appdelegate.ScreenWidth, self.appdelegate.ScreenHeigth)];

    NSArray *first = [[NSArray alloc] initWithArray:[self getImageBinary:ImageToCompare]];
    NSArray *second = [[NSArray alloc] initWithArray:[self getImageBinary:secondImage]];

    if (first.count != second.count)
        return NO;

    for (NSUInteger x = 0; x < first.count; x++)
    {
        if ([first[x] intValue] != [second[x] intValue])
            return NO;
    }
    return YES;
}
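
Calling it then looks like this (the image assets here are hypothetical):

UIImage *first = [UIImage imageNamed:@"first"];   // hypothetical assets
UIImage *second = [UIImage imageNamed:@"second"];
BOOL identical = [self Compare:first secondImage:second];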

Update

Based on Skycamelfalling's comment, I verified that my unit tests still pass when using UIImage's pngData() method, instead of drawing with an image context. Much simpler!

For historical interest: here is a Swift 4 variant of hpique's answer. It works for me in my unit tests when I need to test two UIImages for "sameness".

fileprivate extension UIImage {
    func makeNormalizedData() -> Data? {
        // Redraw into a fresh bitmap context so both images end up in the
        // same format, then encode as PNG (mirrors the Objective-C version).
        let pixelSize = CGSize(width: size.width * scale, height: size.height * scale)
        UIGraphicsBeginImageContext(pixelSize)
        defer { UIGraphicsEndImageContext() }
        draw(in: CGRect(origin: .zero, size: pixelSize))
        guard let drawnImage = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
        return UIImagePNGRepresentation(drawnImage)
    }
}