
Extracting a sub-image of a CVImageBufferRef

I simply want to extract a small region of a YUV 420 image, that is, create a new CVImageBufferRef that contains only a rectangular portion of an original CVImageBufferRef.

Here is what I have tried so far:

- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection 
{ 
    // callback from AVCaptureOutput 
    // 
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef); 
    if (imageBufferRef) 
    { 
     // Take a subset of buffer to create a smaller image 
     CVPixelBufferLockBaseAddress(imageBufferRef, 0); 
     size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0); 
     size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1); 
     size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0); 
     size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1); 
     size_t cropHeightY = 320; 
     size_t cropWidthY = 320; 
     size_t cropHeightUV = cropHeightY/2; 
     size_t cropWidthUV = cropWidthY; 
     size_t cropY_X0 = widthY/2 - (cropWidthY/2); 
     size_t cropY_Y0 = heightY/2 - (cropHeightY/2); 
     size_t cropUV_X0 = widthUV/2 - (cropWidthUV/2); 
     size_t cropUV_Y0 = heightUV/2 - (cropHeightUV/2); 

     void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0); 
     void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1); 

     size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0); 
     size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1); 

     size_t pixelBytesY = bytesPerRowY/widthY; 
     size_t pixelBytesUV = bytesPerRowUV/widthUV; 

     unsigned char *startPosY = (unsigned char *)baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY); 
     unsigned char *startPosUV = (unsigned char *)baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV); 

     size_t bytesPerRowOut = cropWidthY * pixelBytesY; 

     size_t sizeY = bytesPerRowOut * cropHeightY; 
     size_t sizeUV = bytesPerRowOut * cropHeightUV; 

     unsigned char * pixelY = (unsigned char *)malloc(sizeY); 
     unsigned char * pixelUV = (unsigned char *)malloc(sizeUV); 

     for (int i = 0; i < cropHeightY; ++i) { 
      memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut); 
     } 

     for (int i = 0; i < cropHeightUV; ++i) { 
      memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut); 
     } 

     void *baseAddresses[2] = {pixelY, pixelUV}; 
     size_t planeWidths[2] = {cropWidthY, cropWidthUV}; 
     size_t planeHeights[2] = {cropHeightY, cropHeightUV}; 
     size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut}; 

     // create a new CVImageBufferRef from pixelY and pixelUV 
     CVPixelBufferRef outBuff; 
     CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, 
              kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, 
              NULL, 0, 2, baseAddresses, planeWidths, planeHeights, 
              planeBytesPerRow, NULL, NULL, NULL, &outBuff); 

     if(logCameraSettings) { 
      NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY); 
      size_t outWidthY = CVPixelBufferGetWidthOfPlane(outBuff, 0); 
      size_t outHeightY = CVPixelBufferGetHeightOfPlane(outBuff, 0); 
      NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY); 
     } 

     // Here would be the place where I actually want to do something with the image 

     // TEST: show image (in debugger in following method) 
     [self convertToUIImage:imageBufferRef]; // --> works 
     [self convertToUIImage:outBuff]; // --> only gray, does not work 

     // Release the allocated memory 
     CVPixelBufferUnlockBaseAddress(imageBufferRef,0); 
     free(pixelY); 
     free(pixelUV); 
    } 
} 

-(void) convertToUIImage:(CVImageBufferRef)imageBuffer 
{ 
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer]; 
    CIContext *temporaryContext = [CIContext contextWithOptions:nil]; 
    CGImageRef videoImage = [temporaryContext 
          createCGImage:ciImage 
          fromRect:CGRectMake(0, 0, 
               CVPixelBufferGetWidth(imageBuffer), 
               CVPixelBufferGetHeight(imageBuffer))]; 

    // Inspect the following UIImage in debugger. 
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage]; 

    CGImageRelease(videoImage); 
} 

In the code above I created a small function, convertToUIImage, which has no purpose other than letting me inspect the created CVImageBufferRef as a UIImage in the debugger.

Inspecting imageBufferRef shows the correct camera feed.

Inspecting outBuff, however, does not show a small region of that camera feed; it shows an all-gray patch of the correct size.
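
To rule out a format mismatch, here is a quick check of what the capture session actually delivers (a minimal diagnostic sketch using standard CoreVideo getters; it verifies the bi-planar '420v' assumption baked into the code above):

    OSType fmt = CVPixelBufferGetPixelFormatType(imageBufferRef); 
    NSLog(@"format: %c%c%c%c planar: %d planes: %zu", 
          (char)(fmt >> 24), (char)(fmt >> 16), (char)(fmt >> 8), (char)fmt, 
          CVPixelBufferIsPlanar(imageBufferRef), 
          CVPixelBufferGetPlaneCount(imageBufferRef)); 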

So my questions are:

  1. What am I doing wrong here?

  2. Is this the right way to achieve my goal?

Thanks a lot in advance.

Answer

Here is how I solved it:

- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection 
{ 
    // callback from AVCaptureOutput 
    // 
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef); 
    if (imageBufferRef) 
    { 
     // Take a subset of buffer to create a smaller image 
     CVPixelBufferLockBaseAddress(imageBufferRef, 0); 
     size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0); 
     size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1); 
     size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0); 
     size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1); 
     size_t cropHeightY = 500; 
     size_t cropWidthY = 500; 
     size_t cropHeightUV = cropHeightY/2; 
     size_t cropWidthUV = cropWidthY; 
     size_t cropY_X0 = widthY/2 - (cropWidthY/2); 
     size_t cropY_Y0 = heightY/2 - (cropHeightY/2); 
     size_t cropUV_X0 = widthUV/2 - (cropWidthUV/2); 
     size_t cropUV_Y0 = heightUV/2 - (cropHeightUV/2); 

     void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0); 
     void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1); 

     size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0); 
     size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1); 

     size_t pixelBytesY = bytesPerRowY/widthY; 
     size_t pixelBytesUV = bytesPerRowUV/widthUV; 

     unsigned char *startPosY = (unsigned char *)baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY); 
     unsigned char *startPosUV = (unsigned char *)baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV); 

     size_t bytesPerRowOut = cropWidthY * pixelBytesY; 

     size_t sizeY = bytesPerRowOut * cropHeightY; 
     size_t sizeUV = bytesPerRowOut * cropHeightUV; 

     unsigned char * pixelY = (unsigned char *)malloc(sizeY); 
     unsigned char * pixelUV = (unsigned char *)malloc(sizeUV); 

     for (int i = 0; i < cropHeightY; ++i) { 
      memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut); 
     } 

     for (int i = 0; i < cropHeightUV; ++i) { 
      memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut); 
     } 

     void *baseAddresses[2] = {pixelY, pixelUV}; 
     size_t planeWidths[2] = {cropWidthY, cropWidthUV}; 
     size_t planeHeights[2] = {cropHeightY, cropHeightUV}; 
     size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut}; 

     // Transform input to UIImage 
     UIImage *inputAsUIImage = [self convertToUIImage:imageBufferRef]; 

     // Extract subimage of UIImage 
     CGRect fromRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); // or whatever rectangle 
     CGImageRef drawImage = CGImageCreateWithImageInRect(inputAsUIImage.CGImage, fromRect); 
     UIImage *newImage = [UIImage imageWithCGImage:drawImage]; 
     CGImageRelease(drawImage); 

     // Convert UIImage back to CVImageBufferRef 
     // 1. Create a CIImage with the underlying CGImage encapsulated by the UIImage (referred to as 'image'): 
     CIImage *inputImage = [CIImage imageWithCGImage:newImage.CGImage]; 
     // 2. Create a CIContext: 
     CIContext *ciContext = [CIContext contextWithOptions:nil]; // UIGraphicsGetCurrentContext() is NULL in a capture callback 
     // 3. Render the CIImage to a CVPixelBuffer (referred to as 'outputBuffer'): 
     CVPixelBufferRef outputBuffer; 
     // Note: CVPixelBufferCreateWithPlanarBytes wraps pixelY/pixelUV without copying, 
     // so that memory must stay valid for the lifetime of outputBuffer. 
     CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, 
              kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, 
              NULL, 0, 2, baseAddresses, planeWidths, planeHeights, 
              planeBytesPerRow, NULL, NULL, NULL, &outputBuffer); 
     [ciContext render:inputImage toCVPixelBuffer:outputBuffer]; 


     if(logCameraSettings) { 
      NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY); 
      size_t outWidthY = CVPixelBufferGetWidthOfPlane(outputBuffer, 0); 
      size_t outHeightY = CVPixelBufferGetHeightOfPlane(outputBuffer, 0); 
      NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY); 
     } 

     // Do something with it here 

     // Release the allocated memory (this invalidates outputBuffer, which wraps pixelY/pixelUV) 
     CVPixelBufferUnlockBaseAddress(imageBufferRef,0); 
     free(pixelY); 
     free(pixelUV); 
    } 
} 

-(UIImage*) convertToUIImage:(CVImageBufferRef)imageBuffer 
{ 
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer]; 
    CIContext *temporaryContext = [CIContext contextWithOptions:nil]; 
    CGImageRef videoImage = [temporaryContext 
          createCGImage:ciImage 
          fromRect:CGRectMake(0, 0, 
               CVPixelBufferGetWidth(imageBuffer), 
               CVPixelBufferGetHeight(imageBuffer))]; 

    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage]; 

    CGImageRelease(videoImage); 
    return image; 
} 
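
As a side note, the round trip through UIImage can probably be avoided by cropping with Core Image directly. A minimal sketch under the same assumptions as above (the same imageBufferRef, crop variables, ciContext, and '420v' outputBuffer):

    CIImage *full = [CIImage imageWithCVPixelBuffer:imageBufferRef]; 
    CGRect cropRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); 
    // Crop, then translate so the cropped extent starts at (0,0) before rendering. 
    // The crop here is centered, so Core Image's bottom-left origin yields the same rect. 
    CIImage *cropped = [[full imageByCroppingToRect:cropRect] 
        imageByApplyingTransform:CGAffineTransformMakeTranslation(-cropRect.origin.x, 
                                                                  -cropRect.origin.y)]; 
    [ciContext render:cropped toCVPixelBuffer:outputBuffer]; 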