
How do you create a CIImage from AVCaptureStillImageOutput in Swift?

I'm using some code that does this in Objective-C. I'm translating it to Swift and struggling to create the CIImage from the AVCaptureStillImageOutput. So if someone could look over this code and tell me where I'm going wrong, that would be great.

Here is the Objective-C code:

- (void)captureImageWithCompletionHander:(void(^)(NSString *fullPath))completionHandler 
{ 
dispatch_suspend(_captureQueue); 

AVCaptureConnection *videoConnection = nil; 
for (AVCaptureConnection *connection in self.stillImageOutput.connections) 
{ 
    for (AVCaptureInputPort *port in connection.inputPorts) 
    { 
     if ([port.mediaType isEqual:AVMediaTypeVideo]) 
     { 
      videoConnection = connection; 
      break; 
     } 
    } 
    if (videoConnection) break; 
} 

__weak typeof(self) weakSelf = self; 

[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error) 
{ 
    if (error) 
    { 
     dispatch_resume(_captureQueue); 
     return; 
    } 

    __block NSArray *filePath = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES); //create an array and store result of our search for the documents directory in it 

    NSString *documentsDirectory = [filePath objectAtIndex:0]; //create NSString object, that holds our exact path to the documents directory 

    NSString *fullPath = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"/iScan_img_%i.pdf",(int)[NSDate date].timeIntervalSince1970]]; 


    @autoreleasepool 
    { 
     NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer]; 
     CIImage *enhancedImage = [[CIImage alloc] initWithData:imageData options:@{kCIImageColorSpace:[NSNull null]}]; 
     imageData = nil; 

     if (weakSelf.cameraViewType == DocScannerCameraViewTypeBlackAndWhite) 
     { 
      enhancedImage = [self filteredImageUsingEnhanceFilterOnImage:enhancedImage]; 
     } 
     else 
     { 
      enhancedImage = [self filteredImageUsingContrastFilterOnImage:enhancedImage]; 
     } 

     if (weakSelf.isBorderDetectionEnabled && rectangleDetectionConfidenceHighEnough(_imageDedectionConfidence)) 
     { 
      CIRectangleFeature *rectangleFeature = [self biggestRectangleInRectangles:[[self highAccuracyRectangleDetector] featuresInImage:enhancedImage]]; 

      if (rectangleFeature) 
      { 
       enhancedImage = [self correctPerspectiveForImage:enhancedImage withFeatures:rectangleFeature]; 
      } 
     } 

     CIFilter *transform = [CIFilter filterWithName:@"CIAffineTransform"]; 
     [transform setValue:enhancedImage forKey:kCIInputImageKey]; 
     NSValue *rotation = [NSValue valueWithCGAffineTransform:CGAffineTransformMakeRotation(-90 * (M_PI/180))]; 
     [transform setValue:rotation forKey:@"inputTransform"]; 
     enhancedImage = transform.outputImage; 

     if (!enhancedImage || CGRectIsEmpty(enhancedImage.extent)) return; 

     static CIContext *ctx = nil; 
     if (!ctx) 
     { 
      ctx = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace:[NSNull null]}]; 
     } 

     CGSize bounds = enhancedImage.extent.size; 
     bounds = CGSizeMake(floorf(bounds.width/4) * 4,floorf(bounds.height/4) * 4); 
     CGRect extent = CGRectMake(enhancedImage.extent.origin.x, enhancedImage.extent.origin.y, bounds.width, bounds.height); 

     static int bytesPerPixel = 8; 
     uint rowBytes = bytesPerPixel * bounds.width; 
     uint totalBytes = rowBytes * bounds.height; 
     uint8_t *byteBuffer = malloc(totalBytes); 

     CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

     [ctx render:enhancedImage toBitmap:byteBuffer rowBytes:rowBytes bounds:extent format:kCIFormatRGBA8 colorSpace:colorSpace]; 

     CGContextRef bitmapContext = CGBitmapContextCreate(byteBuffer,bounds.width,bounds.height,bytesPerPixel,rowBytes,colorSpace,kCGImageAlphaNoneSkipLast); 
     CGImageRef imgRef = CGBitmapContextCreateImage(bitmapContext); 
     CGColorSpaceRelease(colorSpace); 
     CGContextRelease(bitmapContext); 
     free(byteBuffer); 

     if (imgRef == NULL)
     {
      // nothing to release if image creation failed (CFRelease(NULL) would crash)
      return;
     }
     saveCGImageAsJPEGToFilePath(imgRef, fullPath);



     CFRelease(imgRef); 

     dispatch_async(dispatch_get_main_queue(),^
         { 
          completionHandler(fullPath); 

          dispatch_resume(_captureQueue); 
         }); 

     _imageDedectionConfidence = 0.0f; 
    } 
}]; 

}

So basically this captures the content and, if certain if statements are true, captures the content inside the displayed CIRectangleFeature. It then converts the result to a CIImage, and finally to a CGImage that is passed to the save function.
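The saveCGImageAsJPEGToFilePath helper called near the end isn't shown in the question. A minimal Swift sketch of what such a helper could look like, assuming a plain ImageIO destination (this is a reconstruction, not the asker's actual implementation):

import ImageIO
import MobileCoreServices

// Hypothetical stand-in for the unseen saveCGImageAsJPEGToFilePath helper:
// writes a CGImage to disk as a JPEG via an ImageIO destination.
func saveCGImageAsJPEGToFilePath(imgRef: CGImage, filePath: String) {
    let url = URL(fileURLWithPath: filePath) as CFURL
    guard let destination = CGImageDestinationCreateWithURL(url, kUTTypeJPEG, 1, nil) else {
        return
    }
    CGImageDestinationAddImage(destination, imgRef, nil)
    _ = CGImageDestinationFinalize(destination)
}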

I have translated it to Swift like this:

func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) { 

    self.captureQueue?.suspend() 
    var videoConnection: AVCaptureConnection! 
    for connection in self.stillImageOutput.connections{ 
     for port in (connection as! AVCaptureConnection).inputPorts { 
      if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) { 
       videoConnection = connection as! AVCaptureConnection 
       break 
      } 
     } 
     if videoConnection != nil { 
      break 
     } 
    } 
    weak var weakSelf = self 
    self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer, error) -> Void in 
     if error != nil { 
      self.captureQueue?.resume() 
      return 
     } 
     let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true) 
     let documentsDirectory: String = filePath[0] 
     let fullPath: String = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("iScan_img_\(Int(Date().timeIntervalSince1970)).pdf").absoluteString 
     autoreleasepool { 
      let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)) 
      var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()]) 


      if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite { 
       enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!) 
      } 
      else { 
       enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!) 
      } 
      if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) { 
       let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!)) 
       if rectangleFeature != nil { 
        enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!) 
       } 
      } 
      let transform = CIFilter(name: "CIAffineTransform") 
      let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi/180))) 
      transform?.setValue(rotation, forKey: "inputTransform") 
      enhancedImage = transform?.outputImage 
      if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! { 
       return 
      } 
      var ctx: CIContext? 
      if (ctx != nil) { 
       ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()]) 
      } 
      var bounds: CGSize = (enhancedImage?.extent.size)! 
      bounds = CGSize(width: CGFloat((floorf(Float(bounds.width))/4) * 4), height: CGFloat((floorf(Float(bounds.height))/4) * 4)) 
      let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height)) 
      let bytesPerPixel: CGFloat = 8 
      let rowBytes = bytesPerPixel * bounds.width 
      let totalBytes = rowBytes * bounds.height 
      let byteBuffer = malloc(Int(totalBytes)) 
      let colorSpace = CGColorSpaceCreateDeviceRGB() 
      ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace) 
      let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue) 
      let imgRef = bitmapContext?.makeImage() 
      free(byteBuffer) 

      self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath) 
      DispatchQueue.main.async(execute: {() -> Void in 
       completionHandler(fullPath) 
       self.captureQueue?.resume() 
      }) 
      self.imageDedectionConfidence = 0.0 
     } 
    } 
} 

So the AVCaptureStillImageOutput gets converted to a CIImage for all the processing it needs, and then converted to a CGImage for saving. What am I doing wrong in the translation? Or is there a better way to do this?

I really didn't want to have to ask about this, but I can't seem to find a question like it anywhere. At least nothing that refers to creating a CIImage from a capture like this.

Thank you!

Answers


This is the correct translation, in case anyone needs it. Thanks to @Prientus for helping me find my mistake:

func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) { 

    self.captureQueue?.suspend() 
    var videoConnection: AVCaptureConnection! 
    for connection in self.stillImageOutput.connections{ 
     for port in (connection as! AVCaptureConnection).inputPorts { 
      if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) { 
       videoConnection = connection as! AVCaptureConnection 
       break 
      } 
     } 
     if videoConnection != nil { 
      break 
     } 
    } 
    weak var weakSelf = self 
    self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer: CMSampleBuffer?, error) -> Void in 
     if error != nil { 
      self.captureQueue?.resume() 
      return 
     } 
     let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true) 
     let documentsDirectory: String = filePath[0] 
     let fullPath: String = documentsDirectory.appending("/iScan_img_\(Int(Date().timeIntervalSince1970)).pdf") 
     autoreleasepool { 

      let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)) 
      var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()]) 


      if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite { 
       enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!) 
      } 
      else { 
       enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!) 
      } 
      if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) { 
       let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!)) 
       if rectangleFeature != nil { 
        enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!) 
       } 
      } 
      let transform = CIFilter(name: "CIAffineTransform") 
      transform?.setValue(enhancedImage, forKey: kCIInputImageKey) 
      let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi/180))) 
      transform?.setValue(rotation, forKey: "inputTransform") 
      enhancedImage = transform?.outputImage // keep this optional so the nil check below is meaningful
      if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! { 
       return 
      } 
      var ctx: CIContext? 
      if (ctx == nil) { 
       ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()]) 
      } 
      var bounds: CGSize = (enhancedImage!.extent.size) 
      bounds = CGSize(width: CGFloat((floorf(Float(bounds.width))/4) * 4), height: CGFloat((floorf(Float(bounds.height))/4) * 4)) 
      let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height)) 
      let bytesPerPixel: CGFloat = 8 
      let rowBytes = bytesPerPixel * bounds.width 
      let totalBytes = rowBytes * bounds.height 
      let byteBuffer = malloc(Int(totalBytes)) 
      let colorSpace = CGColorSpaceCreateDeviceRGB() 
      ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace) 
      let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue) 
      let imgRef = bitmapContext?.makeImage() 
      free(byteBuffer) 
      if imgRef == nil { 
       return 
      } 
      self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath) 
      DispatchQueue.main.async(execute: {() -> Void in 
       completionHandler(fullPath) 
       self.captureQueue?.resume() 
      }) 
      self.imageDedectionConfidence = 0.0 
     } 
    } 
} 
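Two changes here matter relative to the question's version: the CIAffineTransform filter is now actually handed its input image, and the CIContext check tests == nil rather than != nil, so a context actually gets created:

transform?.setValue(enhancedImage, forKey: kCIInputImageKey) // this line was missing

if (ctx == nil) { // the question had `!= nil`, so no context was ever made
    ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()])
}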

Try replacing the creation of the CIImage with the following:

guard let sampleBuffer = sampleBuffer,
      let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
    return
}
// CIImage(cvPixelBuffer:) is not failable, so it cannot go in the guard's conditional binding
let enhancedImage = CIImage(cvPixelBuffer: pixelBuffer)
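This route also skips the JPEG encode/decode round trip that jpegStillImageNSDataRepresentation incurs. If you want to keep the original code's null color-space behavior, the initializer also takes an options dictionary (a sketch, assuming that behavior is still wanted):

let enhancedImage = CIImage(cvPixelBuffer: pixelBuffer,
                            options: [kCIImageColorSpace: NSNull()])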

The Swift above is the correct translation; thanks again to Prientus for the quick help finding my mistake. Don't I need a 'CIImage' to draw the 'CIRectangleFeature' and the filters I'm using before actually capturing the image? I know how to capture as a 'CGImage' and convert, but I need the 'CIImage' for everything I'm doing to the image before capturing. But thanks for the answer! – CarpenterBlood


@CarpenterBlood Ah, I misunderstood the question. Sorry about that. I'm still thinking it over a bit :) – Prientus


@CarpenterBlood Please see my updated answer. Your code may not be working correctly because you may not be getting the CIImage properly inside your autoreleasepool. – Prientus
