2017-01-24 3 views
4

とGLKViewで描画する場合、私はGLKViewAVCaptureVideoDataOutputからカメラ出力を描くんだけど、カメラが4 AVCaptureVideoDataOutputを埋めます。私はアスペクトの塗りつぶしをしようとしていますが、カメラの出力はちょうど潰れて、ビューの枠を越えないように見えます。アスペクト比を乱すことなく、GLKViewを使用してフルスクリーンカメラビューを取得するにはどうすればよいですか?アスペクトはCIContext

/// AVCaptureVideoDataOutputSampleBufferDelegate callback: renders each camera
/// frame into the GLKView through the shared CIContext (`renderContext`),
/// optionally running it through `applyFilter` first.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

    // Bail out instead of force-unwrapping: the image buffer can legitimately
    // be nil (e.g. a dropped frame under memory pressure), and crashing the
    // capture callback is never acceptable.
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // CVImageBuffer and CVPixelBuffer are the same underlying CF type in Swift
    // (CVPixelBuffer is a typealias), so the Unmanaged opaque-pointer round-trip
    // the original used is unnecessary — pass the buffer straight through.
    let sourceImage = CIImage(cvPixelBuffer: imageBuffer, options: nil)

    // Run optional detection/filtering; fall back to the raw frame when the
    // filter is unset or returns nil.
    let outputImage = applyFilter?(sourceImage) ?? sourceImage

    // CIContext draws with OpenGL ES, so the view's EAGL context must be the
    // thread's current context before binding the drawable.
    if videoDisplayView.context != EAGLContext.current() {
        EAGLContext.setCurrent(videoDisplayView.context)
    }
    videoDisplayView.bindDrawable()

    // Clear the drawable to mid-grey before compositing the frame.
    glClearColor(0.5, 0.5, 0.5, 1.0)
    glClear(GLbitfield(GL_COLOR_BUFFER_BIT))        // was magic 0x00004000

    // "Source over" blending so Core Image composites premultiplied output
    // correctly over the cleared background.
    glEnable(GLenum(GL_BLEND))                      // was magic 0x0BE2
    glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))  // was 1, 0x0303

    // NOTE(review): drawing the full frame extent into the view bounds squashes
    // the aspect ratio when the two differ — this is the bug the post is about.
    renderContext.draw(outputImage, in: videoDisplayViewBounds, from: outputImage.extent)

    videoDisplayView.display()
}
出力のレンダリング:

// --- Video output setup ---
// Create the data output and register self as its sample-buffer delegate.
// NOTE(review): `sessionQueue` is only assigned further down in this scraped
// snippet — in the real project the queue must exist before this call; the
// ordering here is an artifact of the page scrape, verify against the source.
let videoOutput = AVCaptureVideoDataOutput() 
videoOutput.setSampleBufferDelegate(self, queue: sessionQueue) 
if captureSession.canAddOutput(videoOutput) { 
    captureSession.addOutput(videoOutput) 
} 

// --- View setup ---
// GLKView backed by an OpenGL ES 2 context, sized to its superview.
videoDisplayView = GLKView(frame: superview.bounds, context: EAGLContext(api: .openGLES2)) 
// Rotate 90° because the camera delivers landscape-oriented buffers.
videoDisplayView.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI_2)) 
videoDisplayView.frame = superview.bounds 
superview.addSubview(videoDisplayView) 
superview.sendSubview(toBack: videoDisplayView) 

// CIContext that renders directly into the GLKView's EAGL context.
renderContext = CIContext(eaglContext: videoDisplayView.context) 
// Serial queue for sample-buffer delivery.
sessionQueue = DispatchQueue(label: "AVSessionQueue", attributes: []) 

// Bind the drawable once so drawableWidth/drawableHeight are valid, then
// cache the drawable's pixel bounds for use as the CIContext draw target.
videoDisplayView.bindDrawable() 
videoDisplayViewBounds = CGRect(x: 0, y: 0, width: videoDisplayView.drawableWidth, height: videoDisplayView.drawableHeight) 

ビデオ出力の初期化:

ビューの初期化:

私が試したこと:

// Attempt 1: draw into the image's own extent.
// Results in 4:3 stream leaving a gap at the bottom 
renderContext.draw(outputImage, in: outputImage.extent, from: outputImage.extent) 

// Attempt 2: stretch the target rect to the view's height only.
// Results in same 4:3 stream 
let rect = CGRect(x: 0, y: 0, width: outputImage.extent.width, height: videoDisplayViewBounds.height) 
renderContext.draw(outputImage, in: rect, from: outputImage.extent) 
+0

その後、何か進展はありましたか？私も同様の課題に取り組んでいます。 –

+0

@PaulvanRoosendaal私が投稿した答えを見て、私はちょうどビューのサイズに私の出力イメージを切り取った –

答えて

1

結局、出力を表示するビューのサイズに合わせて出力画像をトリミングすることで解決しました。

/// AVCaptureVideoDataOutputSampleBufferDelegate callback (accepted answer):
/// crops each camera frame to the view's aspect ratio (aspect-fill) before
/// filtering and rendering it into the GLKView through `renderContext`.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

    // Bail out instead of force-unwrapping: the image buffer can be nil for
    // dropped frames, and crashing the capture callback is never acceptable.
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // CVImageBuffer and CVPixelBuffer are the same underlying CF type in Swift
    // (CVPixelBuffer is a typealias), so no Unmanaged round-trip is needed.
    let sourceImage = CIImage(cvPixelBuffer: imageBuffer, options: nil)

    // Largest rect with the view's aspect ratio that fits inside the camera
    // frame; cropping to it yields an aspect-fill of the view when drawn.
    let cropRect = AVMakeRect(aspectRatio: CGSize(width: videoDisplayViewBounds.width, height: videoDisplayViewBounds.height), insideRect: sourceImage.extent)
    let croppedImage = sourceImage.cropping(to: cropRect)

    // cropping(to:) keeps the original coordinate space, so translate the
    // image back to a zero origin. The original only corrected y, which breaks
    // when AVMakeRect centers horizontally (nonzero origin.x) — shift both axes.
    let translatedImage = croppedImage.applying(CGAffineTransform(translationX: -croppedImage.extent.origin.x, y: -croppedImage.extent.origin.y))

    // Run optional detection/filtering; fall back to the cropped frame when
    // the filter is unset or returns nil.
    let outputImage = applyFilter?(translatedImage) ?? translatedImage

    // CIContext draws with OpenGL ES, so the view's EAGL context must be the
    // thread's current context before binding the drawable.
    if videoDisplayView.context != EAGLContext.current() {
        EAGLContext.setCurrent(videoDisplayView.context)
    }
    videoDisplayView.bindDrawable()

    // Clear the drawable to mid-grey before compositing the frame.
    glClearColor(0.5, 0.5, 0.5, 1.0)
    glClear(GLbitfield(GL_COLOR_BUFFER_BIT))        // was magic 0x00004000

    // "Source over" blending so Core Image composites premultiplied output
    // correctly over the cleared background.
    glEnable(GLenum(GL_BLEND))                      // was magic 0x0BE2
    glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))  // was 1, 0x0303

    renderContext.draw(outputImage, in: videoDisplayViewBounds, from: outputImage.extent)

    videoDisplayView.display()
}
関連する問題