Core Image provides a large number of filters for adjusting images on the GPU, and it works efficiently with video data from a camera feed or a video file.
There is an article on objc.io that covers how to do this. The examples are in Objective-C, but the explanation should be clear enough to follow along.
The basic steps are:
- Create an EAGLContext configured to use OpenGL ES 2.
- Create a GLKView to display the rendered output, using the EAGLContext.
- Create a CIContext that uses the same EAGLContext.
- Create a CIFilter using the CIColorMonochrome Core Image filter.
- Create an AVCaptureSession with an AVCaptureVideoDataOutput.
- In the AVCaptureVideoDataOutputDelegate method, convert the CMSampleBuffer to a CIImage, apply the CIFilter to the image, and draw the filtered image to the CIContext.
This pipeline keeps the video pixel buffers on the GPU all the way from the camera to the display, avoiding a round trip through the CPU, which is what preserves real-time performance.
To save the filtered video, implement an AVAssetWriter and append the sample buffers in the same AVCaptureVideoDataOutputDelegate callback where the filtering happens, as sketched below.
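Here is a minimal sketch of that recording stage, written in the same Swift 2 era style as the display example below. It assumes the view controller's context (the CIContext from the example); the names assetWriter, writerInput, pixelBufferAdaptor, sessionStarted, outputURL, and the helper methods are hypothetical, and the output settings are illustrative rather than prescribed by the original answer.

// Hypothetical recording state and helpers, intended to live inside the
// view controller from the example below (same imports, and `context` is
// the CIContext created there).

private var assetWriter: AVAssetWriter!
private var writerInput: AVAssetWriterInput!
private var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
private var sessionStarted = false

func setUpWriter(outputURL: NSURL, width: Int, height: Int) throws {
    assetWriter = try AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie)

    writerInput = AVAssetWriterInput(
        mediaType: AVMediaTypeVideo,
        outputSettings: [
            AVVideoCodecKey: AVVideoCodecH264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ]
    )
    writerInput.expectsMediaDataInRealTime = true

    // The adaptor supplies a pixel buffer pool to render filtered frames into.
    pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
        assetWriterInput: writerInput,
        sourcePixelBufferAttributes: [
            kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA),
            kCVPixelBufferWidthKey as String: width,
            kCVPixelBufferHeightKey as String: height
        ]
    )

    assetWriter.addInput(writerInput)

    // The adaptor's pixel buffer pool only becomes available after this call.
    assetWriter.startWriting()
}

// Call from captureOutput(_:didOutputSampleBuffer:fromConnection:) with the
// filtered CIImage and the sample buffer's presentation timestamp, e.g.:
//
//     let time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
//     appendFilteredImage(filteredImage, time: time)
//
func appendFilteredImage(filteredImage: CIImage, time: CMTime) {
    guard writerInput.readyForMoreMediaData else { return }
    guard let pool = pixelBufferAdaptor.pixelBufferPool else { return }

    // Start the writer's timeline at the first frame's timestamp.
    if !sessionStarted {
        assetWriter.startSessionAtSourceTime(time)
        sessionStarted = true
    }

    var renderedBuffer: CVPixelBuffer?
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &renderedBuffer)
    guard let buffer = renderedBuffer else { return }

    // Render the filtered image into the buffer, then append it at the
    // original presentation time so frame timing is preserved.
    context.render(filteredImage, toCVPixelBuffer: buffer)
    pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: time)
}

func finishRecording() {
    writerInput.markAsFinished()
    assetWriter.finishWritingWithCompletionHandler {
        // The movie file is now complete at the URL passed to setUpWriter.
    }
}

Real code should also check the Bool results of startWriting() and appendPixelBuffer(_:withPresentationTime:) and inspect assetWriter.error on failure.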
Here is the full display example in Swift.
Example on GitHub.
import UIKit
import GLKit
import AVFoundation

// Quarter-turn rotation applied to each camera frame so the landscape
// sensor output matches the portrait UI.
private let rotationTransform = CGAffineTransformMakeRotation(CGFloat(-M_PI * 0.5))

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    private var context: CIContext!
    private var targetRect: CGRect!
    private var session: AVCaptureSession!
    private var filter: CIFilter!

    @IBOutlet var glView: GLKView!

    override func prefersStatusBarHidden() -> Bool {
        return true
    }

    override func viewDidAppear(animated: Bool) {
        super.viewDidAppear(animated)

        let whiteColor = CIColor(
            red: 1.0,
            green: 1.0,
            blue: 1.0
        )

        filter = CIFilter(
            name: "CIColorMonochrome",
            withInputParameters: [
                "inputColor": whiteColor,
                "inputIntensity": 1.0
            ]
        )

        // GL context shared by the view and the Core Image context.
        let glContext = EAGLContext(
            API: .OpenGLES2
        )

        glView.context = glContext
        glView.enableSetNeedsDisplay = false

        // NSNull() disables color management, which keeps rendering fast.
        context = CIContext(
            EAGLContext: glContext,
            options: [
                kCIContextOutputColorSpace: NSNull(),
                kCIContextWorkingColorSpace: NSNull(),
            ]
        )

        let screenSize = UIScreen.mainScreen().bounds.size
        let screenScale = UIScreen.mainScreen().scale

        targetRect = CGRect(
            x: 0,
            y: 0,
            width: screenSize.width * screenScale,
            height: screenSize.height * screenScale
        )

        // Setup capture session.
        let cameraDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

        let videoInput = try? AVCaptureDeviceInput(
            device: cameraDevice
        )

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dispatch_get_main_queue())

        session = AVCaptureSession()
        session.beginConfiguration()
        session.addInput(videoInput)
        session.addOutput(videoOutput)
        session.commitConfiguration()
        session.startRunning()
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }

        // Wrap the pixel buffer in a CIImage without copying it off the GPU.
        let originalImage = CIImage(
            CVPixelBuffer: pixelBuffer,
            options: [
                kCIImageColorSpace: NSNull()
            ]
        )

        let rotatedImage = originalImage.imageByApplyingTransform(rotationTransform)

        filter.setValue(rotatedImage, forKey: kCIInputImageKey)

        guard let filteredImage = filter.outputImage else {
            return
        }

        context.drawImage(filteredImage, inRect: targetRect, fromRect: filteredImage.extent)

        glView.display()
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didDropSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        let seconds = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        print("dropped sample buffer: \(seconds)")
    }
}


