iOS face detection using Apple's <AVFoundation/AVFoundation.h> API

Demo address: https://github.com/wwpeter/FaceID.git

iOS ships with built-in face detection, and it is very easy to use. Whether you are working with a still image or with the camera, there is a system-level API for it.
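
For the still-image case, one option is Core Image's CIDetector. The snippet below is only a minimal sketch of that path (it is not part of the demo above):

#import <CoreImage/CoreImage.h>
#import <UIKit/UIKit.h>

//Detect faces in a still image with CIDetector.
- (void)detectFacesInImage:(UIImage *)uiImage {
    CIImage *ciImage = [[CIImage alloc] initWithCGImage:uiImage.CGImage];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:nil
                                              options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
    NSArray<CIFeature *> *features = [detector featuresInImage:ciImage];
    for (CIFaceFeature *face in features) {
        //face.bounds is in Core Image coordinates (origin at the bottom-left)
        NSLog(@"Found a face at %@", NSStringFromCGRect(face.bounds));
    }
}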

1. To use AVCaptureVideoDataOutput you need to implement the delegate methods of AVCaptureVideoDataOutputSampleBufferDelegate.

This delegate hands us every video frame, but not as the familiar UIImage, so we have to convert the format ourselves.

Also, the delegate runs on a non-main thread, so whenever we want to update the UI we have to switch back to the main thread explicitly.
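
Concretely, the class hosting the code below (a view in the demo) declares conformance to both delegate protocols and holds the session-related objects. A rough sketch of such a declaration (the class name and anything not shown in the demo are assumptions) could look like:

#import <AVFoundation/AVFoundation.h>
#import <UIKit/UIKit.h>

//Hypothetical view class hosting the capture code below.
@interface FaceScanView : UIView <AVCaptureMetadataOutputObjectsDelegate,
                                  AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, strong) AVCaptureSession *session;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *previewLayer;
@property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
@property (nonatomic, strong) UIImage *imgTemp;
@end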

 

You could still take every frame captured from the video, convert it to a UIImage and run recognition on that, but the speed suffers, so we won't rely on this approach directly for face detection.
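
The setup method below also adds a self.videoDataOutput that is created elsewhere in the demo. A minimal sketch of that getter, assuming BGRA output (so the UIImage conversion later works directly) and a serial background delivery queue, might be:

- (AVCaptureVideoDataOutput *)videoDataOutput {
    if (!_videoDataOutput) {
        _videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
        //Request BGRA pixel buffers so each frame can be turned into a UIImage directly.
        _videoDataOutput.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)};
        //Drop late frames rather than queueing them up.
        _videoDataOutput.alwaysDiscardsLateVideoFrames = YES;
        //Frames are delivered on this serial background queue, not on the main thread.
        dispatch_queue_t queue = dispatch_queue_create("face.videoDataOutput.queue", DISPATCH_QUEUE_SERIAL);
        [_videoDataOutput setSampleBufferDelegate:self queue:queue];
    }
    return _videoDataOutput;
}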

//Camera setup
-(void)faceDeviceInit{
        //1. Get the input device (the front camera)
        NSArray *devices = [AVCaptureDeviceDiscoverySession discoverySessionWithDeviceTypes:@[AVCaptureDeviceTypeBuiltInWideAngleCamera] mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionFront].devices;
        AVCaptureDevice *deviceF = devices[0];
        
        
        //2. Create the capture input from the device
        AVCaptureDeviceInput *input = [[AVCaptureDeviceInput alloc] initWithDevice:deviceF error:nil];
        
        //3. Create the metadata output object
        AVCaptureMetadataOutput *metaout = [[AVCaptureMetadataOutput alloc] init];
        
        //4. Set the delegate that listens for the output's data; deliver callbacks on the main queue
        [metaout setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
        
        self.session = [[AVCaptureSession alloc] init];
        
        //5. Set the session preset (output quality)
        if ([self.session canSetSessionPreset:AVCaptureSessionPreset640x480]) {
            [self.session setSessionPreset:AVCaptureSessionPreset640x480];
        }
        
        //6. Add the input and outputs to the session
        [self.session beginConfiguration];
        if ([self.session canAddInput:input]) {
            [self.session addInput:input];
        }
        if ([self.session canAddOutput:metaout]) {
            [self.session addOutput:metaout];
        }
        //self.videoDataOutput delivers raw video frames (see the getter sketch above)
        if ([_session canAddOutput:self.videoDataOutput]) {
            [_session addOutput:self.videoDataOutput];
        }
        [self.session commitConfiguration];
        
        //7. Tell the metadata output what to detect: faces (up to 10 faces can be recognized at once)
        //(this must be set after the output has been added to the session)
        [metaout setMetadataObjectTypes:@[AVMetadataObjectTypeFace]];
        
        AVCaptureSession *session = self.session;
        
        //8. Create the preview layer
        _previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
        _previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
        _previewLayer.frame = CGRectMake((ScreenWidth-ScaleW(30))/2-ScaleW(100), ScaleW(65), ScaleW(200), ScaleW(200));
        _previewLayer.cornerRadius = 100;
        [self.groundView.layer insertSublayer:_previewLayer atIndex:0];
        
        //9. Set the effective scan area (defaults to the whole frame; values are normalized 0~1 in the
        //   metadata output's own coordinate space, not view points - see the conversion sketch after this method)
        metaout.rectOfInterest = CGRectMake(0, 0, 1, 1);
        
        //The front camera needs this configuration, otherwise the picture comes out mirrored
        for (AVCaptureVideoDataOutput *output in session.outputs) {
            for (AVCaptureConnection *av in output.connections) {
                //Only connections that support mirroring
                if (av.supportsVideoMirroring) {
                    //Orientation / mirroring settings
                    av.videoOrientation = AVCaptureVideoOrientationPortrait;
    //                av.videoMirrored = YES;
                }
            }
        }
        
        //10. Start the session (begin scanning)
        [self.session startRunning];
}
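
If the scan area should match a specific on-screen region instead of the whole frame, the view rect has to be converted into the metadata output's normalized coordinate space rather than assigned directly. A small sketch, which would sit inside faceDeviceInit after the preview layer is created (and is typically only accurate once the session is running), could be:

        //(Sketch) Limit detection to the region shown by the preview layer.
        //metadataOutputRectOfInterestForRect: expects a rect in the preview layer's coordinates
        //and returns the normalized rect that AVCaptureMetadataOutput expects.
        CGRect interest = [_previewLayer metadataOutputRectOfInterestForRect:_previewLayer.bounds];
        metaout.rectOfInterest = interest;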

- (void)captureOutput:(AVCaptureOutput *)output didOutputMetadataObjects:(NSArray<__kindof AVMetadataObject *> *)metadataObjects fromConnection:(AVCaptureConnection *)connection
{
    if (metadataObjects.count > 1) {
        self.tishiLable.text = @"Only one person may be in frame for face recognition~";
        return;
    } else {
        for (AVMetadataObject *metaObject in metadataObjects) {
            if ([metaObject isKindOfClass:[AVMetadataFaceObject class]] && [metaObject.type isEqualToString:AVMetadataObjectTypeFace]) {
                if (!_successful) {
                    if (!self.progress) {
                        //Kick off the network request
                        [self cleanupSelfReference];
                    }
                }
            } else {
                self.tishiLable.text = @"No face detected~";
            }
        }
    }
}
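
To also show where the face was detected, the metadata object can be converted into the preview layer's coordinate space. A minimal sketch that could sit inside the loop above (faceBoxLayer is a hypothetical CALayer used to outline the face):

    //(Sketch) Convert the detected face into preview-layer coordinates for display.
    //The metadata delegate was registered on the main queue, so touching layers here is safe.
    AVMetadataFaceObject *face = (AVMetadataFaceObject *)
        [_previewLayer transformedMetadataObjectForMetadataObject:metaObject];
    if (face) {
        self.faceBoxLayer.frame = face.bounds; //faceBoxLayer: hypothetical CALayer property
    }
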
//Convert a CMSampleBufferRef (assumed to carry a 32BGRA pixel buffer) into a UIImage.
- (UIImage*)imageFromSampleBuffer:(CMSampleBufferRef)p {
    CVImageBufferRef buffer;
    buffer = CMSampleBufferGetImageBuffer(p);

    CVPixelBufferLockBaseAddress(buffer, 0);
    uint8_t *base;
    size_t width, height, bytesPerRow;
    base = (uint8_t *)CVPixelBufferGetBaseAddress(buffer);
    width = CVPixelBufferGetWidth(buffer);
    height = CVPixelBufferGetHeight(buffer);
    bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);

    CGColorSpaceRef colorSpace;
    CGContextRef cgContext;
    colorSpace = CGColorSpaceCreateDeviceRGB();
    cgContext = CGBitmapContextCreate(base, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(colorSpace);

    CGImageRef cgImage;
    UIImage *image;
    cgImage = CGBitmapContextCreateImage(cgContext);
    image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(cgContext);

    CVPixelBufferUnlockBaseAddress(buffer, 0);


    return image;
}

//AVCaptureVideoDataOutputSampleBufferDelegate: called for every frame, on the video output's queue (not the main thread).
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{

    [connection setVideoOrientation:AVCaptureVideoOrientationPortrait];

    //Convert the frame to a UIImage and keep the latest one around.
    self.imgTemp = [self imageFromSampleBuffer:sampleBuffer];
}
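
Because this delegate runs off the main thread, anything UI-related with the converted frame has to hop back to the main queue, for example (previewImageView is a hypothetical UIImageView, not part of the demo):

    //(Sketch) Inside the delegate above: hand the converted frame to the UI on the main queue.
    UIImage *frame = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        self.previewImageView.image = frame; //previewImageView: hypothetical UIImageView
    });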