I am currently building a real-time face recognition demo app, following the article linked below.
https://qiita.com/TakahiroYamamoto/items/e970658a98a4e659cf9e
However, I have no experience developing in Xcode, and resolving a single error has been taking an enormous amount of time.
The error message is:
Fatal error: Unexpectedly found nil while unwrapping an Optional value
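As far as I understand, this error is raised whenever a nil Optional is force-unwrapped with `!`. A minimal illustration, unrelated to the project code:

```swift
let name: String? = nil
print(name!) // Fatal error: Unexpectedly found nil while unwrapping an Optional value
```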
The only files I have changed since creating the project are the two below.
The error message points to the line marked with the comment `// error location` in the first file (ViewController.swift).
I understand that a stray nil is the cause of the error, but I cannot figure out where that nil was declared.
If anyone has an idea, I would be grateful for your advice.
Thank you in advance.
Swift code
ViewController.swift
```swift
//
//  ViewController.swift
//  get_face_to_hello
//
//  Created by name on 2019/02/18.
//  Copyright © 2019 name. All rights reserved.
//

import UIKit

class ViewController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    var faceTracker: FaceTracker?
    @IBOutlet var cameraView: UIView! // a view placed on the ViewController in the storyboard, connected to this outlet

    var rectView = UIView()

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        self.rectView.layer.borderWidth = 3 // prepare a rectangular frame
        self.view.addSubview(self.rectView)
        faceTracker = FaceTracker(view: self.cameraView, findface: { arr in // error location
            let rect = arr[0] // use only the first detected face
            self.rectView.frame = rect // move the frame over the face
        })
    }

}
```
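From searching around, one commonly cited cause of this exact crash is an `@IBOutlet` that was never connected in the storyboard, in which case `cameraView` is nil the moment `viewDidAppear` touches it. Below is a minimal diagnostic version of `viewDidAppear` (just a sketch to narrow the problem down, assuming the storyboard is the only other moving part, not a confirmed fix):

```swift
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    self.rectView.layer.borderWidth = 3
    self.view.addSubview(self.rectView)

    // If the outlet is not connected in the storyboard, cameraView is nil
    // and any access to it crashes with the reported error.
    guard let cameraView = self.cameraView else {
        print("cameraView outlet is nil - check the storyboard connection")
        return
    }

    faceTracker = FaceTracker(view: cameraView, findface: { arr in
        let rect = arr[0]
        self.rectView.frame = rect
    })
}
```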
FaceTracker.swift
```swift
import UIKit
import AVFoundation

class FaceTracker: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    let captureSession = AVCaptureSession()
    let videoDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)

    var videoOutput = AVCaptureVideoDataOutput()
    var view: UIView
    private var findface: (_ arr: Array<CGRect>) -> Void

    required init(view: UIView, findface: @escaping (_ arr: Array<CGRect>) -> Void) {
        self.view = view
        self.findface = findface
        super.init()
        self.initialize()
    }

    func initialize() {
        // Register each capture device (audio is not actually needed)
        do {
            let videoInput = try AVCaptureDeviceInput(device: self.videoDevice!)
            self.captureSession.addInput(videoInput)
        } catch let error as NSError {
            print(error)
        }
        do {
            let audioInput = try AVCaptureDeviceInput(device: self.audioDevice!)
            self.captureSession.addInput(audioInput)
        } catch let error as NSError {
            print(error)
        }

        self.videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]

        // Register the delegate called for every frame
        let queue = DispatchQueue(label: "myqueue", attributes: .concurrent)
        self.videoOutput.setSampleBufferDelegate(self, queue: queue)
        self.videoOutput.alwaysDiscardsLateVideoFrames = true

        self.captureSession.addOutput(self.videoOutput)

        let videoLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
        videoLayer.frame = self.view.bounds
        videoLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill

        self.view.layer.addSublayer(videoLayer)

        // Camera orientation
        for connection in self.videoOutput.connections {
            if connection.isVideoOrientationSupported {
                connection.videoOrientation = AVCaptureVideoOrientation.portrait
            }
        }

        self.captureSession.startRunning()
    }

    func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage {
        // Convert the sample buffer to a UIImage
        let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = (CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
        let imageRef = context!.makeImage()

        CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        return UIImage(cgImage: imageRef!)
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Process synchronously (with async dispatch the queue backs up and the screen cannot keep up)
        DispatchQueue.main.sync(execute: {

            // Convert the buffer to a UIImage
            let image = self.imageFromSampleBuffer(sampleBuffer: sampleBuffer)
            let ciimage: CIImage! = CIImage(image: image)

            // CIDetectorAccuracyHigh is more accurate (noticeably at longer distances) but slower
            let detector: CIDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])!
            let faces: NSArray = detector.features(in: ciimage) as NSArray

            if faces.count != 0 {
                var rects = Array<CGRect>()
                for feature in faces {

                    // Coordinate conversion
                    var faceRect: CGRect = (feature as AnyObject).bounds
                    let widthPer = (self.view.bounds.width / image.size.width)
                    let heightPer = (self.view.bounds.height / image.size.height)

                    // UIKit's origin is top-left, Core Image's is bottom-left, so align them
                    faceRect.origin.y = image.size.height - faceRect.origin.y - faceRect.size.height

                    // Scale conversion
                    faceRect.origin.x = faceRect.origin.x * widthPer
                    faceRect.origin.y = faceRect.origin.y * heightPer
                    faceRect.size.width = faceRect.size.width * widthPer
                    faceRect.size.height = faceRect.size.height * heightPer

                    rects.append(faceRect)
                }
                self.findface(rects)
            }
        })
    }
}
```
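The force-unwraps inside `FaceTracker.initialize()` also look suspect to me: `AVCaptureDevice.default(for:)` returns nil on the iOS Simulator, and on hardware without the requested capture device, so `self.videoDevice!` would crash with the same message there. A guarded sketch of the start of `initialize()` (again only a diagnostic aid, not a confirmed fix):

```swift
func initialize() {
    // AVCaptureDevice.default(for:) returns nil on the Simulator or when the
    // requested hardware is missing; guard instead of force-unwrapping.
    guard let videoDevice = self.videoDevice else {
        print("No video capture device - run on a real device")
        return
    }
    do {
        let videoInput = try AVCaptureDeviceInput(device: videoDevice)
        self.captureSession.addInput(videoInput)
    } catch {
        print(error)
    }
    // ... rest of the setup unchanged ...
}
```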