Background / what I want to achieve
https://qiita.com/TakahiroYamamoto/items/e970658a98a4e659cf9e
I am doing face detection using the code from the article above. The original code is written to handle only a single face; I modified it so that an image is pasted over the detected face.
I want to paste the image onto every face visible on screen, but my own attempts have not worked. I would appreciate any help.
Problem / error message
Relevant source code
swift
import UIKit
import AVFoundation

class ViewController: UIViewController {
    var faceTracker: FaceTracker? = nil

    @IBOutlet var cameraView: UIView!

    var hideView = UIView()
    var hideView2 = UIView()
    var rectView = UIView()

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        // Face image overlay
        self.hideView = UIView(frame: self.cameraView.bounds)
        self.view.addSubview(self.hideView)
        let hideImage = UIImageView(image: UIImage(named: "niconico.png"))
        // self.rectView.layer.borderWidth = 3 // prepare a rectangular frame
        self.view.addSubview(self.rectView)
        faceTracker = FaceTracker(view: self.cameraView, findface: { arr in
            var rects = Array<CGRect>()

            for num in 0..<arr.count {
                rects.append(arr[num]) // face number num
                hideImage.frame = rects[num]
                self.hideView.addSubview(hideImage)
            }
            // self.rectView.frame = rect // move the rectangular frame onto the face
            // self.hideView.addSubview(hideImage)
        })
    }
}
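For reference, here is a minimal sketch of what I think the findface closure needs to do: rebuild one UIImageView per detected rect on every callback instead of reusing the single hideImage. The property name overlayViews and the rebuild-per-callback approach are my own assumptions, not something from the original article; "niconico.png" is the asset from my code above.
swift
// Sketch only: a hypothetical `overlayViews` property on ViewController that holds
// one UIImageView per detected face and is rebuilt on every findface callback.
var overlayViews = [UIImageView]()

func showOverlays(for rects: [CGRect]) {
    // Drop last frame's overlays so images from faces that moved or disappeared do not linger
    overlayViews.forEach { $0.removeFromSuperview() }
    overlayViews.removeAll()

    // Add one image view per detected face rect
    for rect in rects {
        let imageView = UIImageView(image: UIImage(named: "niconico.png"))
        imageView.frame = rect
        hideView.addSubview(imageView)
        overlayViews.append(imageView)
    }
}
The closure passed to FaceTracker would then just forward the rects, e.g. faceTracker = FaceTracker(view: self.cameraView, findface: { arr in self.showOverlays(for: arr) }).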
swift
import UIKit
import AVFoundation

class FaceTracker: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    let captureSession = AVCaptureSession()
    let videoDevice = AVCaptureDevice.default(for: AVMediaType.video)
    let audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)

    var videoOutput = AVCaptureVideoDataOutput()
    // var hideView = UIView()
    var view: UIView
    private var findface: (_ arr: Array<CGRect>) -> Void

    required init(view: UIView, findface: @escaping (_ arr: Array<CGRect>) -> Void) {
        self.view = view
        self.findface = findface
        super.init()
        self.initialize()
    }

    func initialize() {
        // Register each device (audio is not actually needed)
        do {
            let videoInput = try AVCaptureDeviceInput(device: self.videoDevice!) as AVCaptureDeviceInput
            self.captureSession.addInput(videoInput)
        } catch let error as NSError {
            print(error)
        }
        do {
            let audioInput = try AVCaptureDeviceInput(device: self.audioDevice!) as AVCaptureInput
            self.captureSession.addInput(audioInput)
        } catch let error as NSError {
            print(error)
        }

        self.videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]

        // Register the delegate that is called for every frame
        // let queue: DispatchQueue = DispatchQueue(label: "myqueue", attributes: DISPATCH_QUEUE_SERIAL)
        let queue: DispatchQueue = DispatchQueue(label: "myqueue", attributes: .concurrent)
        self.videoOutput.setSampleBufferDelegate(self, queue: queue)
        self.videoOutput.alwaysDiscardsLateVideoFrames = true

        self.captureSession.addOutput(self.videoOutput)

        let videoLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
        videoLayer.frame = self.view.bounds
        videoLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill

        self.view.layer.addSublayer(videoLayer)

        // Camera orientation
        for connection in self.videoOutput.connections {
            let conn = connection
            if conn.isVideoOrientationSupported {
                conn.videoOrientation = AVCaptureVideoOrientation.portrait
            }
        }

        self.captureSession.startRunning()
    }

    func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage {
        // Convert the buffer to a UIImage
        let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = (CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
        let imageRef = context!.makeImage()

        CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let resultImage: UIImage = UIImage(cgImage: imageRef!)
        return resultImage
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Synchronous dispatch (with async dispatch the queue backs up and the screen cannot keep up)
        DispatchQueue.main.sync(execute: {

            // Convert the buffer to a UIImage
            let image = self.imageFromSampleBuffer(sampleBuffer: sampleBuffer)
            let ciimage: CIImage! = CIImage(image: image)

            // CIDetectorAccuracyHigh is more accurate (noticeably for faces at a distance) but slower
            let detector: CIDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!
            let faces: NSArray = detector.features(in: ciimage) as NSArray

            if faces.count != 0 {
                var rects = Array<CGRect>()
                for feature in faces {

                    // Coordinate conversion
                    var faceRect: CGRect = (feature as AnyObject).bounds
                    let widthPer = (self.view.bounds.width / image.size.width)
                    let heightPer = (self.view.bounds.height / image.size.height)

                    // UIKit's origin is top-left, Core Image's is bottom-left, so align them
                    faceRect.origin.y = image.size.height - faceRect.origin.y - faceRect.size.height

                    // Scale conversion
                    faceRect.origin.x = faceRect.origin.x * widthPer
                    faceRect.origin.y = faceRect.origin.y * heightPer
                    faceRect.size.width = faceRect.size.width * widthPer
                    faceRect.size.height = faceRect.size.height * heightPer

                    rects.append(faceRect)
                }
                self.findface(rects)
            }
        })
    }
}
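To check my understanding of the coordinate handling in captureOutput, here is the same conversion pulled out into a standalone helper. This is only a sketch and convertToViewRect is a name I made up; it is not part of the original article.
swift
import UIKit

// Sketch: convert a CIFaceFeature bounds rect (image coordinates, bottom-left origin)
// into a UIKit rect (view coordinates, top-left origin). `convertToViewRect` is hypothetical.
func convertToViewRect(_ faceBounds: CGRect, imageSize: CGSize, viewBounds: CGRect) -> CGRect {
    let widthPer = viewBounds.width / imageSize.width
    let heightPer = viewBounds.height / imageSize.height

    // Flip the y axis: Core Image measures from the bottom-left, UIKit from the top-left
    let flippedY = imageSize.height - faceBounds.origin.y - faceBounds.size.height

    // Scale from image coordinates into view coordinates
    return CGRect(x: faceBounds.origin.x * widthPer,
                  y: flippedY * heightPer,
                  width: faceBounds.width * widthPer,
                  height: faceBounds.height * heightPer)
}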
What I tried
for num in 0..<arr.count{
}
I wrapped the image-pasting code in the loop above, but it does not seem to change anything. Is that approach meaningless on its own?
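My guess at why the loop alone does not help (please correct me if I am wrong): there is only one hideImage instance, so each iteration just moves that same view, and after the loop it ends up covering only the last detected face.
swift
for num in 0..<arr.count {
    hideImage.frame = arr[num]           // overwrites the frame set for the previous face
    self.hideView.addSubview(hideImage)  // re-adding the same view does not create a copy
}
// => only the face at arr[arr.count - 1] ends up covered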
Supplementary information (framework/tool versions, etc.)
Swift 5, Xcode 11.1