Environment
Unity
OpenCV Plus Unity
RealSense D415
I want to detect AR markers using the RealSense.
I originally planned to use Vuforia, but it fails with the error "Could not connect pins - RenderStream() UnityEngine.WebCamTexture:Play()", so while looking for an alternative I found an asset called OpenCV Plus Unity.
I tried its demos and confirmed that the face detection demo, which detects faces and draws rectangles around them, works, so I would like to change this face detection into AR marker detection.
Questions
How can I detect AR markers?
What does the OpenCvSharp.Demo in the script mean?
I would appreciate answers to these two points.
Thank you in advance.
namespace OpenCvSharp.Demo
{
    using System;
    using UnityEngine;
    using System.Collections.Generic;
    using UnityEngine.UI;
    using OpenCvSharp;

    public class FaceDetectorScene : WebCamera
    {
        public TextAsset faces;
        public TextAsset eyes;
        public TextAsset shapes;

        private FaceProcessorLive<WebCamTexture> processor;

        /// <summary>
        /// Default initializer for MonoBehavior sub-classes
        /// </summary>
        protected override void Awake()
        {
            base.Awake();
            base.forceFrontalCamera = true; // we work with frontal cams here, let's force it for macOS s MacBook doesn't state frontal cam correctly

            byte[] shapeDat = shapes.bytes;
            if (shapeDat.Length == 0)
            {
                string errorMessage =
                    "In order to have Face Landmarks working you must download special pre-trained shape predictor " +
                    "available for free via DLib library website and replace a placeholder file located at " +
                    "\"OpenCV+Unity/Assets/Resources/shape_predictor_68_face_landmarks.bytes\"\n\n" +
                    "Without shape predictor demo will only detect face rects.";

#if UNITY_EDITOR
                // query user to download the proper shape predictor
                if (UnityEditor.EditorUtility.DisplayDialog("Shape predictor data missing", errorMessage, "Download", "OK, process with face rects only"))
                    Application.OpenURL("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2");
#else
                UnityEngine.Debug.Log(errorMessage);
#endif
            }

            processor = new FaceProcessorLive<WebCamTexture>();
            processor.Initialize(faces.text, eyes.text, shapes.bytes);

            // data stabilizer - affects face rects, face landmarks etc.
            processor.DataStabilizer.Enabled = true;        // enable stabilizer
            processor.DataStabilizer.Threshold = 2.0;       // threshold value in pixels
            processor.DataStabilizer.SamplesCount = 2;      // how many samples do we need to compute stable data

            // performance data - some tricks to make it work faster
            processor.Performance.Downscale = 256;          // processed image is pre-scaled down to N px by long side
            processor.Performance.SkipRate = 0;             // we actually process only each Nth frame (and every frame for skipRate = 0)
        }

        /// <summary>
        /// Per-frame video capture processor
        /// </summary>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // detect everything we're interested in
            processor.ProcessTexture(input, TextureParameters);

            // mark detected objects
            processor.MarkDetected();

            // processor.Image now holds data we'd like to visualize
            output = Unity.MatToTexture(processor.Image, output);   // if output is valid texture it's buffer will be re-used, otherwise it will be re-created

            return true;
        }
    }
}
public void MarkDetected(bool drawSubItems = true)
{
    // mark each found eye
    foreach (DetectedFace face in Faces)
    {
        // face rect
        Cv2.Rectangle((InputOutputArray)Image, face.Region, Scalar.FromRgb(255, 0, 0), 2);

        // convex hull
        //Cv2.Polylines(Image, new IEnumerable<Point>[] { face.Info.ConvexHull }, true, Scalar.FromRgb(255, 0, 0), 2);

        // render face triangulation (should we have one)
        if (face.Info != null)
        {
            foreach (DetectedFace.Triangle tr in face.Info.DelaunayTriangles)
                Cv2.Polylines(Image, new IEnumerable<Point>[] { tr.ToArray() }, true, Scalar.FromRgb(0, 0, 255), 1);
        }

        // Sub-items
        if (drawSubItems)
        {
            List<string> closedItems = new List<string>(new string[] { "Nose", "Eye", "Lip" });
            foreach (DetectedObject sub in face.Elements)
                if (sub.Marks != null)
                    Cv2.Polylines(Image, new IEnumerable<Point>[] { sub.Marks }, closedItems.Contains(sub.Name), Scalar.FromRgb(0, 255, 0), 1);
        }
    }
}
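For reference, below is my rough guess at what an ArUco-marker version of the demo's ProcessTexture override might look like. This is only a sketch based on several assumptions: that OpenCV Plus Unity ships OpenCvSharp's Aruco module (CvAruco), that Unity.TextureToMat accepts a WebCamTexture together with the WebCamera's TextureParameters, and that the class name ArucoDetectorScene and the Dict4X4_50 dictionary are placeholders I chose myself. I am not sure this is the right direction.

namespace OpenCvSharp.Demo
{
    using UnityEngine;
    using OpenCvSharp;
    using OpenCvSharp.Aruco;    // assumption: the Aruco module is included in the asset

    // Sketch: a WebCamera subclass that looks for ArUco markers instead of faces.
    public class ArucoDetectorScene : WebCamera
    {
        private Dictionary markerDictionary;
        private DetectorParameters detectorParameters;

        protected override void Awake()
        {
            base.Awake();

            // Dict4X4_50 is only an example - it has to match the markers that are actually printed
            markerDictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_50);
            detectorParameters = DetectorParameters.Create();
        }

        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // convert the current camera frame to a Mat (assumption: TextureToMat accepts a WebCamTexture)
            Mat frame = Unity.TextureToMat(input, TextureParameters);

            // ArUco detection runs on a grayscale image (color order may need adjusting)
            Mat gray = frame.CvtColor(ColorConversionCodes.BGR2GRAY);

            Point2f[][] corners;
            int[] ids;
            Point2f[][] rejected;
            CvAruco.DetectMarkers(gray, markerDictionary, out corners, out ids, detectorParameters, out rejected);

            // draw the detected marker outlines and ids onto the color frame
            if (ids.Length > 0)
                CvAruco.DrawDetectedMarkers(frame, corners, ids);

            // for a real implementation the temporary Mats should be disposed or reused per frame
            output = Unity.MatToTexture(frame, output);
            return true;
        }
    }
}

My understanding is that, unlike the face demo, no trained data files (the faces/eyes/shapes TextAssets) would be needed here, since ArUco markers are detected purely from the chosen dictionary.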
