diff --git a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Objects/FaceDetection.prefab b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Objects/FaceDetection.prefab
index 69eb4c969..c515e8fa9 100644
--- a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Objects/FaceDetection.prefab
+++ b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Objects/FaceDetection.prefab
@@ -47,6 +47,7 @@ MonoBehaviour:
   gpuConfig: {fileID: 4900000, guid: ea3b02438eeb2743c8ca34c733a1ab8a, type: 3}
   cpuConfig: {fileID: 4900000, guid: 51972370b918e54f78553d2d36c8e40a, type: 3}
   androidConfig: {fileID: 4900000, guid: 754b23d93f14744ee82f1115e5afdb37, type: 3}
+  modelType: 0
 --- !u!114 &5680418270291803702
 MonoBehaviour:
   m_ObjectHideFlags: 0
diff --git a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_android.txt b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_android.txt
index 75cef864a..30c0484e5 100644
--- a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_android.txt
+++ b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_android.txt
@@ -55,7 +55,21 @@ node: {
 node {
   calculator: "FaceDetectionShortRangeGpu"
   input_stream: "IMAGE:transformed_input_video"
-  output_stream: "DETECTIONS:face_detections"
+  output_stream: "DETECTIONS:short_range_detections"
+}
+
+node {
+  calculator: "FaceDetectionFullRangeGpu"
+  input_stream: "IMAGE:transformed_input_video"
+  output_stream: "DETECTIONS:full_range_detections"
+}
+
+node {
+  calculator: "SwitchMuxCalculator"
+  input_side_packet: "SELECT:model_type"
+  input_stream: "C0__FUNC_INPUT:short_range_detections"
+  input_stream: "C1__FUNC_INPUT:full_range_detections"
+  output_stream: "FUNC_INPUT:face_detections"
 }
 
 node {
diff --git a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_cpu.txt b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_cpu.txt
index 1aaaf63c1..ec70f68bf 100644
--- a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_cpu.txt
+++ b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_cpu.txt
@@ -16,6 +16,7 @@
 #
 # CHANGES:
 # - Add ImageTransformationCalculator and rotate the input
+# - Switch models
 # - Remove AnnotationOverlayCalculator
 # - Add PacketPresenceCalculator
 
@@ -62,7 +63,21 @@ node: {
 node {
   calculator: "FaceDetectionShortRangeCpu"
   input_stream: "IMAGE:transformed_input_video"
-  output_stream: "DETECTIONS:face_detections"
+  output_stream: "DETECTIONS:short_range_detections"
+}
+
+node {
+  calculator: "FaceDetectionFullRangeCpu"
+  input_stream: "IMAGE:transformed_input_video"
+  output_stream: "DETECTIONS:full_range_detections"
+}
+
+node {
+  calculator: "SwitchMuxCalculator"
+  input_side_packet: "SELECT:model_type"
+  input_stream: "C0__FUNC_INPUT:short_range_detections"
+  input_stream: "C1__FUNC_INPUT:full_range_detections"
+  output_stream: "FUNC_INPUT:face_detections"
 }
 
 node {
diff --git a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_gpu.txt b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_gpu.txt
index 231b5e9a6..5dee80c39 100644
--- a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_gpu.txt
+++ b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Resources/face_detection_desktop_gpu.txt
@@ -17,6 +17,7 @@
 # CHANGES:
 # - `input_video` is ImageFrame (ImageFrameToGpuBufferCalculator converts it into GpuBuffer)
 # - Add ImageTransformationCalculator and rotate the input
+# - Switch models
 # - Remove AnnotationOverlayCalculator
 # - Add PacketPresenceCalculator
 
@@ -69,7 +70,21 @@ node: {
 node {
   calculator: "FaceDetectionShortRangeGpu"
   input_stream: "IMAGE:transformed_input_video"
-  output_stream: "DETECTIONS:face_detections"
+  output_stream: "DETECTIONS:short_range_detections"
+}
+
+node {
+  calculator: "FaceDetectionFullRangeGpu"
+  input_stream: "IMAGE:transformed_input_video"
+  output_stream: "DETECTIONS:full_range_detections"
+}
+
+node {
+  calculator: "SwitchMuxCalculator"
+  input_side_packet: "SELECT:model_type"
+  input_stream: "C0__FUNC_INPUT:short_range_detections"
+  input_stream: "C1__FUNC_INPUT:full_range_detections"
+  output_stream: "FUNC_INPUT:face_detections"
 }
 
 node {
diff --git a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Scripts/FaceDetectionGraph.cs b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Scripts/FaceDetectionGraph.cs
index b1a4fd845..a29053845 100644
--- a/Assets/Mediapipe/Samples/Graphs/FaceDetection/Scripts/FaceDetectionGraph.cs
+++ b/Assets/Mediapipe/Samples/Graphs/FaceDetection/Scripts/FaceDetectionGraph.cs
@@ -1,7 +1,15 @@
 using Mediapipe;
 using System.Collections.Generic;
+using UnityEngine;
 
 public class FaceDetectionGraph : DemoGraph {
+  enum ModelType {
+    ShortRange = 0,
+    FullRangeSparse = 1,
+  }
+
+  [SerializeField] ModelType modelType = ModelType.ShortRange;
+
   private const string faceDetectionsStream = "face_detections";
   private OutputStreamPoller<List<Detection>> faceDetectionsStreamPoller;
   private DetectionVectorPacket faceDetectionsPacket;
@@ -10,6 +18,8 @@ public class FaceDetectionGraph : DemoGraph {
   private OutputStreamPoller<bool> faceDetectionsPresenceStreamPoller;
   private BoolPacket faceDetectionsPresencePacket;
 
+  private SidePacket sidePacket;
+
   public override Status StartRun() {
     faceDetectionsStreamPoller = graph.AddOutputStreamPoller<List<Detection>>(faceDetectionsStream).Value();
     faceDetectionsPacket = new DetectionVectorPacket();
@@ -17,7 +27,10 @@ public override Status StartRun() {
     faceDetectionsPresenceStreamPoller = graph.AddOutputStreamPoller<bool>(faceDetectionsPresenceStream).Value();
     faceDetectionsPresencePacket = new BoolPacket();
 
-    return graph.StartRun();
+    sidePacket = new SidePacket();
+    sidePacket.Emplace("model_type", new IntPacket((int)modelType));
+
+    return graph.StartRun(sidePacket);
   }
 
   public override void RenderOutput(WebCamScreenController screenController, TextureFrame textureFrame) {
@@ -42,5 +55,6 @@ private void RenderAnnotation(WebCamScreenController screenController, List