//======= Copyright (c) Stereolabs Corporation, All rights reserved. ===============
using UnityEngine;
using System.Collections.Generic;
using System;
using System.Runtime.InteropServices;
namespace sl
{
/// <summary>
/// Main interface between Unity and the ZED SDK. Primarily consists of extern calls to the ZED SDK wrapper .dll and
/// low-level logic to process data sent to/received from it.
/// <para>
/// The ZEDManager component creates a ZEDCamera instance in Awake() and handles all initialization.
/// Most ZED functionality can be handled simply in Unity via ZEDManager or other high-level manager components
/// (ZEDSpatialMappingManager, ZEDPlaneDetectionManager, etc.)
/// </para><para>
/// Textures created by ZEDCamera by CreateTextureImageType() and CreateTextureMeasureType()
/// are updated automatically by the wrapper whenever a new frame is available. They represent a specific kind of
/// output, like left RGB image, or depth relative to the right sensor. As such, you never need more than one texture
/// of each kind, since it can simply be reused by all scripts that need it. Therefore, textures created in ZEDCamera
/// are indexed by their type (Image or Measure) and then by the options of each type. If a script needs a certain kind
/// of output, ZEDCamera will make a new one if it doesn't exist, but refer to the existing one otherwise.
/// </para>
/// </summary>
public class ZEDCamera
{
/// <summary>
/// Type of textures requested.
/// </summary>
public enum TYPE_VIEW
{
    /// <summary>
    /// Image-type texture. Human-viewable but loses measurement accuracy.
    /// </summary>
    RETRIEVE_IMAGE,
    /// <summary>
    /// Measure-type texture. Preserves measurement accuracy but isn't human-viewable.
    /// </summary>
    RETRIEVE_MEASURE
}
/// <summary>
/// Information for a requested texture. Stored in the texturesRequested list so DestroyTexture()
/// can be called with the correct arguments in DestroyAllTexture().
/// </summary>
private struct TextureRequested
{
    /// <summary>
    /// Texture type - 'image' or 'measure.' Assigned using ZEDCamera.TYPE_VIEW.
    /// </summary>
    public int type;
    /// <summary>
    /// View mode (left/right eye, depth, etc.) Assigned using ZEDCommon.VIEW.
    /// </summary>
    public int option;
};
/********* Camera members ********/
/// <summary>
/// DLL name, used for extern calls to the wrapper.
/// </summary>
public const string nameDll = sl.ZEDCommon.NameDLL;
/// <summary>
/// List of all created textures, representing SDK output. Indexed by an int corresponding to its ZEDCamera.TYPE_VIEW
/// and then an int for its ZEDCommon.VIEW, as you can't have more than one texture for each combination (nor would it be useful to).
/// </summary>
private Dictionary<int, Dictionary<int, Texture2D>> textures; //Generic type arguments restored; they had been stripped, which doesn't compile.
/// <summary>
/// List of all requested textures. Used for destroying all textures when the camera closes.
/// </summary>
private List<TextureRequested> texturesRequested;
/// <summary>
/// Width of the textures in pixels. Corresponds to the ZED's current resolution setting.
/// </summary>
private int imageWidth;
/// <summary>
/// Width of the images returned by the ZED in pixels. Corresponds to the ZED's current resolution setting.
/// </summary>
public int ImageWidth
{
    get
    {
        return imageWidth;
    }
}
/// <summary>
/// Height of the textures in pixels. Corresponds to the ZED's current resolution setting.
/// </summary>
private int imageHeight;
/// <summary>
/// Height of the images returned by the ZED in pixels. Corresponds to the ZED's current resolution setting.
/// </summary>
public int ImageHeight
{
    get
    {
        return imageHeight;
    }
}
/// <summary>
/// Projection matrix corresponding to the ZED's camera traits. Useful for lining up virtual cameras with the ZED image.
/// </summary>
private Matrix4x4 projection = new Matrix4x4();
/// <summary>
/// Projection matrix corresponding to the ZED's camera traits. Useful for lining up virtual cameras with the ZED image.
/// </summary>
public Matrix4x4 Projection
{
    get
    {
        return projection;
    }
}
/// <summary>
/// True if the ZED SDK is installed. Updated by CheckPlugin().
/// </summary>
private static bool pluginIsReady = true;
/// <summary>
/// Mutex for the image acquisition thread.
/// </summary>
public object grabLock = new object();
/// <summary>
/// Current ZED resolution setting. Set at initialization.
/// </summary>
private RESOLUTION currentResolution;
/// <summary>
/// Callback for c++ debugging. Should not be used in C#.
/// </summary>
private delegate void DebugCallback(string message);
/// <summary>
/// Desired FPS from the ZED camera. This is the maximum FPS for the ZED's current
/// resolution unless a lower setting was specified in Init().
/// Maximum values are bound by the ZED's output, not system performance.
/// </summary>
private uint fpsMax = 60; //Defaults to HD720 resolution's output.
/// <summary>
/// Desired FPS from the ZED camera. This is the maximum FPS for the ZED's current
/// resolution unless a lower setting was specified in Init().
/// Maximum values are bound by the ZED's output, not system performance.
/// </summary>
/// <returns>The requested FPS (stored internally as a uint, returned as a float).</returns>
public float GetRequestedCameraFPS()
{
    return fpsMax;
}
/// <summary>
/// Holds camera settings like brightness/contrast, gain/exposure, etc.
/// </summary>
private ZEDCameraSettings cameraSettingsManager = new ZEDCameraSettings();
/// <summary>
/// Camera's stereo baseline (distance between the cameras). Extracted from calibration files.
/// </summary>
private float baseline = 0.0f;
/// <summary>
/// Camera's stereo baseline (distance between the cameras). Extracted from calibration files.
/// </summary>
public float Baseline
{
    get { return baseline; }
}
/// <summary>
/// ZED's current horizontal field of view in degrees.
/// </summary>
private float fov_H = 0.0f;
/// <summary>
/// ZED's current vertical field of view in degrees.
/// </summary>
private float fov_V = 0.0f;
/// <summary>
/// ZED's current horizontal field of view in degrees.
/// </summary>
public float HorizontalFieldOfView
{
    get { return fov_H; }
}
/// <summary>
/// ZED's current vertical field of view in degrees.
/// </summary>
public float VerticalFieldOfView
{
    get { return fov_V; }
}
/// <summary>
/// Structure containing information about all the sensors available in the current device.
/// </summary>
private SensorsConfiguration sensorsConfiguration;
/// <summary>
/// Stereo parameters for current ZED camera prior to rectification (distorted).
/// </summary>
private CalibrationParameters calibrationParametersRaw;
/// <summary>
/// Stereo parameters for current ZED camera after rectification (undistorted).
/// </summary>
private CalibrationParameters calibrationParametersRectified;
/// <summary>
/// Camera model - ZED or ZED Mini.
/// </summary>
private sl.MODEL cameraModel;
/// <summary>
/// Whether the camera has been successfully initialized.
/// </summary>
private bool cameraReady = false;
/// <summary>
/// Structure containing information about all the sensors available in the current device.
/// </summary>
public SensorsConfiguration SensorsConfiguration
{
    get { return sensorsConfiguration; }
}
/// <summary>
/// Stereo parameters for current ZED camera prior to rectification (distorted).
/// </summary>
public CalibrationParameters CalibrationParametersRaw
{
    get { return calibrationParametersRaw; }
}
/// <summary>
/// Stereo parameters for current ZED camera after rectification (undistorted).
/// </summary>
public CalibrationParameters CalibrationParametersRectified
{
    get { return calibrationParametersRectified; }
}
/// <summary>
/// Camera model - ZED or ZED Mini.
/// </summary>
public sl.MODEL CameraModel
{
    get { return cameraModel; }
}
/// <summary>
/// Whether the camera has been successfully initialized.
/// </summary>
public bool IsCameraReady
{
    get { return cameraReady; }
}
/// <summary>
/// Whether the current device (ZED or ZED Mini) should be used for pass-through AR.
/// True if using ZED Mini, false if using ZED. Note: the plugin will allow using the original ZED
/// for pass-through but it will feel quite uncomfortable due to the baseline.
/// </summary>
public bool IsHmdCompatible
{
    get { return cameraModel == sl.MODEL.ZED_M; }
}
/// <summary>
/// Camera ID (for multiple cameras).
/// </summary>
public int CameraID = 0;
/// <summary>
/// Layer that the ZED can't see, but overlay cameras created by ZEDMeshRenderer and ZEDPlaneRenderer can.
/// </summary>
//int tagInvisibleToZED = 16;
/// <summary>
/// Layer that the ZED can't see, but overlay cameras created by ZEDMeshRenderer and ZEDPlaneRenderer can.
/// </summary>
public int TagInvisibleToZED
{
    get { return ZEDLayers.tagInvisibleToZED; }
}
//Default values for the camera's video settings (brightness, contrast, etc.).
public const int brightnessDefault = 4;
public const int contrastDefault = 4;
public const int hueDefault = 0;
public const int saturationDefault = 4;
public const int sharpnessDefault = 3;
public const int gammaDefault = 5;
public const int whitebalanceDefault = 2600;
#region DLL Calls
/// <summary>
/// Current Plugin Version.
/// </summary>
public static readonly System.Version PluginVersion = new System.Version(3, 7, 1);
/******** DLL members ***********/
[DllImport(nameDll, EntryPoint = "GetRenderEventFunc")]
private static extern IntPtr GetRenderEventFunc();
[DllImport(nameDll, EntryPoint = "sl_register_callback_debuger")]
private static extern void dllz_register_callback_debuger(DebugCallback callback);
/*
 * Utils functions.
 */
[DllImport(nameDll, EntryPoint = "sl_unload_all_instances")]
private static extern void dllz_unload_all_instances();
[DllImport(nameDll, EntryPoint = "sl_unload_instance")]
private static extern void dllz_unload_instance(int id);
[DllImport(nameDll, EntryPoint = "sl_find_usb_device")]
private static extern bool dllz_find_usb_device(USB_DEVICE dev);
[DllImport(nameDll, EntryPoint = "sl_generate_unique_id")]
private static extern int dllz_generate_unique_id([In, Out] byte[] id);
/*
 * Create functions.
 */
[DllImport(nameDll, EntryPoint = "sl_create_camera")]
private static extern bool dllz_create_camera(int cameraID);
/*
 * Opening function (Opens camera and creates textures).
 */
[DllImport(nameDll, EntryPoint = "sl_open_camera")]
private static extern int dllz_open(int cameraID, ref dll_initParameters parameters, System.Text.StringBuilder svoPath, System.Text.StringBuilder ipStream, int portStream, System.Text.StringBuilder output, System.Text.StringBuilder opt_settings_path, System.Text.StringBuilder opencv_calib_path);
/*
 * Close function.
 */
[DllImport(nameDll, EntryPoint = "sl_close_camera")]
private static extern void dllz_close(int cameraID);
/*
 * Grab function.
 */
[DllImport(nameDll, EntryPoint = "sl_grab")]
private static extern int dllz_grab(int cameraID, ref sl.RuntimeParameters runtimeParameters);
/*
 * GetDeviceList function.
 */
[DllImport(nameDll, EntryPoint = "sl_get_device_list")]
private static extern void dllz_get_device_list(sl.DeviceProperties[] deviceList, out int nbDevices);
/*
 * Reboot function.
 */
[DllImport(nameDll, EntryPoint = "sl_reboot")]
private static extern int dllz_reboot(int serialNumber, bool fullReboot);
/*
 * Recording functions.
 */
[DllImport(nameDll, EntryPoint = "sl_enable_recording")]
private static extern int dllz_enable_recording(int cameraID, System.Text.StringBuilder video_filename, int compresssionMode,int bitrate,int target_fps,bool transcode);
[DllImport(nameDll, EntryPoint = "sl_disable_recording")]
private static extern bool dllz_disable_recording(int cameraID);
/*
 * Texturing functions.
 */
[DllImport(nameDll, EntryPoint = "sl_retrieve_textures")]
private static extern void dllz_retrieve_textures(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_updated_textures_timestamp")]
private static extern ulong dllz_get_updated_textures_timestamp(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_swap_textures")]
private static extern void dllz_swap_textures(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_register_texture_image_type")]
private static extern int dllz_register_texture_image_type(int cameraID, int option, IntPtr id, int width, int height);
[DllImport(nameDll, EntryPoint = "sl_register_texture_measure_type")]
private static extern int dllz_register_texture_measure_type(int cameraID, int option, IntPtr id, int width, int height);
[DllImport(nameDll, EntryPoint = "sl_unregister_texture_measure_type")]
private static extern int dllz_unregister_texture_measure_type(int cameraID, int option);
[DllImport(nameDll, EntryPoint = "sl_unregister_texture_image_type")]
private static extern int dllz_unregister_texture_image_type(int cameraID, int option);
[DllImport(nameDll, EntryPoint = "sl_get_copy_mat_texture_image_type")]
private static extern IntPtr dllz_get_copy_mat_texture_image_type(int cameraID, int option);
[DllImport(nameDll, EntryPoint = "sl_get_copy_mat_texture_measure_type")]
private static extern IntPtr dllz_get_copy_mat_texture_measure_type(int cameraID, int option);
/*
 * Camera control functions.
 */
[DllImport(nameDll, EntryPoint = "sl_set_video_settings")]
private static extern void dllz_set_video_settings(int id, int mode, int value);
[DllImport(nameDll, EntryPoint = "sl_get_video_settings")]
private static extern int dllz_get_video_settings(int id, int mode);
[DllImport(nameDll, EntryPoint = "sl_set_roi_for_aec_agc")]
private static extern int dllz_set_roi_for_aec_agc(int id, int side, iRect roi,bool reset);
[DllImport(nameDll, EntryPoint = "sl_get_roi_for_aec_agc")]
private static extern int dllz_get_roi_for_aec_agc(int id, int side, ref iRect roi);
[DllImport(nameDll, EntryPoint = "sl_get_input_type")]
private static extern int dllz_get_input_type(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_camera_fps")]
private static extern float dllz_get_camera_fps(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_width")]
private static extern int dllz_get_width(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_height")]
private static extern int dllz_get_height(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_update_self_calibration")]
private static extern void dllz_update_self_calibration(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_calibration_parameters")]
private static extern IntPtr dllz_get_calibration_parameters(int cameraID, bool raw);
[DllImport(nameDll, EntryPoint = "sl_get_sensors_configuration")]
private static extern IntPtr dllz_get_sensors_configuration(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_camera_model")]
private static extern int dllz_get_camera_model(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_camera_firmware")]
private static extern int dllz_get_camera_firmware(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_sensors_firmware")]
private static extern int dllz_get_sensors_firmware(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_zed_serial")]
private static extern int dllz_get_zed_serial(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_camera_imu_transform")]
private static extern void dllz_get_camera_imu_transform(int cameraID, out Vector3 translation, out Quaternion rotation);
//Note: the C# name intentionally differs from the native entry point "sl_get_camera_timestamp".
[DllImport(nameDll, EntryPoint = "sl_get_camera_timestamp")]
private static extern ulong dllz_get_image_timestamp(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_current_Timestamp")]
private static extern ulong dllz_get_current_timestamp(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_frame_dropped_count")]
private static extern uint dllz_get_frame_dropped_count(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_frame_dropped_percent")]
private static extern float dllz_get_frame_dropped_percent(int cameraID);
/*
 * SVO control functions.
 */
[DllImport(nameDll, EntryPoint = "sl_set_svo_position")]
private static extern void dllz_set_svo_position(int cameraID, int frame);
[DllImport(nameDll, EntryPoint = "sl_get_svo_number_of_frames")]
private static extern int dllz_get_svo_number_of_frames(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_svo_position")]
private static extern int dllz_get_svo_position(int cameraID);
/*
 * Depth Sensing utils functions.
 */
/* Removed as of ZED SDK v3.0.
[DllImport(nameDll, EntryPoint = "set_confidence_threshold")]
private static extern void dllz_set_confidence_threshold(int cameraID, int threshold);
[DllImport(nameDll, EntryPoint = "set_depth_max_range_value")]
private static extern void dllz_set_depth_max_range_value(int cameraID, float distanceMax);
*/
[DllImport(nameDll, EntryPoint = "sl_get_confidence_threshold")]
private static extern int dllz_get_confidence_threshold(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_depth_max_range_value")]
private static extern float dllz_get_depth_max_range_value(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_depth_value")]
private static extern float dllz_get_depth_value(int cameraID, uint x, uint y);
[DllImport(nameDll, EntryPoint = "sl_get_distance_value")]
private static extern float dllz_get_distance_value(int cameraID, uint x, uint y);
[DllImport(nameDll, EntryPoint = "sl_get_normal_value")]
private static extern bool dllz_get_normal_value(int cameraID, uint x, uint y, out Vector4 value);
[DllImport(nameDll, EntryPoint = "sl_get_xyz_value")]
private static extern bool dllz_get_xyz_value(int cameraID, uint x, uint y, out Vector4 value);
[DllImport(nameDll, EntryPoint = "sl_get_depth_min_range_value")]
private static extern float dllz_get_depth_min_range_value(int cameraID);
/*
 * Motion Tracking functions.
 */
[DllImport(nameDll, EntryPoint = "sl_enable_positional_tracking_unity")]
private static extern int dllz_enable_tracking(int cameraID, ref Quaternion quat, ref Vector3 vec, bool enableSpatialMemory = false, bool enablePoseSmoothing = false, bool enableFloorAlignment = false,
bool trackingIsStatic = false, bool enableIMUFusion = true, System.Text.StringBuilder areaFilePath = null);
[DllImport(nameDll, EntryPoint = "sl_disable_positional_tracking")]
private static extern void dllz_disable_tracking(int cameraID, System.Text.StringBuilder path);
[DllImport(nameDll, EntryPoint = "sl_save_area_map")]
private static extern int dllz_save_current_area(int cameraID, System.Text.StringBuilder path);
[DllImport(nameDll, EntryPoint = "sl_get_position_data")]
private static extern int dllz_get_position_data(int cameraID, ref Pose pose, int reference_frame);
[DllImport(nameDll, EntryPoint = "sl_get_position")]
private static extern int dllz_get_position(int cameraID, ref Quaternion quat, ref Vector3 vec, int reference_frame);
[DllImport(nameDll, EntryPoint = "sl_get_position_at_target_frame")]
private static extern int dllz_get_position_at_target_frame(int cameraID, ref Quaternion quaternion, ref Vector3 translation, ref Quaternion targetQuaternion, ref Vector3 targetTranslation, int reference_frame);
[DllImport(nameDll, EntryPoint = "sl_transform_pose")]
private static extern void dllz_transform_pose(ref Quaternion quaternion, ref Vector3 translation, ref Quaternion targetQuaternion, ref Vector3 targetTranslation);
[DllImport(nameDll, EntryPoint = "sl_reset_positional_tracking")]
private static extern int dllz_reset_tracking(int cameraID, Quaternion rotation, Vector3 translation);
[DllImport(nameDll, EntryPoint = "sl_reset_tracking_with_offset")]
private static extern int dllz_reset_tracking_with_offset(int cameraID, Quaternion rotation, Vector3 translation, Quaternion offsetQuaternion, Vector3 offsetTranslation);
[DllImport(nameDll, EntryPoint = "sl_estimate_initial_position")]
private static extern int dllz_estimate_initial_position(int cameraID, ref Quaternion quaternion, ref Vector3 translation, int countSuccess, int countTimeout);
[DllImport(nameDll, EntryPoint = "sl_set_imu_prior_orientation")]
private static extern int dllz_set_imu_prior_orientation(int cameraID, Quaternion rotation);
[DllImport(nameDll, EntryPoint = "sl_get_internal_imu_orientation")]
private static extern int dllz_get_internal_imu_orientation(int cameraID, ref Quaternion rotation, int reference_time);
[DllImport(nameDll, EntryPoint = "sl_get_internal_sensors_data")]
private static extern int dllz_get_internal_sensors_data(int cameraID, ref SensorsData imuData, int reference_time);
[DllImport(nameDll, EntryPoint = "sl_get_area_export_state")]
private static extern int dllz_get_area_export_state(int cameraID);
/*
 * Spatial Mapping functions.
 */
[DllImport(nameDll, EntryPoint = "sl_enable_spatial_mapping_unity")]
private static extern int dllz_enable_spatial_mapping(int cameraID, int type, float resolution_meter, float max_range_meter, int saveTexture,int max_memory_usage);
[DllImport(nameDll, EntryPoint = "sl_disable_spatial_mapping")]
private static extern void dllz_disable_spatial_mapping(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_pause_spatial_mapping")]
private static extern void dllz_pause_spatial_mapping(int cameraID, bool status);
[DllImport(nameDll, EntryPoint = "sl_request_mesh_async")]
private static extern void dllz_request_mesh_async(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_get_mesh_request_status_async")]
private static extern int dllz_get_mesh_request_status_async(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_update_mesh")]
private static extern int dllz_update_mesh(int cameraID, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmesh);
[DllImport(nameDll, EntryPoint = "sl_retrieve_mesh")]
private static extern int dllz_retrieve_mesh(int cameraID, Vector3[] vertices, int[] triangles, int nbSubmesh, Vector2[] uvs, IntPtr textures);
[DllImport(nameDll, EntryPoint = "sl_update_fused_point_cloud")]
private static extern int dllz_update_fused_point_cloud(int cameraID, ref int pbPoints);
[DllImport(nameDll, EntryPoint = "sl_retrieve_fused_point_cloud")]
private static extern int dllz_retrieve_fused_point_cloud(int cameraID, Vector4[] points);
[DllImport(nameDll, EntryPoint = "sl_save_mesh")]
private static extern bool dllz_save_mesh(int cameraID, string filename, MESH_FILE_FORMAT format);
[DllImport(nameDll, EntryPoint = "sl_save_point_cloud")]
private static extern bool dllz_save_point_cloud(int cameraID, string filename, MESH_FILE_FORMAT format);
[DllImport(nameDll, EntryPoint = "sl_load_mesh")]
private static extern bool dllz_load_mesh(int cameraID, string filename, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbMaxSubmesh, int[] textureSize = null);
[DllImport(nameDll, EntryPoint = "sl_apply_texture")]
private static extern bool dllz_apply_texture(int cameraID, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int[] textureSize, int nbSubmesh);
[DllImport(nameDll, EntryPoint = "sl_filter_mesh")]
private static extern bool dllz_filter_mesh(int cameraID, FILTER meshFilter, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmesh);
[DllImport(nameDll, EntryPoint = "sl_get_spatial_mapping_state")]
private static extern int dllz_get_spatial_mapping_state(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_spatial_mapping_merge_chunks")]
private static extern void dllz_spatial_mapping_merge_chunks(int cameraID, int numberFaces, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmesh);
[DllImport(nameDll, EntryPoint = "sl_spatial_mapping_get_gravity_estimation")]
private static extern void dllz_spatial_mapping_get_gravity_estimation(int cameraID, ref Vector3 v);
/*
 * Plane Detection functions (starting v2.4).
 */
[DllImport(nameDll, EntryPoint = "sl_find_floor_plane")]
private static extern IntPtr dllz_find_floor_plane(int cameraID, out Quaternion rotation, out Vector3 translation, Quaternion priorQuaternion, Vector3 priorTranslation);
[DllImport(nameDll, EntryPoint = "sl_find_plane_at_hit")]
private static extern IntPtr dllz_find_plane_at_hit(int cameraID, Vector2 HitPixel, bool refine);
[DllImport(nameDll, EntryPoint = "sl_convert_floorplane_to_mesh")]
private static extern int dllz_convert_floorplane_to_mesh(int cameraID, Vector3[] vertices, int[] triangles, out int numVertices, out int numTriangles);
[DllImport(nameDll, EntryPoint = "sl_convert_hitplane_to_mesh")]
private static extern int dllz_convert_hitplane_to_mesh(int cameraID, Vector3[] vertices, int[] triangles, out int numVertices, out int numTriangles);
/*
 * Streaming Module functions (starting v2.8).
 */
[DllImport(nameDll, EntryPoint = "sl_enable_streaming")]
private static extern int dllz_enable_streaming(int cameraID, sl.STREAMING_CODEC codec, uint bitrate, ushort port, int gopSize, int adaptativeBitrate,int chunk_size,int target_fps);
[DllImport(nameDll, EntryPoint = "sl_is_streaming_enabled")]
private static extern int dllz_is_streaming_enabled(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_disable_streaming")]
private static extern void dllz_disable_streaming(int cameraID);
/*
 * Object Detection functions (starting v3.0).
 */
[DllImport(nameDll, EntryPoint = "sl_check_AI_model_status")]
private static extern IntPtr dllz_check_AI_model_status(AI_MODELS model, int gpu_id);
[DllImport(nameDll, EntryPoint = "sl_optimize_AI_model")]
private static extern int dllz_optimize_AI_model(AI_MODELS model, int gpu_id);
[DllImport(nameDll, EntryPoint = "sl_enable_objects_detection")]
private static extern int dllz_enable_objects_detection(int cameraID, ref dll_ObjectDetectionParameters od_params);
[DllImport(nameDll, EntryPoint = "sl_disable_objects_detection")]
private static extern void dllz_disable_objects_detection(int cameraID);
[DllImport(nameDll, EntryPoint = "sl_pause_objects_detection")]
private static extern void dllz_pause_objects_detection(int cameraID, bool status);
[DllImport(nameDll, EntryPoint = "sl_ingest_custom_box_objects")]
private static extern int dllz_ingest_custom_box_objects(int cameraID, int nb_objects, CustomBoxObjectData[] objects_in);
[DllImport(nameDll, EntryPoint = "sl_retrieve_objects")]
private static extern int dllz_retrieve_objects_data(int cameraID, ref dll_ObjectDetectionRuntimeParameters od_params, ref ObjectsFrameSDK objFrame);
[DllImport(nameDll, EntryPoint = "sl_update_objects_batch")]
private static extern int dllz_update_objects_batch(int cameraID, out int nbBatches);
[DllImport(nameDll, EntryPoint = "sl_get_objects_batch")]
private static extern int dllz_get_objects_batch_data(int cameraID, int batch_index, ref int numData, ref int id, ref OBJECT_CLASS label, ref OBJECT_SUBCLASS sublabel, ref TRACKING_STATE trackingState,
[In, Out] Vector3[] position, [In, Out] float[,] positionCovariances, [In, Out] Vector3[] velocities, [In, Out] ulong[] timestamps, [In, Out] Vector2[,] boundingBoxes2D, [In, Out] Vector3[,] boundingBoxes,
[In, Out] float[] confidences, [In, Out] OBJECT_ACTION_STATE[] actionStates, [In, Out] Vector2[,] keypoints2D, [In, Out] Vector3[,] keypoints, [In, Out] Vector2[,] headBoundingBoxes2D, [In, Out] Vector3[,] headBoundingBoxes, [In, Out] Vector3[] headPositions,
[In, Out] float[,] keypointsConfidences);
/*
 * Save utils functions.
 */
[DllImport(nameDll, EntryPoint = "sl_save_current_image")]
private static extern int dllz_save_current_image(int cameraID, VIEW view,string filename);
[DllImport(nameDll, EntryPoint = "sl_save_current_depth")]
private static extern int dllz_save_current_depth(int cameraID, int side, string filename);
[DllImport(nameDll, EntryPoint = "sl_save_current_point_cloud")]
private static extern int dllz_save_current_point_cloud(int cameraID, int side, string filename);
/*
 * Plugin-specific functions.
 */
[DllImport(nameDll, EntryPoint = "sl_check_plugin")]
private static extern int dllz_check_plugin(int major, int minor);
[DllImport(nameDll, EntryPoint = "sl_get_sdk_version")]
private static extern IntPtr dllz_get_sdk_version();
[DllImport(nameDll, EntryPoint = "sl_compute_offset")]
private static extern void dllz_compute_offset(float[] A, float[] B, int nbVectors, float[] C);
[DllImport(nameDll, EntryPoint = "sl_compute_optical_center_offsets")]
private static extern System.IntPtr dllz_compute_optical_center_offsets(ref Vector4 calibLeft, ref Vector4 calibRight, int width, int height, float planeDistance);
/*
 * Retrieve functions used by the Mat wrapper.
 */
[DllImport(nameDll, EntryPoint = "sl_retrieve_measure")]
private static extern int dllz_retrieve_measure(int cameraID, System.IntPtr ptr, int type, int mem, int width, int height);
[DllImport(nameDll, EntryPoint = "sl_retrieve_image")]
private static extern int dllz_retrieve_image(int cameraID, System.IntPtr ptr, int type, int mem, int width, int height);
#endregion
/// <summary>
/// Unloads all native camera instances created by the wrapper DLL.
/// </summary>
public static void UnloadPlugin()
{
    dllz_unload_all_instances();
}
/// <summary>
/// Unloads the native camera instance with the given ID.
/// </summary>
/// <param name="id">ID of the camera instance to unload.</param>
public static void UnloadInstance(int id)
{
    dllz_unload_instance(id);
}
/// <summary>
/// Computes a rigid offset (rotation and translation) from two sets of 4-float vectors by calling
/// the wrapper's solver, then decomposing the resulting 4x4 matrix.
/// </summary>
/// <param name="A">First vector set; must contain exactly 4 * nbVectors floats.</param>
/// <param name="B">Second vector set; must contain exactly 4 * nbVectors floats.</param>
/// <param name="nbVectors">Number of 4-float vectors in each array.</param>
/// <param name="rotation">Output rotation. Left unchanged if the input sizes are invalid.</param>
/// <param name="translation">Output translation. Left unchanged if the input sizes are invalid.</param>
public static void ComputeOffset(float[] A, float[] B, int nbVectors, ref Quaternion rotation, ref Vector3 translation)
{
    //Validate before doing any work. The old code also tested C.Length != 16, which was
    //always false since C was freshly allocated with length 16 — the dead check is removed.
    if (A.Length != 4 * nbVectors || B.Length != 4 * nbVectors) return; //Silently ignore bad sizes, preserving previous behavior.
    float[] C = new float[16]; //Receives a 4x4 row-major transform from the wrapper.
    dllz_compute_offset(A, B, nbVectors, C);
    Matrix4x4 m = Matrix4x4.identity;
    Float2Matrix(ref m, C);
    rotation = Matrix4ToQuaternion(m);
    //Translation is the matrix's fourth column.
    Vector4 t = m.GetColumn(3);
    translation.x = t.x;
    translation.y = t.y;
    translation.z = t.z;
}
/// <summary>
/// Reads a null-terminated UTF-8 string from unmanaged memory. Used in GetSDKVersion().
/// </summary>
/// <param name="ptr">Pointer to a null-terminated char buffer.</param>
/// <returns>The decoded string, or "" if the pointer is null or the string is empty.</returns>
private static string PtrToStringUtf8(IntPtr ptr)
{
    if (ptr == IntPtr.Zero)
    {
        return "";
    }
    //Scan for the null terminator to find the string's byte length.
    int len = 0;
    while (Marshal.ReadByte(ptr, len) != 0)
        len++;
    if (len == 0)
    {
        return "";
    }
    byte[] array = new byte[len];
    Marshal.Copy(ptr, array, 0, len);
    //Decode as UTF-8, as the method name promises; the previous ASCII decoding
    //replaced any non-ASCII byte with '?'.
    return System.Text.Encoding.UTF8.GetString(array);
}
/// <summary>
/// Displays a console message. Used to display C++ SDK messages in Unity's console.
/// </summary>
/// <param name="message">Message forwarded from the native plugin via the DebugCallback delegate.</param>
private static void DebugMethod(string message)
{
    Debug.Log("[ZED plugin]: " + message);
}
/// <summary>
/// Converts a managed string into its UTF-8 byte representation. Used to send file names
/// to the SDK for SVO recording.
/// </summary>
/// <param name="str">String to encode.</param>
/// <returns>The UTF-8 encoded bytes.</returns>
private static byte[] StringUtf8ToByte(string str)
{
    //Encode as UTF-8, as the method name promises; the previous ASCII encoding replaced
    //any non-ASCII character with '?', corrupting non-ASCII file paths.
    byte[] array = System.Text.Encoding.UTF8.GetBytes(str);
    return array;
}
/// <summary>
/// Gets the ZED's maximum output FPS for each resolution setting. Higher FPS will cause
/// lower GPU performance.
/// </summary>
/// <param name="reso">The resolution to look up.</param>
/// <returns>Maximum FPS at that resolution; 30 for any unrecognized value.</returns>
static private uint GetFpsForResolution(RESOLUTION reso)
{
    switch (reso)
    {
        case RESOLUTION.HD2K: return 15;
        case RESOLUTION.HD1080: return 30;
        case RESOLUTION.HD720: return 60;
        case RESOLUTION.VGA: return 100;
        default: return 30;
    }
}
/// <summary>
/// Get a quaternion from a matrix with a minimum size of 3x3.
/// </summary>
/// <param name="m">The matrix.</param>
/// <returns>A quaternion which contains the matrix's rotation.</returns>
public static Quaternion Matrix4ToQuaternion(Matrix4x4 m)
{
    Quaternion q = new Quaternion();
    //Each component's magnitude comes from the diagonal (trace combinations); Max(0, ...)
    //guards the square root against slightly-negative operands from floating-point error.
    q.w = Mathf.Sqrt(Mathf.Max(0, 1 + m[0, 0] + m[1, 1] + m[2, 2])) / 2;
    q.x = Mathf.Sqrt(Mathf.Max(0, 1 + m[0, 0] - m[1, 1] - m[2, 2])) / 2;
    q.y = Mathf.Sqrt(Mathf.Max(0, 1 - m[0, 0] + m[1, 1] - m[2, 2])) / 2;
    q.z = Mathf.Sqrt(Mathf.Max(0, 1 - m[0, 0] - m[1, 1] + m[2, 2])) / 2;
    //The square roots lose each component's sign; recover it from the off-diagonal differences.
    q.x *= Mathf.Sign(q.x * (m[2, 1] - m[1, 2]));
    q.y *= Mathf.Sign(q.y * (m[0, 2] - m[2, 0]));
    q.z *= Mathf.Sign(q.z * (m[1, 0] - m[0, 1]));
    return q;
}
/// <summary>
/// Performs a rigid transform. Delegates to the wrapper's sl_transform_pose; all four
/// values are passed by reference and may be updated by the native call.
/// </summary>
/// <param name="quaternion">Rotation of the pose to transform.</param>
/// <param name="translation">Translation of the pose to transform.</param>
/// <param name="targetQuaternion">Rotation of the target transform.</param>
/// <param name="targetTranslation">Translation of the target transform.</param>
public static void TransformPose(ref Quaternion quaternion, ref Vector3 translation, ref Quaternion targetQuaternion, ref Vector3 targetTranslation)
{
    dllz_transform_pose(ref quaternion, ref translation, ref targetQuaternion, ref targetTranslation);
}
/// <summary>
/// Asks the wrapper DLL to generate a unique ID string.
/// </summary>
/// <returns>The generated ID, or "" if generation failed.</returns>
public static string GenerateUniqueID()
{
    byte[] array = new byte[37]; //Buffer size expected by the wrapper.
    int size = dllz_generate_unique_id(array);
    if (size <= 0)
    {
        return "";
    }
    //Only convert the 'size' bytes actually written. The old code ignored 'size' and
    //converted the whole buffer, embedding trailing '\0' characters in the result.
    return System.Text.Encoding.ASCII.GetString(array, 0, size);
}
/// <summary>
/// Checks that the ZED plugin's dependencies are installed and that the installed SDK is
/// compatible with this plugin version. Updates pluginIsReady to match the outcome.
/// </summary>
/// <returns>True if the plugin is ready to be used, false otherwise.</returns>
public static bool CheckPlugin()
{
    try
    {
        int res = dllz_check_plugin(PluginVersion.Major, PluginVersion.Minor);
        if (res != 0)
        {
            //0 = installed SDK is compatible with plugin. 1 otherwise.
            Debug.LogError(ZEDLogMessage.Error2Str(ZEDLogMessage.ERROR.SDK_DEPENDENCIES_ISSUE));
            pluginIsReady = false; //Previously left at its initial value (true) on this failure path.
            return false;
        }
    }
    catch (DllNotFoundException) //In case we could not resolve the .dll/.so.
    {
        Debug.Log("DllNotFoundException");
        Debug.LogError(ZEDLogMessage.Error2Str(ZEDLogMessage.ERROR.SDK_DEPENDENCIES_ISSUE));
        pluginIsReady = false;
        return false;
    }
    pluginIsReady = true;
    return true;
}
/// <summary>
/// Checks if the USB device of a 'brand' type is connected. Used to check if a VR headset is connected
/// for display in ZEDManager's Inspector.
/// </summary>
/// <returns>True, if USB device connected was found, false otherwise.</returns>
/// <param name="Type">Type.</param>
public static bool CheckUSBDeviceConnected(USB_DEVICE Type)
{
    //The wrapper already returns the boolean we need.
    return dllz_find_usb_device(Type);
}
/// <summary>
/// Constructor. Initializes the collections that hold references to created textures and
/// texture requests.
/// </summary>
public ZEDCamera()
{
    //Create the texture containers, indexed by TYPE_VIEW then VIEW option.
    textures = new Dictionary<int, Dictionary<int, Texture2D>>();
    texturesRequested = new List<TextureRequested>();
}
/// <summary>
/// Create a camera in Live mode (input comes from a connected ZED device, not SVO playback).
/// </summary>
/// <param name="cameraID">ID of the camera to create.</param>
/// <param name="verbose">True to create detailed log file of SDK calls at the cost of performance.</param>
public bool CreateCamera(int cameraID, bool verbose)
{
    //Only Direct3D 11 and OpenGL Core are supported by the native texture sharing.
    string gfxApi = SystemInfo.graphicsDeviceType.ToString().ToUpper();
    bool apiSupported = gfxApi.Equals("DIRECT3D11") || gfxApi.Equals("OPENGLCORE");
    if (!apiSupported)
    {
        throw new Exception("The graphic library [" + gfxApi + "] is not supported");
    }
    CameraID = cameraID;
    return dllz_create_camera(cameraID);
}
/// <summary>
/// Closes the camera and deletes all textures.
/// Once destroyed, you need to recreate a camera instance to restart again.
/// </summary>
public void Destroy()
{
    //Mark not-ready first so other threads stop using textures while we tear down.
    cameraReady = false;
    dllz_close(CameraID);
    DestroyAllTexture();
}
/// <summary>
/// DLL-friendly version of InitParameters (found in ZEDCommon.cs).
/// Field order and marshaling must match the native struct exactly (LayoutKind.Sequential) —
/// do not reorder or remove fields.
/// </summary>
[StructLayout(LayoutKind.Sequential)]
public struct dll_initParameters
{
    /// Input source (live camera, SVO file, or stream).
    public sl.INPUT_TYPE inputType;
    /// Resolution the ZED will be set to.
    public sl.RESOLUTION resolution;
    /// Desired camera FPS. Max is set by resolution.
    public int cameraFps;
    /// ID for identifying which of multiple connected ZEDs to use.
    public int cameraDeviceID;
    /// True to flip images horizontally. Marshaled as int, not bool.
    public int cameraImageFlip;
    /// True to disable self-calibration, using unoptimized optional calibration parameters.
    /// False is recommended for optimized calibration.
    [MarshalAs(UnmanagedType.U1)]
    public bool cameraDisableSelfCalib;
    /// True if depth relative to the right sensor should be computed.
    [MarshalAs(UnmanagedType.U1)]
    public bool enableRightSideMeasure;
    /// True to skip dropped frames during SVO playback.
    [MarshalAs(UnmanagedType.U1)]
    public bool svoRealTimeMode;
    /// Quality level of depth calculations. Higher settings improve accuracy but cost performance.
    public sl.DEPTH_MODE depthMode;
    /// True to stabilize the depth map. Recommended.
    [MarshalAs(UnmanagedType.U1)]
    public bool depthStabilization;
    /// Minimum distance from the camera from which depth will be computed, in the defined coordinateUnit.
    public float depthMinimumDistance;
    /// Maximum distance that can be computed.
    public float depthMaximumDistance;
    /// Coordinate unit for all measurements (depth, tracking, etc.). Meters are recommended for Unity.
    public UNIT coordinateUnit;
    /// Defines order and direction of coordinate system axes. Unity uses left-handed, Y up, so this setting is recommended.
    public COORDINATE_SYSTEM coordinateSystem;
    /// ID of the graphics card on which the ZED's computations will be performed.
    public int sdkGPUId;
    /// True for the SDK to provide text feedback. Marshaled as int, not bool.
    public int sdkVerbose;
    /// True if sensors are required; false will not trigger an error if sensors are missing.
    [MarshalAs(UnmanagedType.U1)]
    public bool sensorsRequired;
    /// Whether to enable improved color/gamma curves added in ZED SDK 3.0.
    [MarshalAs(UnmanagedType.U1)]
    public bool enableImageEnhancement;
    /// Set an optional file path where the SDK can find a file containing the calibration information of the camera computed by OpenCV.
    /// Using this will disable the factory calibration of the camera.
    /// Erroneous calibration values can lead to poor SDK modules accuracy.
    public string optionalOpencvCalibrationFile;
    /// Define a timeout in seconds after which an error is reported if the \ref open() command fails.
    /// Set to '-1' to try to open the camera endlessly without returning error in case of failure.
    /// Set to '0' to return error in case of failure at the first attempt.
    /// This parameter only impacts the LIVE mode.
    public float openTimeoutSec;
    /// <summary>
    /// Copy constructor. Takes values from the Unity-suited InitParameters class.
    /// Note: string fields passed separately to dllz_open (SVO path, IP, log file paths) are not copied here.
    /// </summary>
    /// <param name="init">Source parameters to copy from.</param>
    public dll_initParameters(InitParameters init)
    {
        inputType = init.inputType;
        resolution = init.resolution;
        cameraFps = init.cameraFPS;
        svoRealTimeMode = init.svoRealTimeMode;
        coordinateUnit = init.coordinateUnit;
        depthMode = init.depthMode;
        depthMinimumDistance = init.depthMinimumDistance;
        depthMaximumDistance = init.depthMaximumDistance;
        cameraImageFlip = init.cameraImageFlip;
        enableRightSideMeasure = init.enableRightSideMeasure;
        cameraDisableSelfCalib = init.cameraDisableSelfCalib;
        sdkVerbose = init.sdkVerbose;
        sdkGPUId = init.sdkGPUId;
        cameraDeviceID = init.cameraDeviceID;
        coordinateSystem = init.coordinateSystem;
        depthStabilization = init.depthStabilization;
        sensorsRequired = init.sensorsRequired;
        enableImageEnhancement = init.enableImageEnhancement;
        optionalOpencvCalibrationFile = init.optionalOpencvCalibrationFile;
        openTimeoutSec = init.openTimeoutSec;
    }
}
/// <summary>
/// Checks if the ZED camera is plugged in, opens it, and initializes the projection matrix and
/// cached calibration values used elsewhere in this class.
/// </summary>
/// <param name="initParameters">Class with all initialization settings. May be modified: cameraFPS
/// is filled in with the resolution's max FPS if left at 0, and the coordinate system is forced to
/// Unity's left-handed Y-up convention.</param>
/// <returns>ERROR_CODE: the error code gives information about the internal connection process.
/// If SUCCESS is returned, the camera is ready to use. Every other code indicates an error.</returns>
public ERROR_CODE Init(ref InitParameters initParameters)
{
    //Update values with what we're about to pass to the camera.
    currentResolution = initParameters.resolution;
    fpsMax = GetFpsForResolution(currentResolution);
    if (initParameters.cameraFPS == 0)
    {
        //0 means "use the default" — substitute the max FPS for the chosen resolution.
        initParameters.cameraFPS = (int)fpsMax;
    }
    dll_initParameters initP = new dll_initParameters(initParameters); //DLL-friendly version of InitParameters.
    initP.coordinateSystem = COORDINATE_SYSTEM.LEFT_HANDED_Y_UP; //Left-hand, Y-up is Unity's coordinate system, so we match that.
    //String parameters are passed separately as StringBuilders so the native side can read them.
    int v = dllz_open(CameraID, ref initP,
        new System.Text.StringBuilder(initParameters.pathSVO, initParameters.pathSVO.Length),
        new System.Text.StringBuilder(initParameters.ipStream, initParameters.ipStream.Length),
        initParameters.portStream,
        new System.Text.StringBuilder(initParameters.sdkVerboseLogFile, initParameters.sdkVerboseLogFile.Length),
        new System.Text.StringBuilder(initParameters.optionalSettingsPath, initParameters.optionalSettingsPath.Length),
        new System.Text.StringBuilder(initParameters.optionalOpencvCalibrationFile, initParameters.optionalOpencvCalibrationFile.Length));
    if ((ERROR_CODE)v != ERROR_CODE.SUCCESS)
    {
        cameraReady = false;
        return (ERROR_CODE)v;
    }
    //Set more values if the initialization was successful.
    imageWidth = dllz_get_width(CameraID);
    imageHeight = dllz_get_height(CameraID);
    if (imageWidth > 0 && imageHeight > 0)
    {
        //Cache calibration-derived values so later getters don't need native calls.
        GetCalibrationParameters(false);
        FillProjectionMatrix();
        baseline = calibrationParametersRectified.Trans[0];
        fov_H = calibrationParametersRectified.leftCam.hFOV * Mathf.Deg2Rad;
        fov_V = calibrationParametersRectified.leftCam.vFOV * Mathf.Deg2Rad;
        cameraModel = GetCameraModel();
        cameraReady = true;
        return (ERROR_CODE)v;
    }
    else
        return sl.ERROR_CODE.CAMERA_NOT_INITIALIZED;
}
/// <summary>
/// Fills the projection matrix with the parameters of the ZED. Needs to be called only once.
/// This projection matrix is off-center (it accounts for the calibrated principal point).
/// </summary>
/// <param name="zFar">Far clip plane distance.</param>
/// <param name="zNear">Near clip plane distance.</param>
public void FillProjectionMatrix(float zFar = 500, float zNear = 0.1f)
{
    CalibrationParameters calib = GetCalibrationParameters(false);
    float fovx = calib.leftCam.hFOV * Mathf.Deg2Rad;
    float fovy = calib.leftCam.vFOV * Mathf.Deg2Rad;
    float width = (float)ImageWidth;
    float height = (float)ImageHeight;
    //Start from an all-zero matrix, then fill in only the non-zero entries.
    for (int row = 0; row < 4; ++row)
    {
        for (int col = 0; col < 4; ++col)
        {
            projection[row, col] = 0;
        }
    }
    projection[0, 0] = 1.0f / Mathf.Tan(fovx * 0.5f); //Horizontal FoV.
    projection[0, 2] = 2.0f * ((width - 1.0f * calib.leftCam.cx) / width) - 1.0f; //Horizontal offset (principal point).
    projection[1, 1] = 1.0f / Mathf.Tan(fovy * 0.5f); //Vertical FoV.
    projection[1, 2] = -(2.0f * ((height - 1.0f * calib.leftCam.cy) / height) - 1.0f); //Vertical offset (principal point).
    projection[2, 2] = -(zFar + zNear) / (zFar - zNear); //Near and far planes.
    projection[2, 3] = -(2.0f * zFar * zNear) / (zFar - zNear); //Near and far planes.
    projection[3, 2] = -1; //Perspective divide.
}
/// <summary>
/// Grabs a new image, rectifies it, and computes the disparity map and (optionally) the depth map.
/// The grabbing function is typically called in the main loop in a separate thread.
/// For more info, read about the SDK function it calls:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/classsl_1_1Camera.html#afa3678a18dd574e162977e97d7cbf67b
/// </summary>
/// <param name="runtimeParameters">Struct holding all grab parameters.</param>
/// <returns>ERROR_CODE.SUCCESS if a frame was grabbed; any other code indicates a problem.</returns>
public sl.ERROR_CODE Grab(ref sl.RuntimeParameters runtimeParameters)
{
    return (sl.ERROR_CODE)dllz_grab(CameraID, ref runtimeParameters);
}
/// <summary>
/// Returns the INPUT_TYPE currently used (live USB camera, SVO file, or stream).
/// </summary>
/// <returns>The current input type.</returns>
public sl.INPUT_TYPE GetInputType()
{
    return (sl.INPUT_TYPE)dllz_get_input_type(CameraID);
}
/// <summary>
/// Creates a file for recording the ZED's output into a .SVO or .AVI video.
/// An SVO is Stereolabs' own format designed for the ZED. It holds the video feed with timestamps
/// as well as info about the camera used to record it.
/// </summary>
/// <param name="videoFileName">Filename. Whether it ends with .svo or .avi defines its file type.</param>
/// <param name="compressionMode">How much compression to use.</param>
/// <param name="bitrate">Target bitrate; 0 lets the SDK choose.</param>
/// <param name="target_fps">Target recording FPS; 0 lets the SDK choose.</param>
/// <param name="transcode">Whether to transcode the stream when recording from a streaming input.</param>
/// <returns>An ERROR_CODE that defines if the file was successfully created and can be filled with images.</returns>
public ERROR_CODE EnableRecording(string videoFileName, SVO_COMPRESSION_MODE compressionMode = SVO_COMPRESSION_MODE.H264_BASED, int bitrate = 0, int target_fps = 0,bool transcode = false)
{
    return (ERROR_CODE)dllz_enable_recording(CameraID, new System.Text.StringBuilder(videoFileName, videoFileName.Length), (int)compressionMode,bitrate,target_fps,transcode);
}
/// <summary>
/// Stops recording to an SVO/AVI, if applicable, and closes the file.
/// </summary>
/// <returns>True if recording was successfully stopped.</returns>
public bool DisableRecording()
{
    return dllz_disable_recording(CameraID);
}
/// <summary>
/// Sets the position of the SVO file currently being read to a desired frame.
/// </summary>
/// <param name="frame">Index of the desired frame to be decoded.</param>
public void SetSVOPosition(int frame)
{
    dllz_set_svo_position(CameraID, frame);
}
/// <summary>
/// Gets the current confidence threshold value for the disparity map (and by extension the depth map).
/// Values below the given threshold are removed from the depth map.
/// </summary>
/// <returns>Filtering value between 0 and 100.</returns>
public int GetConfidenceThreshold()
{
    return dllz_get_confidence_threshold(CameraID);
}
/// <summary>
/// Gets the timestamp at the time the latest grabbed frame was extracted from the USB stream.
/// This is the closest timestamp you can get from when the image was taken. Must be called after calling grab().
/// </summary>
/// <returns>Current timestamp in nanoseconds. -1 means it is not available, such as with an .SVO file without compression.</returns>
public ulong GetCameraTimeStamp()
{
    return dllz_get_image_timestamp(CameraID);
}
/// <summary>
/// Gets the current timestamp at the time the function is called. Can be compared to the camera timestamp
/// for synchronization, since they have the same reference (the computer's start time).
/// </summary>
/// <returns>The timestamp in nanoseconds.</returns>
public ulong GetCurrentTimeStamp()
{
    return dllz_get_current_timestamp(CameraID);
}
/// <summary>
/// Gets the current playback/record position within the SVO file.
/// </summary>
/// <returns>Index of the current SVO frame.</returns>
public int GetSVOPosition()
{
    return dllz_get_svo_position(CameraID);
}
/// <summary>
/// Gets the total number of frames in the loaded SVO file.
/// </summary>
/// <returns>Total frames in the SVO file. Returns -1 if the SDK is not reading an SVO.</returns>
public int GetSVONumberOfFrames()
{
    return dllz_get_svo_number_of_frames(CameraID);
}
/// <summary>
/// Gets the closest measurable distance by the camera, according to the camera type and depth map parameters.
/// </summary>
/// <returns>The nearest possible depth value.</returns>
public float GetDepthMinRangeValue()
{
    return dllz_get_depth_min_range_value(CameraID);
}
/// <summary>
/// Returns the current maximum distance of depth/disparity estimation.
/// </summary>
/// <returns>The farthest possible depth value.</returns>
public float GetDepthMaxRangeValue()
{
    return dllz_get_depth_max_range_value(CameraID);
}
/// <summary>
/// Initializes and starts the motion tracking functions.
/// </summary>
/// <param name="quat">Rotation used as initial world transform. By default it should be identity.</param>
/// <param name="vec">Translation used as initial world transform. By default it should be identity.</param>
/// <param name="enableSpatialMemory">(optional) Whether spatial memory is enabled.</param>
/// <param name="enablePoseSmoothing">(optional) Whether to smooth the pose output.</param>
/// <param name="enableFloorAlignment">(optional) Whether to align the world frame with the floor plane.</param>
/// <param name="trackingIsStatic">(optional) Whether the camera is known to be static.</param>
/// <param name="enableIMUFusion">(optional) Whether to fuse IMU data into the pose.</param>
/// <param name="areaFilePath">(optional) Spatial memory file to load to relocalize in the scene.</param>
/// <returns>Error code reported by the ZED SDK.</returns>
public sl.ERROR_CODE EnableTracking(ref Quaternion quat, ref Vector3 vec, bool enableSpatialMemory = true, bool enablePoseSmoothing = false, bool enableFloorAlignment = false, bool trackingIsStatic = false,
bool enableIMUFusion = true, string areaFilePath = "")
{
    //Area file path is passed as a StringBuilder so the native side can read it.
    System.Text.StringBuilder areaPath = new System.Text.StringBuilder(areaFilePath, areaFilePath.Length);
    return (sl.ERROR_CODE)dllz_enable_tracking(CameraID, ref quat, ref vec, enableSpatialMemory, enablePoseSmoothing, enableFloorAlignment,
        trackingIsStatic, enableIMUFusion, areaPath);
}
/// <summary>
/// Resets the motion tracking to the given world transform.
/// </summary>
/// <param name="rotation">Rotation the tracking world frame is reset to.</param>
/// <param name="translation">Translation the tracking world frame is reset to.</param>
/// <returns>Error code reported by the ZED SDK.</returns>
public sl.ERROR_CODE ResetTracking(Quaternion rotation, Vector3 translation)
{
    return (sl.ERROR_CODE)dllz_reset_tracking(CameraID, rotation, translation);
}
/// <summary>
/// Resets the motion tracking to the given world transform, applying an additional offset transform.
/// </summary>
/// <param name="rotation">Rotation the tracking world frame is reset to.</param>
/// <param name="translation">Translation the tracking world frame is reset to.</param>
/// <param name="rotationOffset">Rotational offset applied on top of the reset pose.</param>
/// <param name="translationOffset">Positional offset applied on top of the reset pose.</param>
/// <returns>Error code reported by the ZED SDK.</returns>
public sl.ERROR_CODE ResetTrackingWithOffset(Quaternion rotation, Vector3 translation, Quaternion rotationOffset, Vector3 translationOffset)
{
    return (sl.ERROR_CODE)dllz_reset_tracking_with_offset(CameraID, rotation, translation, rotationOffset, translationOffset);
}
/// <summary>
/// Estimates the camera's initial pose (e.g. relative to the floor) and fills the given rotation/translation.
/// </summary>
/// <param name="rotation">Filled with the estimated initial rotation.</param>
/// <param name="translation">Filled with the estimated initial translation.</param>
/// <returns>Error code reported by the ZED SDK.</returns>
public sl.ERROR_CODE EstimateInitialPosition(ref Quaternion rotation, ref Vector3 translation)
{
    //The last two arguments (2, 100) are fixed estimation settings passed to the wrapper.
    //NOTE(review): their exact meaning isn't visible from here — confirm against the wrapper signature.
    return (sl.ERROR_CODE)dllz_estimate_initial_position(CameraID, ref rotation, ref translation, 2, 100);
}
/// <summary>
/// Stops the motion tracking. To restart, call EnableTracking().
/// </summary>
/// <param name="path">The path to save the area file to. Empty string skips saving.</param>
public void DisableTracking(string path = "")
{
    dllz_disable_tracking(CameraID, new System.Text.StringBuilder(path, path.Length));
}
/// <summary>
/// Saves the current spatial-memory area file to the given path. Check GetAreaExportState() for progress.
/// </summary>
/// <param name="path">Destination file path for the area file.</param>
/// <returns>Error code reported by the ZED SDK.</returns>
public sl.ERROR_CODE SaveCurrentArea(string path)
{
    return (sl.ERROR_CODE)dllz_save_current_area(CameraID, new System.Text.StringBuilder(path, path.Length));
}
/// <summary>
/// Returns the current state of the area-learning (spatial memory) save operation.
/// </summary>
/// <returns>State of the asynchronous area export.</returns>
public sl.AREA_EXPORT_STATE GetAreaExportState()
{
    return (sl.AREA_EXPORT_STATE)dllz_get_area_export_state(CameraID);
}
/// <summary>
/// Registers a newly-created texture in the lookup dictionary and records the request so
/// DestroyAllTexture() can later destroy it with the same type/option arguments.
/// </summary>
/// <param name="m_Texture">The texture to register.</param>
/// <param name="type">Texture type (TYPE_VIEW as int).</param>
/// <param name="mode">View/measure option (VIEW or MEASURE as int).</param>
private void RegisterTexture(Texture2D m_Texture, int type, int mode)
{
    TextureRequested request = new TextureRequested
    {
        type = type,
        option = mode
    };
    texturesRequested.Add(request);
    textures[type].Add(mode, m_Texture);
}
/// <summary>
/// Creates or retrieves a texture of type Image. Will be updated each frame automatically.
/// Image type textures are human-viewable, but have less accuracy than measure types.
/// </summary><remarks>
/// Note that the new texture will exist on the GPU, so accessing from the CPU will result in an empty image. To get images
/// with the CPU, use RetrieveImage() instead and specify CPU memory in the arguments.
/// </remarks>
/// <param name="mode">What the image shows (left RGB image, right depth image, normal map, etc.)</param>
/// <param name="resolution">Resolution of the image. Should correspond to ZED's current resolution.
/// Leave default (0x0) to use the camera's current image size.</param>
/// <returns>Texture2D that will update each frame with the ZED SDK's output, or null if the camera isn't ready
/// or registration with the wrapper failed.</returns>
public Texture2D CreateTextureImageType(VIEW mode, Resolution resolution = new Resolution())
{
    //Reuse an existing texture of this view type if one was already created.
    if (HasTexture((int)TYPE_VIEW.RETRIEVE_IMAGE, (int)mode))
    {
        return textures[(int)TYPE_VIEW.RETRIEVE_IMAGE][(int)mode];
    }
    if (!cameraReady)
        return null;
    int width = ImageWidth;
    int height = imageHeight;
    if (!((uint)resolution.width == 0 && (uint)resolution.height == 0)) //Handles if Resolution arg was empty, as in the default.
    {
        width = (int)resolution.width;
        height = (int)resolution.height;
    }
    //Pick a texture format appropriate for the view type.
    Texture2D m_Texture;
    if (mode == VIEW.LEFT_GREY || mode == VIEW.RIGHT_GREY || mode == VIEW.LEFT_UNRECTIFIED_GREY || mode == VIEW.RIGHT_UNRECTIFIED_GREY)
    {
        m_Texture = new Texture2D(width, height, TextureFormat.Alpha8, false); //Greyscale needs only one channel.
    }
    else if (mode == VIEW.SIDE_BY_SIDE)
    {
        m_Texture = new Texture2D(width * 2, height, TextureFormat.RGBA32, false); //Needs to be twice as wide for SBS because there are two images.
    }
    else
    {
        m_Texture = new Texture2D(width, height, TextureFormat.RGBA32, false);
    }
    m_Texture.filterMode = FilterMode.Point;
    m_Texture.wrapMode = TextureWrapMode.Clamp;
    m_Texture.Apply(); //Uploads to the GPU so a native texture pointer exists.
    IntPtr idTexture = m_Texture.GetNativeTexturePtr();
    //Hand the native texture pointer to the wrapper so it can write new frames into it directly.
    int error = dllz_register_texture_image_type(CameraID, (int)mode, idTexture, (int)resolution.width, (int)resolution.height);
    if (error != 0)
    {
        throw new Exception("CUDA error:" + error + " if the problem appears again, please contact Stereolabs support.");
    }
    if (!textures.ContainsKey((int)TYPE_VIEW.RETRIEVE_IMAGE))
    {
        textures.Add((int)TYPE_VIEW.RETRIEVE_IMAGE, new Dictionary());
    }
    RegisterTexture(m_Texture, (int)TYPE_VIEW.RETRIEVE_IMAGE, (int)mode); //Save so you don't make a duplicate if another script needs the texture.
    return m_Texture;
}
/// <summary>
/// Creates or retrieves a texture of type Measure. Will be updated each frame automatically.
/// Measure types are not human-viewable, but don't lose any accuracy.
/// </summary><remarks>
/// Note that the new texture will exist on the GPU, so accessing from the CPU will result in an empty image. To get images
/// with the CPU, use RetrieveMeasure() instead and specify CPU memory in the arguments.
/// </remarks>
/// <param name="mode">What the image shows (disparity, depth, confidence, etc.)</param>
/// <param name="resolution">Resolution of the image. Should correspond to ZED's current resolution.
/// Leave default (0x0) to use the camera's current image size.</param>
/// <returns>Texture2D that will update each frame with the ZED SDK's output, or null if the camera isn't ready
/// or registration with the wrapper failed.</returns>
public Texture2D CreateTextureMeasureType(MEASURE mode, Resolution resolution = new Resolution())
{
    //Reuse an existing texture of this measure type if one was already created.
    if (HasTexture((int)TYPE_VIEW.RETRIEVE_MEASURE, (int)mode))
    {
        return textures[(int)TYPE_VIEW.RETRIEVE_MEASURE][(int)mode];
    }
    if (!cameraReady)
        return null;
    Texture2D m_Texture;
    int width = ImageWidth;
    int height = imageHeight;
    if (!((uint)resolution.width == 0 && (uint)resolution.height == 0))
    {
        width = (int)resolution.width;
        height = (int)resolution.height;
    }
    //Handle the mode options: pick a float format wide enough for the measure's channel count.
    if (mode == MEASURE.XYZ || mode == MEASURE.XYZABGR || mode == MEASURE.XYZARGB || mode == MEASURE.XYZBGRA || mode == MEASURE.XYZRGBA || mode == MEASURE.NORMALS
        || mode == MEASURE.XYZ_RIGHT || mode == MEASURE.XYZABGR_RIGHT || mode == MEASURE.XYZARGB_RIGHT || mode == MEASURE.XYZBGRA_RIGHT || mode == MEASURE.XYZRGBA_RIGHT || mode == MEASURE.NORMALS_RIGHT)
    {
        m_Texture = new Texture2D(width, height, TextureFormat.RGBAFloat, false, true); //4-channel float measures.
    }
    else if (mode == MEASURE.DEPTH || mode == MEASURE.CONFIDENCE || mode == MEASURE.DISPARITY || mode == MEASURE.DEPTH_RIGHT || mode == MEASURE.DISPARITY_RIGHT)
    {
        m_Texture = new Texture2D(width, height, TextureFormat.RFloat, false, true); //Single-channel float measures.
    }
    else
    {
        m_Texture = new Texture2D(width, height, TextureFormat.RGBA32, false, true);
    }
    //Bilinear when a custom (scaled) resolution is requested; Point otherwise to keep exact values.
    if (!((uint)resolution.width == 0 && (uint)resolution.height == 0))
    {
        m_Texture.filterMode = FilterMode.Bilinear;
    }
    else
    {
        m_Texture.filterMode = FilterMode.Point;
    }
    m_Texture.wrapMode = TextureWrapMode.Clamp;
    m_Texture.Apply(); //Uploads to the GPU so a native texture pointer exists.
    IntPtr idTexture = m_Texture.GetNativeTexturePtr();
    //Hand the native texture pointer to the wrapper so it can write new measures into it directly.
    int error = dllz_register_texture_measure_type(CameraID, (int)mode, idTexture, (int)resolution.width, (int)resolution.height);
    if (error != 0)
    {
        throw new Exception("CUDA error:" + error + " if the problem appears again, please contact Stereolabs support.");
    }
    if (!textures.ContainsKey((int)TYPE_VIEW.RETRIEVE_MEASURE))
    {
        textures.Add((int)TYPE_VIEW.RETRIEVE_MEASURE, new Dictionary());
    }
    RegisterTexture(m_Texture, (int)TYPE_VIEW.RETRIEVE_MEASURE, (int)mode); //Save to avoid duplicates if texture type is needed elsewhere.
    return m_Texture;
}
/// <summary>
/// Unregisters a texture of type Image. The texture will be destroyed and will no longer be updated each frame.
/// </summary>
/// <param name="view">What the image was showing (left RGB image, right depth image, normal map, etc.)</param>
/// <returns>True if the wrapper reported a successful unregister.</returns>
public bool UnregisterTextureImageType(sl.VIEW view)
{
    DestroyTextureImageType((int)view); //Drop the managed reference first.
    return dllz_unregister_texture_image_type(CameraID, (int)view) != 0;
}
/// <summary>
/// Unregisters a texture of type Measure. The texture will be destroyed and will no longer be updated each frame.
/// </summary>
/// <param name="measure">What the measure was showing (disparity, depth, confidence, etc.)</param>
/// <returns>True if the wrapper reported a successful unregister.</returns>
public bool UnregisterTextureMeasureType(sl.MEASURE measure)
{
    DestroyTextureMeasureType((int)measure); //Drop the managed reference first.
    return dllz_unregister_texture_measure_type(CameraID, (int)measure) != 0;
}
/// <summary>
/// Copies a texture of type Image into a ZEDMat. This function should be called after a Grab() and an UpdateTextures().
/// </summary>
/// <param name="view">View type (left rgb, right depth, etc.)</param>
/// <returns>New ZEDMat for an image texture of the selected view type.</returns>
public ZEDMat RequestCopyMatFromTextureImageType(sl.VIEW view)
{
    return new ZEDMat(dllz_get_copy_mat_texture_image_type(CameraID, (int)view));
}
/// <summary>
/// Copies a texture of type Measure into a ZEDMat. This function should be called after a Grab() and an UpdateTextures().
/// </summary>
/// <param name="measure">Measure type (depth, disparity, confidence, etc.)</param>
/// <returns>New ZEDMat for a measure texture of the selected measure type.</returns>
public ZEDMat RequestCopyMatFromTextureMeasureType(sl.MEASURE measure)
{
    return new ZEDMat(dllz_get_copy_mat_texture_measure_type(CameraID, (int)measure));
}
/// <summary>
/// Destroys a texture and removes its reference in the textures list.
/// </summary>
/// <param name="type">Type of texture as an int (0 for Image, 1 for Measure).</param>
/// <param name="option">Corresponding options enum (sl.VIEW if Image type, sl.MEASURE if Measure type) as an integer.</param>
private void DestroyTexture(int type, int option)
{
    if (!textures.ContainsKey(type) || !textures[type].ContainsKey(option))
    {
        return; //Nothing registered under this type/option pair.
    }
    textures[type][option] = null; //Drop the reference so the texture can be collected.
    textures[type].Remove(option);
    if (textures[type].Count == 0)
    {
        textures.Remove(type); //Remove the now-empty per-type dictionary as well.
    }
}
/// <summary>
/// Destroys all textures that were ever requested, using the recorded request list.
/// </summary>
private void DestroyAllTexture()
{
    if (!cameraReady)
    {
        return; //No textures can exist before the camera is initialized.
    }
    for (int i = 0; i < texturesRequested.Count; ++i)
    {
        DestroyTexture(texturesRequested[i].type, texturesRequested[i].option);
    }
    texturesRequested.Clear();
}
/// <summary>
/// Destroys a texture created with CreateTextureImageType().
/// </summary>
/// <param name="option">View type (left RGB, right depth image, etc.) as an integer.</param>
private void DestroyTextureImageType(int option)
{
    DestroyTexture((int)TYPE_VIEW.RETRIEVE_IMAGE, option);
}
/// <summary>
/// Destroys a texture created with CreateTextureMeasureType().
/// </summary>
/// <param name="option">Measure type (depth, confidence, etc.) as an integer.</param>
private void DestroyTextureMeasureType(int option)
{
    DestroyTexture((int)TYPE_VIEW.RETRIEVE_MEASURE, option);
}
/// <summary>
/// Retrieves a texture that was already created, or null if none exists for the given type/mode.
/// </summary>
/// <param name="type">Type of texture (Image or Measure).</param>
/// <param name="mode">Corresponding options enum (sl.VIEW if Image type, sl.MEASURE if Measure type) as an integer.</param>
/// <returns>Existing texture of the given type/mode, or null.</returns>
public Texture2D GetTexture(TYPE_VIEW type, int mode)
{
    return HasTexture((int)type, mode) ? textures[(int)type][mode] : null;
}
/// <summary>
/// Checks if a texture of a given type has already been created.
/// </summary>
/// <param name="type">Type of texture as an integer (0 for Image, 1 for Measure).</param>
/// <param name="mode">Corresponding options enum (sl.VIEW if Image type, sl.MEASURE if Measure type) as an integer.</param>
/// <returns>True if the texture is available.</returns>
private bool HasTexture(int type, int mode)
{
    if (cameraReady) //Texture can't exist if the ZED hasn't been initialized yet.
    {
        //Idiom fix: removed redundant (int) casts — both parameters are already int.
        return textures.ContainsKey(type) && textures[type].ContainsKey(mode);
    }
    return false;
}
/// <summary>
/// Returns the current camera FPS. This is limited primarily by resolution but can also be lower due to
/// setting a lower desired resolution in Init() or from USB connection/bandwidth issues.
/// </summary>
/// <returns>The current fps.</returns>
public float GetCameraFPS()
{
    return dllz_get_camera_fps(CameraID);
}
/// <summary>
/// Retrieves the camera's calibration parameters from the wrapper and caches them in
/// calibrationParametersRaw or calibrationParametersRectified.
/// </summary>
/// <param name="raw">True for unrectified (raw) parameters; false for rectified.</param>
/// <returns>The marshaled calibration parameters, or an empty struct if the wrapper returned null.</returns>
public CalibrationParameters GetCalibrationParameters(bool raw = false)
{
    IntPtr ptr = dllz_get_calibration_parameters(CameraID, raw);
    if (ptr == IntPtr.Zero)
    {
        return new CalibrationParameters(); //Nothing available; hand back an empty struct.
    }
    CalibrationParameters parameters = (CalibrationParameters)Marshal.PtrToStructure(ptr, typeof(CalibrationParameters));
    //Cache so other members (baseline, FoV getters) can read without another native call.
    if (raw)
    {
        calibrationParametersRaw = parameters;
    }
    else
    {
        calibrationParametersRectified = parameters;
    }
    return parameters;
}
/// <summary>
/// Retrieves the configuration of the camera's internal sensors from the wrapper.
/// </summary>
/// <returns>The marshaled sensors configuration, or an empty struct if the wrapper returned null.</returns>
public SensorsConfiguration GetInternalSensorsConfiguration()
{
    IntPtr ptr = dllz_get_sensors_configuration(CameraID);
    if (ptr == IntPtr.Zero)
    {
        return new SensorsConfiguration(); //Nothing available; hand back an empty struct.
    }
    return (SensorsConfiguration)Marshal.PtrToStructure(ptr, typeof(SensorsConfiguration));
}
/// <summary>
/// Gets the ZED camera model (ZED or ZED Mini).
/// </summary>
/// <returns>Model of the ZED as sl.MODEL.</returns>
public sl.MODEL GetCameraModel()
{
    return (sl.MODEL)dllz_get_camera_model(CameraID);
}
/// <summary>
/// Gets the ZED's camera firmware version.
/// </summary>
/// <returns>Firmware version.</returns>
public int GetCameraFirmwareVersion()
{
    return dllz_get_camera_firmware(CameraID);
}
/// <summary>
/// Gets the ZED's sensors firmware version.
/// </summary>
/// <returns>Firmware version.</returns>
public int GetSensorsFirmwareVersion()
{
    return dllz_get_sensors_firmware(CameraID);
}
/// <summary>
/// Gets the ZED's serial number.
/// </summary>
/// <returns>Serial number.</returns>
public int GetZEDSerialNumber()
{
    return dllz_get_zed_serial(CameraID);
}
/// <summary>
/// Returns the ZED's vertical field of view in radians, from the rectified left camera calibration.
/// </summary>
/// <returns>Vertical field of view in radians.</returns>
public float GetFOV()
{
    return GetCalibrationParameters(false).leftCam.vFOV * Mathf.Deg2Rad;
}
/// <summary>
/// Computes textures from the ZED. The new textures will not be displayed until an event is sent to the render thread.
/// This event is called from UpdateTextures().
/// </summary>
public void RetrieveTextures()
{
    dllz_retrieve_textures(CameraID);
}
/// <summary>
/// Swaps textures safely between the acquisition and rendering threads.
/// </summary>
public void SwapTextures()
{
    dllz_swap_textures(CameraID);
}
/// <summary>
/// Timestamp of the images used the last time the ZED wrapper updated textures.
/// </summary>
/// <returns>Timestamp in nanoseconds.</returns>
public ulong GetImagesTimeStamp()
{
    return dllz_get_updated_textures_timestamp(CameraID);
}
/// <summary>
/// Performs a new self-calibration process.
/// In some cases, due to temperature changes or strong vibrations, the stereo calibration becomes less accurate.
/// Use this function to update the self-calibration data and get more reliable depth values.
/// The self-calibration will occur at the next grab() call.
/// New values will then be available in getCameraInformation(); be sure to get them to still have a consistent 2D &lt;-&gt; 3D conversion.
/// </summary>
public void UpdateSelfCalibration()
{
    dllz_update_self_calibration(CameraID);
}
/// <summary>
/// Gets the number of frames dropped since Grab() was called for the first time.
/// Based on camera timestamps and an FPS comparison.
/// Similar to the Frame Drop display in the ZED Explorer app.
/// </summary>
/// <returns>Frames dropped since first Grab() call.</returns>
public uint GetFrameDroppedCount()
{
    return dllz_get_frame_dropped_count(CameraID);
}
/// <summary>
/// Gets the percentage of frames dropped since Grab() was called for the first time.
/// </summary>
/// <returns>Percentage of frames dropped.</returns>
public float GetFrameDroppedPercent()
{
    return dllz_get_frame_dropped_percent(CameraID);
}
/// <summary>
/// Gets the position of the camera and the current state of the ZED Tracking.
/// </summary>
/// <param name="rotation">Quaternion filled with the current rotation of the camera depending on its reference frame.</param>
/// <param name="position">Vector filled with the current position of the camera depending on its reference frame.</param>
/// <param name="referenceType">Reference frame for setting the rotation/position. CAMERA gives movement relative to the last pose.
/// WORLD gives cumulative movements since tracking started.</param>
/// <returns>State of ZED's Tracking system (off, searching, ok).</returns>
public TRACKING_STATE GetPosition(ref Quaternion rotation, ref Vector3 position, REFERENCE_FRAME referenceType = REFERENCE_FRAME.WORLD)
{
    return (TRACKING_STATE)dllz_get_position(CameraID, ref rotation, ref position, (int)referenceType);
}
/// <summary>
/// Gets the current position of the camera and state of the tracking, with an optional offset to the tracking frame.
/// </summary>
/// <param name="rotation">Quaternion filled with the current rotation of the camera depending on its reference frame.</param>
/// <param name="translation">Vector filled with the current position of the camera depending on its reference frame.</param>
/// <param name="targetQuaternion">Rotational offset applied to the tracking frame.</param>
/// <param name="targetTranslation">Positional offset applied to the tracking frame.</param>
/// <param name="referenceFrame">Reference frame for setting the rotation/position. CAMERA gives movement relative to the last pose.
/// WORLD gives cumulative movements since tracking started.</param>
/// <returns>State of ZED's Tracking system (off, searching, ok).</returns>
public TRACKING_STATE GetPosition(ref Quaternion rotation, ref Vector3 translation, ref Quaternion targetQuaternion, ref Vector3 targetTranslation, REFERENCE_FRAME referenceFrame = REFERENCE_FRAME.WORLD)
{
    return (TRACKING_STATE)dllz_get_position_at_target_frame(CameraID, ref rotation, ref translation, ref targetQuaternion, ref targetTranslation, (int)referenceFrame);
}
/// <summary>
/// Gets the current position of the camera and state of the tracking, with a defined tracking frame.
/// A tracking frame defines what part of the ZED is its center for tracking purposes. See ZEDCommon.TRACKING_FRAME.
/// </summary>
/// <param name="rotation">Quaternion filled with the current rotation of the camera depending on its reference frame.</param>
/// <param name="translation">Vector filled with the current position of the camera depending on its reference frame.</param>
/// <param name="trackingFrame">Center of the ZED for tracking purposes (left eye, center, right eye).</param>
/// <param name="referenceFrame">Reference frame for setting the rotation/position. CAMERA gives movement relative to the last pose.
/// WORLD gives cumulative movements since tracking started.</param>
/// <returns>State of ZED's Tracking system (off, searching, ok).</returns>
public TRACKING_STATE GetPosition(ref Quaternion rotation, ref Vector3 translation, TRACKING_FRAME trackingFrame, REFERENCE_FRAME referenceFrame = REFERENCE_FRAME.WORLD)
{
    Quaternion rotationOffset = Quaternion.identity;
    Vector3 positionOffset;
    //Shift the reported pose laterally so it's centered on the requested part of the camera.
    if (trackingFrame == sl.TRACKING_FRAME.RIGHT_EYE)
    {
        positionOffset = new Vector3(Baseline, 0, 0);
    }
    else if (trackingFrame == sl.TRACKING_FRAME.CENTER_EYE)
    {
        positionOffset = new Vector3(Baseline / 2.0f, 0, 0);
    }
    else //LEFT_EYE is the tracking origin, so no offset.
    {
        positionOffset = Vector3.zero;
    }
    return (TRACKING_STATE)dllz_get_position_at_target_frame(CameraID, ref rotation, ref translation, ref rotationOffset, ref positionOffset, (int)referenceFrame);
}
/// <summary>
/// Gets the current position of the camera and state of the tracking, filling a Pose struct useful for AR pass-through.
/// </summary>
/// <param name="pose">Pose struct filled with the current pose.</param>
/// <param name="referenceType">Reference frame for setting the rotation/position. CAMERA gives movement relative to the last pose.
/// WORLD gives cumulative movements since tracking started.</param>
/// <returns>State of ZED's Tracking system (off, searching, ok).</returns>
public TRACKING_STATE GetPosition(ref Pose pose, REFERENCE_FRAME referenceType = REFERENCE_FRAME.WORLD)
{
    return (TRACKING_STATE)dllz_get_position_data(CameraID, ref pose, (int)referenceType);
}
/// <summary>
/// Sets a prior to the IMU orientation (only for ZED-M).
/// The prior must come from an external IMU, such as the HMD orientation, and should be in a time frame
/// that's as close as possible to the camera.
/// </summary>
/// <param name="rotation">Prior rotation.</param>
/// <returns>Error code status.</returns>
public ERROR_CODE SetIMUOrientationPrior(ref Quaternion rotation)
{
    return (sl.ERROR_CODE)dllz_set_imu_prior_orientation(CameraID, rotation);
}
/// <summary>
/// Gets the rotation given by the ZED-M/ZED2 IMU. Returns an error if using a ZED (v1), which has no internal sensors.
/// </summary>
/// <param name="rotation">Filled with the rotation from the IMU.</param>
/// <param name="referenceTime">Time reference (e.g. at image time or current time).</param>
/// <returns>Error code status.</returns>
public ERROR_CODE GetInternalIMUOrientation(ref Quaternion rotation, TIME_REFERENCE referenceTime = TIME_REFERENCE.IMAGE)
{
    return (sl.ERROR_CODE)dllz_get_internal_imu_orientation(CameraID, ref rotation, (int)referenceTime);
}
/// <summary>
/// Gets the full sensor data from the ZED-M or ZED2. Returns an error if using a ZED (v1), which has no internal sensors.
/// </summary>
/// <param name="data">Filled with the sensor data.</param>
/// <param name="referenceTime">Time reference (e.g. at image time or current time).</param>
/// <returns>Error code status.</returns>
public ERROR_CODE GetInternalSensorsData(ref SensorsData data, TIME_REFERENCE referenceTime = TIME_REFERENCE.IMAGE)
{
    return (sl.ERROR_CODE)dllz_get_internal_sensors_data(CameraID, ref data, (int)referenceTime);
}
/// <summary>
/// Copies a 16-element float array (row-major) into a Matrix4x4.
/// Does nothing if the array is null or not exactly 16 elements long.
/// </summary>
/// <param name="m">Matrix to be filled.</param>
/// <param name="f">Row-major float array of length 16.</param>
public static void Float2Matrix(ref Matrix4x4 m, float[] f) // 'public static' replaces non-idiomatic 'static public'
{
    // Silently ignore malformed input, matching the original tolerant behavior.
    if (f == null || f.Length != 16)
        return;
    for (int i = 0; i < 16; ++i)
    {
        // m[row, col] with row-major packing: element i maps to (i / 4, i % 4).
        m[i / 4, i % 4] = f[i];
    }
}
/// <summary>
/// Sets a value in the ZED's camera settings (brightness, contrast, gain, exposure, etc.).
/// Throws if the camera or plugin is not ready.
/// </summary>
/// <param name="settings">Setting to be changed.</param>
/// <param name="value">New value.</param>
public void SetCameraSettings(CAMERA_SETTINGS settings, int value)
{
    AssertCameraIsReady();
    dllz_set_video_settings(CameraID, (int)settings, value);
}
/// <summary>
/// Gets the current value of a given setting from the ZED camera.
/// Throws if the camera or plugin is not ready.
/// </summary>
/// <param name="settings">Setting to be retrieved (brightness, contrast, gain, exposure, etc.).</param>
/// <returns>Current value of the setting.</returns>
public int GetCameraSettings(CAMERA_SETTINGS settings)
{
    AssertCameraIsReady();
    int currentValue = dllz_get_video_settings(CameraID, (int)settings);
    return currentValue;
}
/// <summary>
/// Overload for CAMERA_SETTINGS.AEC_AGC_ROI, which requires an iRect as input.
/// </summary>
/// <param name="settings">Must be CAMERA_SETTINGS.AEC_AGC_ROI; any other value returns -1.</param>
/// <param name="side">Target sensor: left = 0, right = 1, both = 2.</param>
/// <param name="roi">Region of interest, as an sl.Rect.</param>
/// <param name="reset">True to reset the target to the full sensor.</param>
/// <returns>Wrapper status, or -1 if the wrong setting was passed.</returns>
public int SetCameraSettings(CAMERA_SETTINGS settings, int side, iRect roi, bool reset)
{
    AssertCameraIsReady();
    if (settings != CAMERA_SETTINGS.AEC_AGC_ROI)
        return -1;
    return dllz_set_roi_for_aec_agc(CameraID, side, roi, reset);
}
/// <summary>
/// Overload for CAMERA_SETTINGS.AEC_AGC_ROI, which fills an iRect with the current ROI.
/// </summary>
/// <param name="settings">Must be CAMERA_SETTINGS.AEC_AGC_ROI; any other value returns -1.</param>
/// <param name="side">Target sensor: left = 0, right = 1, both = 2.</param>
/// <param name="roi">Filled with the current region of interest.</param>
/// <returns>Wrapper status, or -1 if the wrong setting was passed.</returns>
public int GetCameraSettings(CAMERA_SETTINGS settings, int side, ref iRect roi)
{
    AssertCameraIsReady();
    if (settings != CAMERA_SETTINGS.AEC_AGC_ROI)
        return -1;
    return dllz_get_roi_for_aec_agc(CameraID, side, ref roi);
}
/// <summary>
/// Resets all camera settings to their default values, re-enables auto white balance,
/// auto exposure/gain, the status LED, and resets the AEC/AGC ROI to the full sensor on both sides.
/// </summary>
public void ResetCameraSettings()
{
    AssertCameraIsReady();
    SetCameraSettings(sl.CAMERA_SETTINGS.BRIGHTNESS, brightnessDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.CONTRAST, contrastDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.HUE, hueDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.SATURATION, saturationDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.SHARPNESS, sharpnessDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.GAMMA, gammaDefault);
    SetCameraSettings(sl.CAMERA_SETTINGS.AUTO_WHITEBALANCE, 1);
    SetCameraSettings(sl.CAMERA_SETTINGS.AEC_AGC, 1);
    SetCameraSettings(sl.CAMERA_SETTINGS.LED_STATUS, 1);
    // side = 2 targets both sensors; reset = true restores the full-sensor ROI.
    SetCameraSettings(sl.CAMERA_SETTINGS.AEC_AGC_ROI, 2, new sl.iRect(), true);
}
/// <summary>
/// Loads camera settings (brightness, contrast, hue, saturation, gain, exposure) from a file in the
/// project's root directory, delegating to the ZEDCameraSettingsManager instance.
/// </summary>
/// <param name="path">Filename of the settings file to load.</param>
public void LoadCameraSettings(string path)
{
cameraSettingsManager.LoadCameraSettings(this, path);
}
/// <summary>
/// Saves the camera settings (brightness, contrast, hue, saturation, gain, exposure) to a file
/// relative to the project's root directory, delegating to the ZEDCameraSettingsManager instance.
/// </summary>
/// <param name="path">Filename of the settings file to write.</param>
public void SaveCameraSettings(string path)
{
cameraSettingsManager.SaveCameraSettings(path);
}
/// <summary>
/// Retrieves the current camera settings from the ZED camera and loads them into the CameraSettings
/// instance held by ZEDCameraSettingsManager.
/// </summary>
public void RetrieveCameraSettings()
{
cameraSettingsManager.RetrieveSettingsCamera(this);
}
/// <summary>
/// Returns whether the camera's exposure mode is set to automatic,
/// as cached by the ZEDCameraSettingsManager instance.
/// </summary>
/// <returns>True if automatic, false if manual.</returns>
public bool GetExposureUpdateType()
{
return cameraSettingsManager.auto;
}
/// <summary>
/// Returns whether the camera's white balance is set to automatic,
/// as cached by the ZEDCameraSettingsManager instance.
/// </summary>
/// <returns>True if automatic, false if manual.</returns>
public bool GetWhiteBalanceUpdateType()
{
return cameraSettingsManager.whiteBalanceAuto;
}
/// <summary>
/// Applies all settings registered in the ZEDCameraSettingsManager instance to the actual ZED camera.
/// </summary>
public void SetCameraSettings()
{
cameraSettingsManager.SetSettings(this);
}
/// <summary>
/// Gets the version of the currently installed ZED SDK.
/// </summary>
/// <returns>ZED SDK version as a string in the format MAJOR.MINOR.PATCH.</returns>
public static string GetSDKVersion()
{
    var versionPtr = dllz_get_sdk_version();
    return PtrToStringUtf8(versionPtr);
}
/// <summary>
/// Lists all connected ZED devices with their associated information
/// (serial number, model, etc.).
/// </summary>
/// <param name="nbDevices">Set to the number of cameras actually detected.</param>
/// <returns>Array of device properties, sized to the plugin's camera limit; only the
/// first nbDevices entries are filled.</returns>
public static sl.DeviceProperties[] GetDeviceList(out int nbDevices)
{
    var devices = new sl.DeviceProperties[(int)Constant.MAX_CAMERA_PLUGIN];
    dllz_get_device_list(devices, out nbDevices);
    return devices;
}
/// <summary>
/// Performs a hardware reset of the ZED 2/ZED 2i with the given serial number.
/// </summary>
/// <param name="serialNumber">Serial number of the camera to reboot.</param>
/// <param name="fullReboot">True to perform a full reboot (sensors and video modules).</param>
/// <returns>Error code status.</returns>
public static sl.ERROR_CODE Reboot(int serialNumber, bool fullReboot = true)
{
    var status = dllz_reboot(serialNumber, fullReboot);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Checks that the camera has been initialized and the plugin has been loaded.
/// Throws an exception describing the failure otherwise.
/// </summary>
private void AssertCameraIsReady()
{
    if (!cameraReady)
    {
        throw new Exception("ZED camera is not connected or Init() was not called.");
    }
    if (!pluginIsReady)
    {
        throw new Exception("Could not resolve ZED plugin dependencies.");
    }
}
/// <summary>
/// Issues a render-thread plugin event (event ID 1) that causes the registered textures to be
/// updated with images received from the ZED. Should be called after RetrieveTextures() so
/// there are new images available.
/// </summary>
public void UpdateTextures()
{
GL.IssuePluginEvent(GetRenderEventFunc(), 1);
}
///////////////////////////// SINGLE PIXEL UTILITY FUNCTIONS ////////////////////////////////
/// <summary>
/// Gets the depth value of a screen-space pixel, in the UNITS specified when the camera was
/// started with Init(). May give wrong results if the ZED image does not fill the whole screen.
/// </summary>
/// <param name="pixel">Screen-space coordinates (Z component unused — designed to take Input.mousePosition).</param>
/// <returns>Depth value as a float, or -1 if the camera is not ready.</returns>
public float GetDepthValue(Vector3 pixel)
{
    if (!cameraReady)
    {
        return -1;
    }
    // Map screen coordinates to image coordinates; Y is flipped (screen origin is bottom-left).
    float imgX = (float)ImageWidth * (float)((float)pixel.x / (float)Screen.width);
    float imgY = ImageHeight * (1 - (float)pixel.y / (float)Screen.height);
    uint col = (uint)Mathf.Clamp(imgX, 0, ImageWidth);
    uint row = (uint)Mathf.Clamp(imgY, 0, ImageHeight);
    return dllz_get_depth_value(CameraID, col, row);
}
/// <summary>
/// Gets the current Euclidean distance (sqrt(x²+y²+z²)) from the camera to the point under a
/// screen-space pixel. May give wrong results if the ZED image does not fill the whole screen.
/// </summary>
/// <param name="pixel">Screen-space coordinates (Z component unused — designed to take Input.mousePosition).</param>
/// <returns>Distance as a float, or -1 if the camera is not ready.</returns>
public float GetDistanceValue(Vector3 pixel)
{
    if (!cameraReady)
    {
        return -1;
    }
    // Map screen coordinates to image coordinates; Y is flipped (screen origin is bottom-left).
    float imgX = ImageWidth * (float)pixel.x / (float)Screen.width;
    float imgY = ImageHeight * (1 - (float)pixel.y / (float)Screen.height);
    uint col = (uint)Mathf.Clamp(imgX, 0, ImageWidth);
    uint row = (uint)Mathf.Clamp(imgY, 0, ImageHeight);
    return dllz_get_distance_value(CameraID, col, row);
}
/// <summary>
/// Gets the camera-space position of the point under a screen-space pixel.
/// May give wrong results if the ZED image does not fill the whole screen.
/// </summary>
/// <param name="pixel">Screen-space coordinates (Z component unused — designed to take Input.mousePosition).</param>
/// <param name="xyz">Filled with the position relative to the camera.</param>
/// <returns>True if successful; false if the camera is not ready or the lookup failed.</returns>
public bool GetXYZValue(Vector3 pixel, out Vector4 xyz)
{
    if (!cameraReady) //Do nothing if the ZED isn't initialized.
    {
        // Fix: assign Vector4.zero directly instead of Vector3.zero (which relied on an
        // implicit Vector3->Vector4 conversion). Same value (w = 0), clearer intent.
        xyz = Vector4.zero;
        return false;
    }
    // Map screen coordinates to image coordinates; Y is flipped (screen origin is bottom-left).
    float posX = (float)ImageWidth * (float)((float)pixel.x / (float)Screen.width);
    float posY = ImageHeight * (1 - (float)pixel.y / (float)Screen.height);
    posX = Mathf.Clamp(posX, 0, ImageWidth);
    posY = Mathf.Clamp(posY, 0, ImageHeight);
    return dllz_get_xyz_value(CameraID, (uint)posX, (uint)posY, out xyz);
}
/// <summary>
/// Gets the surface normal of the point under a screen-space pixel. The normal is relative to the
/// camera; use cam.worldToCameraMatrix.inverse to transform it to world space.
/// Note that ZEDSupportFunctions contains higher-level versions of this function that are easier to use.
/// </summary>
/// <param name="pixel">Screen-space coordinates (Z component unused — designed to take Input.mousePosition).</param>
/// <param name="normal">Filled with the normal as a Vector4.</param>
/// <returns>True if successful; false if the camera is not ready or the lookup failed.</returns>
public bool GetNormalValue(Vector3 pixel, out Vector4 normal)
{
    if (!cameraReady) //Do nothing if the ZED isn't initialized.
    {
        // Fix: assign Vector4.zero directly instead of Vector3.zero (which relied on an
        // implicit Vector3->Vector4 conversion). Same value (w = 0), clearer intent.
        normal = Vector4.zero;
        return false;
    }
    // Map screen coordinates to image coordinates; Y is flipped (screen origin is bottom-left).
    float posX = (float)ImageWidth * (float)((float)pixel.x / (float)Screen.width);
    float posY = ImageHeight * (1 - (float)pixel.y / (float)Screen.height);
    posX = Mathf.Clamp(posX, 0, ImageWidth);
    posY = Mathf.Clamp(posY, 0, ImageHeight);
    return dllz_get_normal_value(CameraID, (uint)posX, (uint)posY, out normal);
}
/// <summary>
/// Initializes and begins the spatial mapping process.
/// </summary>
/// <param name="type">Spatial map type (mesh or fused point cloud).</param>
/// <param name="resolution_meter">Spatial mapping resolution in meters.</param>
/// <param name="max_range_meter">Maximum scanning range in meters.</param>
/// <param name="saveTexture">True to scan surface textures in addition to geometry.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE EnableSpatialMapping(SPATIAL_MAP_TYPE type, float resolution_meter, float max_range_meter, bool saveTexture = false)
{
    int saveTextureFlag = System.Convert.ToInt32(saveTexture);
    // 4096 is the maximum memory usage passed to the wrapper.
    return (sl.ERROR_CODE)dllz_enable_spatial_mapping(CameraID, (int)type, resolution_meter, max_range_meter, saveTextureFlag, 4096);
}
/// <summary>
/// Disables the spatial mapping process. Takes the grab lock so the wrapper call cannot
/// overlap a concurrent grab.
/// </summary>
public void DisableSpatialMapping()
{
lock (grabLock)
{
dllz_disable_spatial_mapping(CameraID);
}
}
/// <summary>
/// Updates the internal version of the scanned mesh and returns the sizes of the submeshes.
/// </summary>
/// <param name="nbVerticesInSubmeshes">Filled with the number of vertices in each submesh.</param>
/// <param name="nbTrianglesInSubmeshes">Filled with the number of triangles in each submesh.</param>
/// <param name="nbSubmeshes">Set to the number of submeshes.</param>
/// <param name="updatedIndices">Filled with the indices of submeshes updated since the last update.</param>
/// <param name="nbVertices">Set to the total number of updated vertices across submeshes.</param>
/// <param name="nbTriangles">Set to the total number of updated triangles across submeshes.</param>
/// <param name="nbSubmeshMax">Maximum number of submeshes that can be handled.</param>
/// <returns>Error code indicating whether the update was successful, and why not otherwise.</returns>
public sl.ERROR_CODE UpdateMesh(int[] nbVerticesInSubmeshes, int[] nbTrianglesInSubmeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmeshMax)
{
    return (sl.ERROR_CODE)dllz_update_mesh(CameraID, nbVerticesInSubmeshes, nbTrianglesInSubmeshes, ref nbSubmeshes, updatedIndices, ref nbVertices, ref nbTriangles, nbSubmeshMax);
}
/// <summary>
/// Retrieves all chunks of the generated mesh. Call UpdateMesh() before calling this.
/// Vertex and triangle arrays must be at least the sizes reported by UpdateMesh (nbVertices and nbTriangles).
/// </summary>
/// <param name="vertices">Filled with the mesh vertices.</param>
/// <param name="triangles">Filled with triangle indices (three per triangle, into the vertices array).</param>
/// <param name="nbSubmeshMax">Maximum number of submeshes that can be handled.</param>
/// <param name="uvs">Filled with texture coordinates, if texturing is enabled.</param>
/// <param name="textures">Pointer to the texture data, if texturing is enabled.</param>
/// <returns>Error code indicating whether the retrieval was successful, and why not otherwise.</returns>
public sl.ERROR_CODE RetrieveMesh(Vector3[] vertices, int[] triangles, int nbSubmeshMax, Vector2[] uvs, IntPtr textures)
{
    var status = dllz_retrieve_mesh(CameraID, vertices, triangles, nbSubmeshMax, uvs, textures);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Updates the fused point cloud (only when the spatial map type is FUSED_POINT_CLOUD).
/// </summary>
/// <param name="nbVertices">Set to the number of points in the fused point cloud.</param>
/// <returns>Error code indicating whether the update was successful, and why not otherwise.</returns>
public sl.ERROR_CODE UpdateFusedPointCloud(ref int nbVertices)
{
    return (sl.ERROR_CODE)dllz_update_fused_point_cloud(CameraID, ref nbVertices);
}
/// <summary>
/// Retrieves all points of the fused point cloud. Call UpdateFusedPointCloud() before calling this.
/// The vertex array must be at least the size reported by UpdateFusedPointCloud.
/// </summary>
/// <param name="vertices">Filled with the points of the fused point cloud.</param>
/// <returns>Error code indicating whether the retrieval was successful, and why not otherwise.</returns>
public sl.ERROR_CODE RetrieveFusedPointCloud(Vector4[] vertices)
{
    var status = dllz_retrieve_fused_point_cloud(CameraID, vertices);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Starts the mesh generation process in a thread that doesn't block the spatial mapping process.
/// ZEDSpatialMappingHelper calls this each time it has finished applying the last mesh update.
/// </summary>
public void RequestMesh()
{
dllz_request_mesh_async(CameraID);
}
/// <summary>
/// Sets the pause state of the data-integration mechanism for the ZED's spatial mapping.
/// </summary>
/// <param name="status">If true, integration is paused. If false, spatial mapping is resumed.</param>
public void PauseSpatialMapping(bool status)
{
dllz_pause_spatial_mapping(CameraID, status);
}
/// <summary>
/// Returns the mesh-generation status. Useful for knowing when to update and retrieve the mesh.
/// </summary>
/// <returns>Error code describing the async mesh request state.</returns>
public sl.ERROR_CODE GetMeshRequestStatus()
{
    var status = dllz_get_mesh_request_status_async(CameraID);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Saves the scanned mesh to a file.
/// </summary>
/// <param name="filename">Path and filename of the mesh.</param>
/// <param name="format">File format (extension). Can be .obj, .ply or .bin.</param>
/// <returns>True if the save succeeded.</returns>
public bool SaveMesh(string filename, MESH_FILE_FORMAT format)
{
    bool saved = dllz_save_mesh(CameraID, filename, format);
    return saved;
}
/// <summary>
/// Saves the scanned point cloud to a file.
/// </summary>
/// <param name="filename">Path and filename of the point cloud.</param>
/// <param name="format">File format (extension). Can be .obj, .ply or .bin.</param>
/// <returns>True if the save succeeded.</returns>
public bool SavePointCloud(string filename, MESH_FILE_FORMAT format)
{
    bool saved = dllz_save_point_cloud(CameraID, filename, format);
    return saved;
}
/// <summary>
/// Loads a saved mesh file. ZEDSpatialMapping then configures itself as if the loaded mesh was just scanned.
/// </summary>
/// <param name="filename">Path and filename of the mesh. Should include the extension (.obj, .ply or .bin).</param>
/// <param name="nbVerticesInSubmeshes">Filled with the number of vertices in each submesh.</param>
/// <param name="nbTrianglesInSubmeshes">Filled with the number of triangles in each submesh.</param>
/// <param name="nbSubmeshes">Set to the number of submeshes.</param>
/// <param name="updatedIndices">Filled with the indices of all updated submeshes.</param>
/// <param name="nbVertices">Set to the total number of updated vertices across submeshes.</param>
/// <param name="nbTriangles">Set to the total number of updated triangles across submeshes.</param>
/// <param name="nbSubmeshMax">Maximum number of submeshes that can be handled.</param>
/// <param name="textureSize">Filled with the sizes of all the textures (width, height), if applicable.</param>
/// <returns>True if the load succeeded.</returns>
public bool LoadMesh(string filename, int[] nbVerticesInSubmeshes, int[] nbTrianglesInSubmeshes, ref int nbSubmeshes, int[] updatedIndices,
ref int nbVertices, ref int nbTriangles, int nbSubmeshMax, int[] textureSize = null)
{
return dllz_load_mesh(CameraID, filename, nbVerticesInSubmeshes, nbTrianglesInSubmeshes, ref nbSubmeshes, updatedIndices, ref nbVertices,
ref nbTriangles, nbSubmeshMax, textureSize);
}
/// <summary>
/// Filters the mesh to remove triangles while still preserving its overall shape (though less accurate).
/// </summary>
/// <param name="filterParameters">Filter level. Higher settings remove more triangles.</param>
/// <param name="nbVerticesInSubemeshes">Filled with the number of vertices in each submesh.</param>
/// <param name="nbTrianglesInSubemeshes">Filled with the number of triangles in each submesh.</param>
/// <param name="nbSubmeshes">Set to the number of submeshes.</param>
/// <param name="updatedIndices">Filled with the indices of all updated submeshes.</param>
/// <param name="nbVertices">Set to the total number of updated vertices across submeshes.</param>
/// <param name="nbTriangles">Set to the total number of updated triangles across submeshes.</param>
/// <param name="nbSubmeshMax">Maximum number of submeshes that can be handled.</param>
/// <returns>True if the filtering succeeded.</returns>
public bool FilterMesh(FILTER filterParameters, int[] nbVerticesInSubemeshes, int[] nbTrianglesInSubemeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmeshMax)
{
return dllz_filter_mesh(CameraID, filterParameters, nbVerticesInSubemeshes, nbTrianglesInSubemeshes, ref nbSubmeshes, updatedIndices, ref nbVertices, ref nbTriangles, nbSubmeshMax);
}
/// <summary>
/// Applies the scanned texture onto the internal scanned mesh.
/// Call RetrieveMesh() with uvs and textures afterward to get the result into Unity.
/// </summary>
/// <param name="nbVerticesInSubmeshes">Filled with the number of vertices in each submesh.</param>
/// <param name="nbTrianglesInSubmeshes">Filled with the number of triangles in each submesh.</param>
/// <param name="nbSubmeshes">Set to the number of submeshes.</param>
/// <param name="updatedIndices">Filled with the indices of all updated submeshes.</param>
/// <param name="nbVertices">Set to the total number of updated vertices across submeshes.</param>
/// <param name="nbTriangles">Set to the total number of updated triangles across submeshes.</param>
/// <param name="textureSize">Filled with the size of all the textures (width, height).</param>
/// <param name="nbSubmeshMax">Maximum number of submeshes that can be handled.</param>
/// <returns>True if the texturing succeeded.</returns>
public bool ApplyTexture(int[] nbVerticesInSubmeshes, int[] nbTrianglesInSubmeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int[] textureSize, int nbSubmeshMax)
{
return dllz_apply_texture(CameraID, nbVerticesInSubmeshes, nbTrianglesInSubmeshes, ref nbSubmeshes, updatedIndices, ref nbVertices, ref nbTriangles, textureSize, nbSubmeshMax);
}
/// <summary>
/// Gets the current state of the spatial mapping module.
/// </summary>
/// <returns>Current spatial mapping state.</returns>
public SPATIAL_MAPPING_STATE GetSpatialMappingState()
{
    var state = dllz_get_spatial_mapping_state(CameraID);
    return (sl.SPATIAL_MAPPING_STATE)state;
}
/// <summary>
/// Gets a vector pointing toward the direction of gravity. This is estimated from a 3D scan of the
/// environment, so a scan must be started/finished for this value to be calculated.
/// If using the ZED Mini / ZED2, this isn't required thanks to its IMU.
/// </summary>
/// <returns>Vector3 pointing downward.</returns>
public Vector3 GetGravityEstimate()
{
    Vector3 gravity = Vector3.zero;
    dllz_spatial_mapping_get_gravity_estimation(CameraID, ref gravity);
    return gravity;
}
/// <summary>
/// Consolidates the chunks from a scan. This turns lots of small meshes (efficient for the
/// scanning process) into several large meshes (more convenient to work with).
/// </summary>
/// <param name="numberFaces">Target number of faces per merged chunk.</param>
/// <param name="nbVerticesInSubmeshes">Filled with the number of vertices in each submesh.</param>
/// <param name="nbTrianglesInSubmeshes">Filled with the number of triangles in each submesh.</param>
/// <param name="nbSubmeshes">Set to the number of submeshes.</param>
/// <param name="updatedIndices">Filled with the indices of all updated submeshes.</param>
/// <param name="nbVertices">Set to the total number of updated vertices across submeshes.</param>
/// <param name="nbTriangles">Set to the total number of updated triangles across submeshes.</param>
/// <param name="nbSubmesh">Maximum number of submeshes that can be handled.</param>
public void MergeChunks(int numberFaces, int[] nbVerticesInSubmeshes, int[] nbTrianglesInSubmeshes, ref int nbSubmeshes, int[] updatedIndices, ref int nbVertices, ref int nbTriangles, int nbSubmesh)
{
dllz_spatial_mapping_merge_chunks(CameraID, numberFaces, nbVerticesInSubmeshes, nbTrianglesInSubmeshes, ref nbSubmeshes, updatedIndices, ref nbVertices, ref nbTriangles, nbSubmesh);
}
/// <summary>
/// Retrieves a measure texture from the ZED SDK and loads it into a ZEDMat. Use this to get an
/// individual texture from the last grabbed frame with measurements in every pixel, such as a
/// depth map or confidence map. Measure textures are not human-viewable but don't lose accuracy,
/// unlike image textures.
/// </summary>
/// <remarks>
/// If you want to access the texture via script, you'll usually want CPU memory; then Marshal.Copy
/// the data into a byte array that can be loaded into a Texture2D. This calls
/// Camera::retrieveMeasure() in the C++ SDK:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/classsl_1_1Camera.html#af799d12342a7b884242fffdef5588a7f
/// </remarks>
/// <param name="mat">ZEDMat to fill with the new texture.</param>
/// <param name="measure">Measure type (depth, confidence, xyz, etc.).</param>
/// <param name="mem">Whether the image should be on CPU or GPU memory.</param>
/// <param name="resolution">Resolution of the texture.</param>
/// <returns>Error code indicating whether the retrieval was successful, and why not otherwise.</returns>
public sl.ERROR_CODE RetrieveMeasure(sl.ZEDMat mat, sl.MEASURE measure, sl.ZEDMat.MEM mem = sl.ZEDMat.MEM.MEM_CPU, sl.Resolution resolution = new sl.Resolution())
{
    var status = dllz_retrieve_measure(CameraID, mat.MatPtr, (int)measure, (int)mem, (int)resolution.width, (int)resolution.height);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Retrieves an image texture from the ZED SDK and loads it into a ZEDMat. Use this to get an
/// individual texture from the last grabbed frame in a human-viewable format, such as the direct
/// RGB image or a greyscale depth image. It loses accuracy if used to show measurements like
/// depth or confidence — use measure textures for those.
/// </summary>
/// <remarks>
/// If you want to access the texture via script, you'll usually want CPU memory; then Marshal.Copy
/// the data into a byte array that can be loaded into a Texture2D (you may need to change the
/// color space and/or flip the image). This calls Camera::retrieveImage() in the C++ SDK:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/classsl_1_1Camera.html#ac40f337ccc76cacd3412b93f7f4638e2
/// </remarks>
/// <param name="mat">ZEDMat to fill with the new texture.</param>
/// <param name="view">Image type (left RGB, right depth map, etc.).</param>
/// <param name="mem">Whether the image should be on CPU or GPU memory.</param>
/// <param name="resolution">Resolution of the texture.</param>
/// <returns>Error code indicating whether the retrieval was successful, and why not otherwise.</returns>
public sl.ERROR_CODE RetrieveImage(sl.ZEDMat mat, sl.VIEW view, sl.ZEDMat.MEM mem = sl.ZEDMat.MEM.MEM_CPU, sl.Resolution resolution = new sl.Resolution())
{
    var status = dllz_retrieve_image(CameraID, mat.MatPtr, (int)view, (int)mem, (int)resolution.width, (int)resolution.height);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Computes the offsets of the optical centers used to line up the ZED's images properly with
/// Unity cameras. Called in ZEDRenderingPlane after the ZED has finished initializing.
/// </summary>
/// <param name="planeDistance">Distance from a camera in the ZED rig to the quad/Canvas object holding the ZED image.</param>
/// <returns>Offsets as a Vector4, or a default Vector4 if the wrapper returned no data.</returns>
public Vector4 ComputeOpticalCenterOffsets(float planeDistance)
{
    sl.CalibrationParameters calib = GetCalibrationParameters(false);
    // Pack each sensor's intrinsics as (fx, fy, cx, cy) for the wrapper.
    Vector4 leftIntrinsics = new Vector4(calib.leftCam.fx, calib.leftCam.fy, calib.leftCam.cx, calib.leftCam.cy);
    Vector4 rightIntrinsics = new Vector4(calib.rightCam.fx, calib.rightCam.fy, calib.rightCam.cx, calib.rightCam.cy);
    IntPtr resultPtr = dllz_compute_optical_center_offsets(ref leftIntrinsics, ref rightIntrinsics, this.ImageWidth, this.ImageHeight, planeDistance);
    if (resultPtr == IntPtr.Zero)
    {
        return new Vector4();
    }
    return (Vector4)Marshal.PtrToStructure(resultPtr, typeof(Vector4));
}
////////////////////////
/// Plane Detection ///
////////////////////////
/// <summary>
/// Looks for a plane in the visible area that is likely to represent the floor.
/// Use ZEDPlaneDetectionManager.DetectFloorPlane for a higher-level version that turns planes into GameObjects.
/// </summary>
/// <param name="plane">Filled with data on the detected plane (overwritten entirely on success).</param>
/// <param name="playerHeight">Set to the height of the camera above the newly-detected floor (0 on failure).</param>
/// <param name="priorQuat">Prior rotation.</param>
/// <param name="priorTrans">Prior position.</param>
/// <returns>The plane's own error code on success, or FAILURE if the wrapper returned no data.</returns>
public sl.ERROR_CODE findFloorPlane(ref ZEDPlaneGameObject.PlaneData plane, out float playerHeight, Quaternion priorQuat, Vector3 priorTrans)
{
IntPtr p = IntPtr.Zero;
Quaternion out_quat = Quaternion.identity;
Vector3 out_trans = Vector3.zero;
p = dllz_find_floor_plane(CameraID, out out_quat, out out_trans, priorQuat, priorTrans);
// Pre-size Bounds before marshaling; on success the whole struct (including Bounds)
// is replaced by PtrToStructure below, so this mainly matters on the failure path.
plane.Bounds = new Vector3[256];
playerHeight = 0;
if (p != IntPtr.Zero)
{
// Marshal the native plane struct returned by the wrapper into the managed struct.
plane = (ZEDPlaneGameObject.PlaneData)Marshal.PtrToStructure(p, typeof(ZEDPlaneGameObject.PlaneData));
playerHeight = out_trans.y;
return (sl.ERROR_CODE)plane.ErrorCode;
}
else
return sl.ERROR_CODE.FAILURE;
}
/// <summary>
/// Using data from a detected floor plane, updates the supplied vertex and triangle arrays with
/// data needed to make a mesh that represents it. These arrays are updated directly from the wrapper.
/// </summary>
/// <param name="vertices">Filled with mesh vertices.</param>
/// <param name="triangles">Filled with mesh triangles, stored as indices of each triangle's points.</param>
/// <param name="numVertices">Set to the total vertices in the mesh.</param>
/// <param name="numTriangles">Set to the total triangle indices (3x the number of triangles).</param>
/// <returns>Wrapper status code.</returns>
public int convertFloorPlaneToMesh(Vector3[] vertices, int[] triangles, out int numVertices, out int numTriangles)
{
    int status = dllz_convert_floorplane_to_mesh(CameraID, vertices, triangles, out numVertices, out numTriangles);
    return status;
}
/// <summary>
/// Checks for a plane in the real world at given screen-space coordinates.
/// Use ZEDPlaneDetectionManager.DetectPlaneAtHit() for a higher-level version that turns planes into GameObjects.
/// </summary>
/// <param name="plane">Filled with data on the detected plane (overwritten entirely on success).</param>
/// <param name="screenPos">Point on the ZED image to check for a plane, in screen space.</param>
/// <returns>The plane's own error code on success, or FAILURE if the wrapper returned no data.</returns>
public sl.ERROR_CODE findPlaneAtHit(ref ZEDPlaneGameObject.PlaneData plane, Vector2 screenPos)
{
IntPtr p = IntPtr.Zero;
Quaternion out_quat = Quaternion.identity;
Vector3 out_trans = Vector3.zero;
// Map screen coordinates to image coordinates; Y is flipped (screen origin is bottom-left).
float posX = (float)ImageWidth * (float)((float)screenPos.x / (float)Screen.width);
float posY = ImageHeight * (1 - (float)screenPos.y / (float)Screen.height);
posX = Mathf.Clamp(posX, 0, ImageWidth);
posY = Mathf.Clamp(posY, 0, ImageHeight);
p = dllz_find_plane_at_hit(CameraID, new Vector2(posX, posY), true);
// Pre-size Bounds before marshaling; on success the whole struct (including Bounds)
// is replaced by PtrToStructure below, so this mainly matters on the failure path.
plane.Bounds = new Vector3[256];
if (p != IntPtr.Zero)
{
// Marshal the native plane struct returned by the wrapper into the managed struct.
plane = (ZEDPlaneGameObject.PlaneData)Marshal.PtrToStructure(p, typeof(ZEDPlaneGameObject.PlaneData));
return (sl.ERROR_CODE)plane.ErrorCode;
}
else
return sl.ERROR_CODE.FAILURE;
}
/// <summary>
/// Using data from a detected hit plane, updates the supplied vertex and triangle arrays with
/// data needed to make a mesh that represents it. These arrays are updated directly from the wrapper.
/// </summary>
/// <param name="vertices">Filled with mesh vertices.</param>
/// <param name="triangles">Filled with mesh triangles, stored as indices of each triangle's points.</param>
/// <param name="numVertices">Set to the total vertices in the mesh.</param>
/// <param name="numTriangles">Set to the total triangle indices (3x the number of triangles).</param>
/// <returns>Wrapper status code.</returns>
public int convertHitPlaneToMesh(Vector3[] vertices, int[] triangles, out int numVertices, out int numTriangles)
{
    int status = dllz_convert_hitplane_to_mesh(CameraID, vertices, triangles, out numVertices, out numTriangles);
    return status;
}
////////////////////////
/// Streaming Module ///
////////////////////////
/// <summary>
/// Creates a streaming pipeline. See sl::StreamingParameters in the ZED SDK API docs for
/// details on the parameters.
/// </summary>
/// <param name="codec">Video codec used for the stream.</param>
/// <param name="bitrate">Streaming bitrate in Kbits/s.</param>
/// <param name="port">Port used for the stream.</param>
/// <param name="gopSize">GOP size; -1 lets the encoder choose.</param>
/// <param name="adaptativeBitrate">True to enable adaptive bitrate.</param>
/// <param name="chunk_size">Maximum size of each packet chunk.</param>
/// <param name="target_fps">Target framerate for the stream; 0 uses the camera framerate.</param>
/// <returns>An ERROR_CODE that indicates whether the streaming pipe was successfully created.</returns>
public ERROR_CODE EnableStreaming(STREAMING_CODEC codec = STREAMING_CODEC.AVCHD_BASED, uint bitrate = 8000, ushort port = 30000, int gopSize = -1, bool adaptativeBitrate = false, int chunk_size = 8096, int target_fps = 0)
{
    // The wrapper expects the adaptive-bitrate flag as an int.
    int doAdaptBitrate;
    if (adaptativeBitrate)
        doAdaptBitrate = 1;
    else
        doAdaptBitrate = 0;
    return (ERROR_CODE)dllz_enable_streaming(CameraID, codec, bitrate, port, gopSize, doAdaptBitrate, chunk_size, target_fps);
}
/// <summary>
/// Tells whether the streaming pipeline is running.
/// </summary>
/// <returns>True if streaming is enabled, false otherwise.</returns>
public bool IsStreamingEnabled()
{
    // Idiom: return the comparison directly instead of if/else returning true/false.
    return dllz_is_streaming_enabled(CameraID) == 1;
}
/// <summary>
/// Stops the streaming pipeline.
/// </summary>
public void DisableStreaming()
{
dllz_disable_streaming(CameraID);
}
////////////////////////
/// Save utils fct ///
////////////////////////
/// <summary>
/// Saves the current image (specified by view) to a file.
/// Supported formats are jpeg and png; the filename must end with .jpg or .png.
/// </summary>
/// <param name="view">View to save (left RGB, depth image, etc.).</param>
/// <param name="filename">Destination path; must end with .jpg or .png.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE SaveCurrentImageInFile(sl.VIEW view, String filename)
{
    return (sl.ERROR_CODE)dllz_save_current_image(CameraID, view, filename);
}
/// <summary>
/// Saves the current depth map to a file.
/// Supported formats are PNG, PFM and PGM.
/// </summary>
/// <param name="side">Left (0) or right (1) depth.</param>
/// <param name="filename">Destination path; must end with .png, .pfm or .pgm.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE SaveCurrentDepthInFile(int side, String filename)
{
    return (sl.ERROR_CODE)dllz_save_current_depth(CameraID, side, filename);
}
/// <summary>
/// Saves the current point cloud to a file.
/// Supported formats are PLY, VTK, XYZ and PCD.
/// </summary>
/// <param name="side">Left (0) or right (1) point cloud.</param>
/// <param name="filename">Destination path; must end with .ply, .xyz, .vtk or .pcd.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE SaveCurrentPointCloudInFile(int side, String filename)
{
    return (sl.ERROR_CODE)dllz_save_current_point_cloud(CameraID, side, filename);
}
////////////////////////
/// Object detection ///
////////////////////////
/// <summary>
/// Maps a DETECTION_MODEL value to its corresponding AI_MODELS value.
/// Unmapped inputs yield sl.AI_MODELS.LAST.
/// </summary>
/// <param name="m_in">Detection model to convert.</param>
/// <returns>Matching AI model, or LAST if there is no mapping.</returns>
public static sl.AI_MODELS cvtDetection(sl.DETECTION_MODEL m_in)
{
    switch (m_in)
    {
        case sl.DETECTION_MODEL.HUMAN_BODY_ACCURATE: return sl.AI_MODELS.HUMAN_BODY_ACCURATE_DETECTION;
        case sl.DETECTION_MODEL.HUMAN_BODY_MEDIUM: return sl.AI_MODELS.HUMAN_BODY_MEDIUM_DETECTION;
        case sl.DETECTION_MODEL.HUMAN_BODY_FAST: return sl.AI_MODELS.HUMAN_BODY_FAST_DETECTION;
        case sl.DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE: return sl.AI_MODELS.MULTI_CLASS_ACCURATE_DETECTION;
        case sl.DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM: return sl.AI_MODELS.MULTI_CLASS_MEDIUM_DETECTION;
        case sl.DETECTION_MODEL.MULTI_CLASS_BOX: return sl.AI_MODELS.MULTI_CLASS_DETECTION;
        case sl.DETECTION_MODEL.PERSON_HEAD_BOX: return sl.AI_MODELS.PERSON_HEAD_DETECTION;
        default: return sl.AI_MODELS.LAST;
    }
}
/// <summary>
/// Maps an AI_MODELS value back to its corresponding DETECTION_MODEL value.
/// Unmapped inputs yield sl.DETECTION_MODEL.LAST.
/// </summary>
/// <param name="m_in">AI model to convert.</param>
/// <returns>Matching detection model, or LAST if there is no mapping.</returns>
public static sl.DETECTION_MODEL cvtDetection(sl.AI_MODELS m_in)
{
    switch (m_in)
    {
        case sl.AI_MODELS.HUMAN_BODY_ACCURATE_DETECTION: return sl.DETECTION_MODEL.HUMAN_BODY_ACCURATE;
        case sl.AI_MODELS.HUMAN_BODY_MEDIUM_DETECTION: return sl.DETECTION_MODEL.HUMAN_BODY_MEDIUM;
        case sl.AI_MODELS.HUMAN_BODY_FAST_DETECTION: return sl.DETECTION_MODEL.HUMAN_BODY_FAST;
        case sl.AI_MODELS.MULTI_CLASS_ACCURATE_DETECTION: return sl.DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE;
        case sl.AI_MODELS.MULTI_CLASS_MEDIUM_DETECTION: return sl.DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM;
        case sl.AI_MODELS.MULTI_CLASS_DETECTION: return sl.DETECTION_MODEL.MULTI_CLASS_BOX;
        case sl.AI_MODELS.PERSON_HEAD_DETECTION: return sl.DETECTION_MODEL.PERSON_HEAD_BOX;
        default: return sl.DETECTION_MODEL.LAST;
    }
}
/// <summary>
/// Checks whether a corresponding optimized engine is found for the requested model,
/// based on your rig configuration.
/// </summary>
/// <param name="model">AI model to check.</param>
/// <param name="gpu_id">ID of the GPU to check against.</param>
/// <returns>Status of the model, or a default-constructed status if the wrapper returned no data.</returns>
public static AI_Model_status CheckAIModelStatus(AI_MODELS model, int gpu_id = 0)
{
    IntPtr statusPtr = dllz_check_AI_model_status(model, gpu_id);
    if (statusPtr == IntPtr.Zero)
    {
        return new AI_Model_status();
    }
    return (AI_Model_status)Marshal.PtrToStructure(statusPtr, typeof(AI_Model_status));
}
/// <summary>
/// Optimizes the requested model, possibly downloading it if it is not present on the host.
/// </summary>
/// <param name="model">AI model to optimize.</param>
/// <param name="gpu_id">ID of the GPU to optimize on.</param>
/// <returns>Error code status.</returns>
public static sl.ERROR_CODE OptimizeAIModel(AI_MODELS model, int gpu_id = 0)
{
    var status = dllz_optimize_AI_model(model, gpu_id);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Enables the object detection module. Takes the grab lock so the wrapper call cannot
/// overlap a concurrent grab.
/// </summary>
/// <param name="od_params">Object detection parameters.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE EnableObjectsDetection(ref dll_ObjectDetectionParameters od_params)
{
    lock (grabLock)
    {
        return (sl.ERROR_CODE)dllz_enable_objects_detection(CameraID, ref od_params);
    }
}
/// <summary>
/// Disables the object detection module and releases its resources. Takes the grab lock so
/// the wrapper call cannot overlap a concurrent grab.
/// </summary>
public void DisableObjectsDetection()
{
lock (grabLock)
{
dllz_disable_objects_detection(CameraID);
}
}
/// <summary>
/// Pauses or unpauses the object detection module. Takes the grab lock so the wrapper call
/// cannot overlap a concurrent grab.
/// </summary>
/// <param name="status">True to pause, false to resume.</param>
public void PauseObjectsDetection(bool status)
{
lock (grabLock)
{
dllz_pause_objects_detection(CameraID, status);
}
}
/// <summary>
/// Feeds externally-detected 2D boxes into the SDK's object tracking pipeline.
/// </summary>
/// <param name="objects_in">Detections to ingest; the count and a materialized array are passed to the wrapper.</param>
/// <returns>Error code status.</returns>
// NOTE(review): the element type of 'List' appears to have been lost in this copy
// (presumably List<CustomBoxObjectData>) — confirm against the original source.
public sl.ERROR_CODE IngestCustomBoxObjects(List objects_in)
{
return (sl.ERROR_CODE)dllz_ingest_custom_box_objects(CameraID, objects_in.Count, objects_in.ToArray());
}
/// <summary>
/// Retrieves object detection data for the last grabbed frame.
/// </summary>
/// <param name="od_params">Object detection runtime parameters.</param>
/// <param name="objFrame">Filled with all the detection data.</param>
/// <returns>Error code status.</returns>
public sl.ERROR_CODE RetrieveObjectsDetectionData(ref dll_ObjectDetectionRuntimeParameters od_params, ref ObjectsFrameSDK objFrame)
{
    var status = dllz_retrieve_objects_data(CameraID, ref od_params, ref objFrame);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Updates the batch trajectories and retrieves the number of batches.
/// </summary>
/// <param name="nbBatches">Set to the number of batches.</param>
/// <returns>An ERROR_CODE that indicates the type of error.</returns>
public sl.ERROR_CODE UpdateObjectsBatch(out int nbBatches)
{
    var status = dllz_update_objects_batch(CameraID, out nbBatches);
    return (sl.ERROR_CODE)status;
}
/// <summary>
/// Retrieves a batch of tracked objects. This needs to be called after RetrieveObjects,
/// otherwise the trajectories will be empty. It also needs to be called after UpdateObjectsBatch
/// in order to retrieve the number of batch trajectories.
/// To retrieve all the batches, iterate batch_index from 0 to the nbBatches returned by UpdateObjectsBatch.
/// </summary>
/// <param name="batch_index">Index of the batch to retrieve.</param>
/// <param name="objectsBatch">Trajectory data filled field-by-field by the batching queue process.</param>
/// <returns>An ERROR_CODE that indicates the type of error.</returns>
public sl.ERROR_CODE GetObjectsBatch(int batch_index, ref ObjectsBatch objectsBatch)
{
// Every field of the ObjectsBatch struct is passed individually so the wrapper can fill it in place.
return (sl.ERROR_CODE)dllz_get_objects_batch_data(CameraID, batch_index, ref objectsBatch.numData, ref objectsBatch.id, ref objectsBatch.label, ref objectsBatch.sublabel,
ref objectsBatch.trackingState, objectsBatch.positions, objectsBatch.positionCovariances, objectsBatch.velocities, objectsBatch.timestamps, objectsBatch.boundingBoxes2D,
objectsBatch.boundingBoxes, objectsBatch.confidences, objectsBatch.actionStates, objectsBatch.keypoints2D, objectsBatch.keypoints, objectsBatch.headBoundingBoxes2D,
objectsBatch.headBoundingBoxes, objectsBatch.headPositions, objectsBatch.keypointConfidences)
;
}
}//Zed Camera class
} // namespace sl