//======= Copyright (c) Stereolabs Corporation, All rights reserved. ===============
using System.Runtime.InteropServices;
using System;
using System.Collections.Generic;
using UnityEngine;
///
/// This file holds classes built to be exchanged between the ZED wrapper DLL (sl_unitywrapper.dll)
/// and C# scripts within Unity. Most have parity with a structure within the ZED C++ SDK.
/// Find more info at https://www.stereolabs.com/developers/documentation/API/latest/.
///
namespace sl
{
public class ZEDCommon
{
public const string NameDLL = "sl_unitywrapper";
}
public enum ZED_CAMERA_ID
{
CAMERA_ID_01,
CAMERA_ID_02,
CAMERA_ID_03,
CAMERA_ID_04
};
public enum INPUT_TYPE
{
INPUT_TYPE_USB,
INPUT_TYPE_SVO,
INPUT_TYPE_STREAM
};
///
/// Constant for plugin. Should not be changed
///
public enum Constant
{
MAX_CAMERA_PLUGIN = 4,
PLANE_DISTANCE = 10,
MAX_OBJECTS = 75,
MAX_BATCH_SIZE = 200
};
///
/// Holds a 3x3 matrix that can be marshaled between the ZED
/// Unity wrapper and C# scripts.
///
public struct Matrix3x3
{
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 9)]
public float[] m; //3x3 matrix.
};
///
/// Holds a camera resolution as two pointers (for height and width) for easy
/// passing back and forth to the ZED Unity wrapper.
///
public struct Resolution
{
///
///
///
///
///
public Resolution(uint width, uint height)
{
this.width = (System.UIntPtr)width;
this.height = (System.UIntPtr)height;
}
public System.UIntPtr width;
public System.UIntPtr height;
};
///
/// Pose structure with data on timing and validity in addition to
/// position and rotation.
///
[StructLayout(LayoutKind.Sequential)]
public struct Pose
{
public bool valid;
public ulong timestap;
public Quaternion rotation;
public Vector3 translation;
public int pose_confidence;
};
///
/// Rect structure to define a rectangle or a ROI in pixels
/// Use to set ROI target for AEC/AGC
///
[StructLayout(LayoutKind.Sequential)]
public struct iRect
{
public int x;
public int y;
public int width;
public int height;
};
public enum CAMERA_STATE
{
///
/// Defines if the camera can be openned by the sdk
///
AVAILABLE,
///
/// Defines if the camera is already opened and unavailable
///
NOT_AVAILABLE
}
[StructLayout(LayoutKind.Sequential)]
public struct DeviceProperties
{
///
/// The camera state
///
public sl.CAMERA_STATE cameraState;
///
/// The camera id (Notice that only the camera with id '0' can be used on Windows)
///
public int id;
///
/// The camera model
///
public sl.MODEL cameraModel;
///
/// The camera serial number
///
public int sn;
};
///
/// Full IMU data structure.
///
[StructLayout(LayoutKind.Sequential)]
public struct ImuData
{
///
/// Indicates if imu data is available
///
public bool available;
///
/// IMU Data timestamp in ns
///
public ulong timestamp;
///
/// Gyroscope calibrated data in degrees/second.
///
public Vector3 angularVelocity;
///
/// Accelerometer calibrated data in m/s².
///
public Vector3 linearAcceleration;
///
/// Gyroscope raw/uncalibrated data in degrees/second.
///
public Vector3 angularVelocityUncalibrated;
///
/// Accelerometer raw/uncalibrated data in m/s².
///
public Vector3 linearAccelerationUncalibrated;
///
/// Orientation from gyro/accelerator fusion.
///
public Quaternion fusedOrientation;
///
/// Covariance matrix of the quaternion.
///
public Matrix3x3 orientationCovariance;
///
/// Gyroscope raw data covariance matrix.
///
public Matrix3x3 angularVelocityCovariance;
///
/// Accelerometer raw data covariance matrix.
///
public Matrix3x3 linearAccelerationCovariance;
};
[StructLayout(LayoutKind.Sequential)]
public struct BarometerData
{
///
/// Indicates if mag data is available
///
public bool available;
///
/// mag Data timestamp in ns
///
public ulong timestamp;
///
/// Barometer ambient air pressure in hPa
///
public float pressure;
///
/// Relative altitude from first camera position
///
public float relativeAltitude;
};
public enum HEADING_STATE
{
///
/// The heading is reliable and not affected by iron interferences.
///
GOOD,
///
/// The heading is reliable, but affected by slight iron interferences.
///
OK,
///
/// The heading is not reliable because affected by strong iron interferences.
///
NOT_GOOD,
///
/// The magnetometer has not been calibrated.
///
NOT_CALIBRATED,
///
/// The magnetomer sensor is not available.
///
MAG_NOT_AVAILABLE
};
[StructLayout(LayoutKind.Sequential)]
public struct MagnetometerData
{
///
/// Indicates if mag data is available
///
public bool available;
///
/// mag Data timestamp in ns
///
public ulong timestamp;
///
/// Magnetic field calibrated values in uT
///
///
public Vector3 magneticField;
///
/// Magnetic field raw values in uT
///
public Vector3 magneticFieldUncalibrated;
///
/// The camera heading in degrees relative to the magnetic North Pole.
/// note: The magnetic North Pole has an offset with respect to the geographic North Pole, depending on the
/// geographic position of the camera.
/// To get a correct magnetic heading the magnetometer sensor must be calibrated using the ZED Sensor Viewer tool
///
public float magneticHeading;
///
/// The state of the /ref magnetic_heading value
///
public HEADING_STATE magnetic_heading_state;
///
/// The accuracy of the magnetic heading measure in the range [0.0,1.0].
/// A negative value means that the magnetometer must be calibrated using the ZED Sensor Viewer tool
///
public float magnetic_heading_accuracy;
};
[StructLayout(LayoutKind.Sequential)]
public struct TemperatureSensorData
{
///
/// Temperature from IMU device ( -100 if not available)
///
public float imu_temp;
///
/// Temperature from Barometer device ( -100 if not available)
///
public float barometer_temp;
///
/// Temperature from Onboard left analog temperature sensor ( -100 if not available)
///
public float onboard_left_temp;
///
/// Temperature from Onboard right analog temperature sensor ( -100 if not available)
///
public float onboard_right_temp;
};
[StructLayout(LayoutKind.Sequential)]
public struct SensorsData
{
///
/// Contains Imu Data
///
public ImuData imu;
///
/// Contains Barometer Data
///
public BarometerData barometer;
///
/// Contains Mag Data
///
public MagnetometerData magnetometer;
///
/// Contains Temperature Data
///
public TemperatureSensorData temperatureSensor;
///
/// Indicated if camera is :
/// -> Static : 0
/// -> Moving : 1
/// -> Falling : 2
///
public int camera_moving_state;
///
/// Indicates if the current sensors data is sync to the current image (>=1). Otherwise, value will be 0.
///
public int image_sync_val;
};
/*******************************************************************************************************************************
*******************************************************************************************************************************/
///
/// Calibration information for an individual sensor on the ZED (left or right).
/// For more information, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/structsl_1_1CameraParameters.html
[StructLayout(LayoutKind.Sequential)]
public struct CameraParameters
{
///
/// Focal X.
///
public float fx;
///
/// Focal Y.
///
public float fy;
///
/// Optical center X.
///
public float cx;
///
/// Optical center Y.
///
public float cy;
///
/// Distortion coefficients.
///
[MarshalAs(UnmanagedType.ByValArray, ArraySubType = UnmanagedType.U8, SizeConst = 5)]
public double[] disto;
///
/// Vertical field of view after stereo rectification.
///
public float vFOV;
///
/// Horizontal field of view after stereo rectification.
///
public float hFOV;
///
/// Diagonal field of view after stereo rectification.
///
public float dFOV;
///
/// Camera's current resolution.
///
public Resolution resolution;
};
///
/// List of the available onboard sensors
///
public enum SENSOR_TYPE
{
///
/// Three axis Accelerometer sensor to measure the inertial accelerations.
///
ACCELEROMETER,
///
/// Three axis Gyroscope sensor to measure the angular velocitiers.
///
GYROSCOPE,
///
/// Three axis Magnetometer sensor to measure the orientation of the device respect to the earth magnetic field.
///
MAGNETOMETER,
///
/// Barometer sensor to measure the atmospheric pressure.
///
BAROMETER,
LAST
};
///
/// List of the available onboard sensors measurement units
///
public enum SENSORS_UNIT
{
///
/// Acceleration [m/s²].
///
M_SEC_2,
///
/// Angular velocity [deg/s].
///
DEG_SEC,
///
/// Magnetic Fiels [uT].
///
U_T,
///
/// Atmospheric pressure [hPa].
///
HPA,
///
/// Temperature [°C].
///
CELSIUS,
///
/// Frequency [Hz].
///
HERTZ,
///
///
///
LAST
};
///
/// Structure containing information about a single sensor available in the current device
///
[StructLayout(LayoutKind.Sequential)]
public struct SensorParameters
{
///
/// The type of the sensor as \ref DEVICE_SENSORS.
///
public SENSOR_TYPE type;
///
/// The resolution of the sensor.
///
public float resolution;
///
/// The sampling rate (or ODR) of the sensor.
///
public float sampling_rate;
///
/// The range values of the sensor. MIN: `range.x`, MAX: `range.y`
///
public float2 range;
///
/// also known as white noise, given as continous (frequency independant). Units will be expressed in sensor_unit/√(Hz). `NAN` if the information is not available.
///
public float noise_density;
///
/// derived from the Allan Variance, given as continous (frequency independant). Units will be expressed in sensor_unit/s/√(Hz).`NAN` if the information is not available.
///
public float random_walk;
///
/// The string relative to the measurement unit of the sensor.
///
public SENSORS_UNIT sensor_unit;
///
///
///
public bool isAvailable;
};
///
/// Structure containing information about all the sensors available in the current device
///
[StructLayout(LayoutKind.Sequential)]
public struct SensorsConfiguration
{
///
/// The firmware version of the sensor module, 0 if no sensors are available (ZED camera model).
///
public uint firmware_version;
///
/// contains rotation between IMU frame and camera frame.
///
public float4 camera_imu_rotation;
///
/// contains translation between IMU frame and camera frame.
///
public float3 camera_imu_translation;
///
/// Magnetometer to IMU rotation. contains rotation between IMU frame and magnetometer frame.
///
public float4 imu_magnometer_rotation;
///
/// Magnetometer to IMU translation. contains translation between IMU frame and magnetometer frame.
///
public float3 imu_magnometer_translation;
///
/// Configuration of the accelerometer device.
///
public SensorParameters accelerometer_parameters;
///
/// Configuration of the gyroscope device.
///
public SensorParameters gyroscope_parameters;
///
/// Configuration of the magnetometer device.
///
public SensorParameters magnetometer_parameters;
///
/// Configuration of the barometer device
///
public SensorParameters barometer_parameters;
///
/// if a sensor type is available on the device
///
///
///
public bool isSensorAvailable(SENSOR_TYPE sensor_type)
{
switch (sensor_type)
{
case SENSOR_TYPE.ACCELEROMETER:
return accelerometer_parameters.isAvailable;
case SENSOR_TYPE.GYROSCOPE:
return gyroscope_parameters.isAvailable;
case SENSOR_TYPE.MAGNETOMETER:
return magnetometer_parameters.isAvailable;
case SENSOR_TYPE.BAROMETER:
return barometer_parameters.isAvailable;
default:
break;
}
return false;
}
};
///
/// Holds calibration information about the current ZED's hardware, including per-sensor
/// calibration and offsets between the two sensors.
/// For more info, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/structsl_1_1CalibrationParameters.html
[StructLayout(LayoutKind.Sequential)]
public struct CalibrationParameters
{
///
/// Parameters of the left sensor.
///
public CameraParameters leftCam;
///
/// Parameters of the right sensor.
///
public CameraParameters rightCam;
///
/// Rotation (using Rodrigues' transformation) between the two sensors. Defined as 'tilt', 'convergence' and 'roll'.
///
public Quaternion Rot;
///
/// Translation between the two sensors. T[0] is the distance between the two cameras in meters.
///
public Vector3 Trans;
};
///
/// Container for information about the current SVO recording process.
///
/// Mirrors sl.RecordingState in the ZED C++ SDK. For more info, visit:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/structsl_1_1RecordingState.html
///
[StructLayout(LayoutKind.Sequential)]
public struct Recording_state
{
///
/// Status of the current frame. True if recording was successful, false if frame could not be written.
///
public bool status;
///
/// Compression time for the current frame in milliseconds.
///
public double current_compression_time;
///
/// Compression ratio (% of raw size) for the current frame.
///
public double current_compression_ratio;
///
/// Average compression time in millisecond since beginning of recording.
///
public double average_compression_time;
///
/// Compression ratio (% of raw size) since recording was started.
///
public double average_compression_ratio;
}
///
/// Status of the ZED's self-calibration. Since v0.9.3, self-calibration is done in the background and
/// starts in the sl.ZEDCamera.Init or Reset functions.
///
/// Mirrors SELF_CALIBRATION_STATE in the ZED C++ SDK. For more info, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/group__Video__group.html#gacce19db438a07075b7e5e22ee5845c95
///
public enum ZED_SELF_CALIBRATION_STATE
{
///
/// Self-calibration has not yet been called (no Init() called).
///
SELF_CALIBRATION_NOT_CALLED,
///
/// Self-calibration is currently running.
///
SELF_CALIBRATION_RUNNING,
///
/// Self-calibration has finished running but did not manage to get coherent values. Old Parameters are used instead.
///
SELF_CALIBRATION_FAILED,
///
/// Self Calibration has finished running and successfully produces coherent values.
///
SELF_CALIBRATION_SUCCESS
};
///
/// Lists available depth computation modes. Each mode offers better accuracy than the
/// mode before it, but at a performance cost.
///
/// Mirrors DEPTH_MODE in the ZED C++ SDK. For more info, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/group__Depth__group.html#ga8d542017c9b012a19a15d46be9b7fa43
///
public enum DEPTH_MODE
{
///
/// Does not compute any depth map. Only rectified stereo images will be available.
///
NONE,
///
/// Fastest mode for depth computation.
///
PERFORMANCE,
///
/// Balanced quality mode. Depth map is robust in most environment and requires medium compute power.
///
QUALITY,
///
/// Native depth. Very accurate, but at a large performance cost.
///
ULTRA,
///
/// End to End Neural disparity estimation, requires AI module
///
NEURAL
};
///
/// Types of Image view modes, for creating human-viewable textures.
/// Used only in ZEDRenderingPlane as a simplified version of sl.VIEW, which has more detailed options.
///
public enum VIEW_MODE
{
///
/// Dsplays regular color images.
///
VIEW_IMAGE,
///
/// Displays a greyscale depth map.
///
VIEW_DEPTH,
///
/// Displays a normal map.
///
VIEW_NORMALS,
};
///
/// List of error codes in the ZED SDK.
///
/// Mirrors ERROR_CODE in the ZED C++ SDK. For more info, read:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/group__Camera__group.html#ga4db9ee29f2ff83c71567c12f6bfbf28c
///
public enum ERROR_CODE
{
///
/// Operation was successful.
///
SUCCESS,
///
/// Standard, generic code for unsuccessful behavior when no other code is more appropriate.
///
FAILURE,
///
/// No GPU found, or CUDA capability of the device is not supported.
///
NO_GPU_COMPATIBLE,
///
/// Not enough GPU memory for this depth mode. Try a different mode (such as PERFORMANCE).
///
NOT_ENOUGH_GPUMEM,
///
/// The ZED camera is not plugged in or detected.
///
CAMERA_NOT_DETECTED,
///
/// The MCU that controls the sensors module has an invalid Serial Number. You can try to recover it launching the 'ZED Diagnostic' tool from the command line with the option '-r'.
///
SENSORS_NOT_INITIALIZED,
///
/// a ZED Mini is detected but the inertial sensor cannot be opened. (Never called for original ZED)
///
SENSOR_NOT_DETECTED,
///
/// For Nvidia Jetson X1 only - resolution not yet supported (USB3.0 bandwidth).
///
INVALID_RESOLUTION,
///
/// USB communication issues. Occurs when the camera FPS cannot be reached, due to a lot of corrupted frames.
/// Try changing the USB port.
///
LOW_USB_BANDWIDTH,
///
/// ZED calibration file is not found on the host machine. Use ZED Explorer or ZED Calibration to get one.
///
CALIBRATION_FILE_NOT_AVAILABLE,
///
/// ZED calibration file is not valid. Try downloading the factory one or recalibrating using the ZED Calibration tool.
///
INVALID_CALIBRATION_FILE,
///
/// The provided SVO file is not valid.
///
INVALID_SVO_FILE,
///
/// An SVO recorder-related error occurred (such as not enough free storage or an invalid file path).
///
SVO_RECORDING_ERROR,
///
/// An SVO related error when NVIDIA based compression cannot be loaded
///
SVO_UNSUPPORTED_COMPRESSION,
///
/// The requested coordinate system is not available.
///
INVALID_COORDINATE_SYSTEM,
///
/// The firmware of the ZED is out of date. Update to the latest version.
///
INVALID_FIRMWARE,
///
/// An invalid parameter has been set for the function.
///
INVALID_FUNCTION_PARAMETERS,
///
/// In grab() only, the current call return the same frame as last call. Not a new frame.
///
NOT_A_NEW_FRAME,
///
/// In grab() only, a CUDA error has been detected in the process. Activate wrapperVerbose in ZEDManager.cs for more info.
///
CUDA_ERROR,
///
/// In grab() only, ZED SDK is not initialized. Probably a missing call to sl::Camera::open.
///
CAMERA_NOT_INITIALIZED,
///
/// Your NVIDIA driver is too old and not compatible with your current CUDA version.
///
NVIDIA_DRIVER_OUT_OF_DATE,
///
/// The function call is not valid in the current context. Could be a missing a call to sl::Camera::open.
///
INVALID_FUNCTION_CALL,
///
/// The SDK wasn't able to load its dependencies, the installer should be launched.
///
CORRUPTED_SDK_INSTALLATION,
///
/// The installed SDK is not the SDK used to compile the program.
///
INCOMPATIBLE_SDK_VERSION,
///
/// The given area file does not exist. Check the file path.
///
INVALID_AREA_FILE,
///
/// The area file does not contain enough data to be used ,or the sl::DEPTH_MODE used during the creation of the
/// area file is different from the one currently set.
///
INCOMPATIBLE_AREA_FILE,
///
/// Camera failed to set up.
///
CAMERA_FAILED_TO_SETUP,
///
/// Your ZED cannot be opened. Try replugging it to another USB port or flipping the USB-C connector (if using ZED Mini).
///
CAMERA_DETECTION_ISSUE,
///
/// The Camera is already in use by another process.
///
CAMERA_ALREADY_IN_USE,
///
/// No GPU found or CUDA is unable to list it. Can be a driver/reboot issue.
///
NO_GPU_DETECTED,
///
/// Plane not found. Either no plane is detected in the scene, at the location or corresponding to the floor,
/// or the floor plane doesn't match the prior given.
///
PLANE_NOT_FOUND,
///
/// The Object detection module is only compatible with the ZED 2
///
MODULE_NOT_COMPATIBLE_WITH_CAMERA,
///
/// The module needs the sensors to be enabled (see InitParameters::sensors_required)
///
MOTION_SENSORS_REQUIRED,
///
/// The module needs a newer version of CUDA
///
MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION,
///
/// End of ERROR_CODE
///
ERROR_CODE_LAST
};
///
/// Represents the available resolution options.
///
public enum RESOLUTION
{
///
/// 2208*1242. Supported frame rate: 15 FPS.
///
HD2K,
///
/// 1920*1080. Supported frame rates: 15, 30 FPS.
///
HD1080,
///
/// 1280*720. Supported frame rates: 15, 30, 60 FPS.
///
HD720,
///
/// 672*376. Supported frame rates: 15, 30, 60, 100 FPS.
///
VGA
};
///
/// Types of compatible ZED cameras.
///
public enum MODEL
{
///
/// ZED(1)
///
ZED,
///
/// ZED Mini.
///
ZED_M,
///
/// ZED2.
///
ZED2,
///
/// ZED2i
///
ZED2i
};
///
/// Lists available sensing modes - whether to produce the original depth map (STANDARD) or one with
/// smoothing and other effects added to fill gaps and roughness (FILL).
///
public enum SENSING_MODE
{
///
/// This mode outputs the standard ZED depth map that preserves edges and depth accuracy.
/// However, there will be missing data where a depth measurement couldn't be taken, such as from
/// a surface being occluded from one sensor but not the other.
/// Better for: Obstacle detection, autonomous navigation, people detection, 3D reconstruction.
///
STANDARD,
///
/// This mode outputs a smooth and fully dense depth map. It doesn't have gaps in the data
/// like STANDARD where depth can't be calculated directly, but the values it fills them with
/// is less accurate than a real measurement.
/// Better for: AR/VR, mixed-reality capture, image post-processing.
///
FILL
};
///
/// Lists available view types retrieved from the camera, used for creating human-viewable (Image-type) textures.
///
/// Based on the VIEW enum in the ZED C++ SDK. For more info, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/group__Video__group.html#ga77fc7bfc159040a1e2ffb074a8ad248c
///
public enum VIEW
{
///
/// Left RGBA image. As a ZEDMat, MAT_TYPE is set to MAT_TYPE_8U_C4.
///
LEFT,
///
/// Right RGBA image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C4.
///
RIGHT,
///
/// Left GRAY image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C1.
///
LEFT_GREY,
///
/// Right GRAY image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C1.
///
RIGHT_GREY,
///
/// Left RGBA unrectified image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C4.
///
LEFT_UNRECTIFIED,
///
/// Right RGBA unrectified image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C4.
///
RIGHT_UNRECTIFIED,
///
/// Left GRAY unrectified image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C1.
///
LEFT_UNRECTIFIED_GREY,
///
/// Right GRAY unrectified image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C1.
///
RIGHT_UNRECTIFIED_GREY,
///
/// Left and right image. Will be double the width to hold both. As a ZEDMat, MAT_TYPE is set to MAT_8U_C4.
///
SIDE_BY_SIDE,
///
/// Normalized depth image. As a ZEDMat, MAT_TYPE is set to sl::MAT_TYPE_8U_C4.
/// Use an Image texture for viewing only. For measurements, use a Measure type instead
/// (ZEDCamera.RetrieveMeasure()) to preserve accuracy.
///
DEPTH,
///
/// Normalized confidence image. As a ZEDMat, MAT_TYPE is set to MAT_8U_C4.
/// Use an Image texture for viewing only. For measurements, use a Measure type instead
/// (ZEDCamera.RetrieveMeasure()) to preserve accuracy.
///
CONFIDENCE,
///
/// Color rendering of the normals. As a ZEDMat, MAT_TYPE is set to MAT_8U_C4.
/// Use an Image texture for viewing only. For measurements, use a Measure type instead
/// (ZEDCamera.RetrieveMeasure()) to preserve accuracy.
///
NORMALS,
///
/// Color rendering of the right depth mapped on right sensor. As a ZEDMat, MAT_TYPE is set to MAT_8U_C4.
/// Use an Image texture for viewing only. For measurements, use a Measure type instead
/// (ZEDCamera.RetrieveMeasure()) to preserve accuracy.
///
DEPTH_RIGHT,
///
/// Color rendering of the normals mapped on right sensor. As a ZEDMat, MAT_TYPE is set to MAT_8U_C4.
/// Use an Image texture for viewing only. For measurements, use a Measure type instead
/// (ZEDCamera.RetrieveMeasure()) to preserve accuracy.
///
NORMALS_RIGHT
};
///
/// Lists available camera settings for the ZED camera (contrast, hue, saturation, gain, etc.)
///
public enum CAMERA_SETTINGS
{
///
/// Brightness control. Value should be between 0 and 8.
///
BRIGHTNESS,
///
/// Contrast control. Value should be between 0 and 8.
///
CONTRAST,
///
/// Hue control. Value should be between 0 and 11.
///
HUE,
///
/// Saturation control. Value should be between 0 and 8.
///
SATURATION,
///
/// Sharpness control. Value should be between 0 and 8.
///
SHARPNESS,
///
/// Gamma control. Value should be between 1 and 9
///
GAMMA,
///
/// Gain control. Value should be between 0 and 100 for manual control.
/// If ZED_EXPOSURE is set to -1 (automatic mode), then gain will be automatic as well.
///
GAIN,
///
/// Exposure control. Value can be between 0 and 100.
/// Setting to -1 enables auto exposure and auto gain.
/// Setting to 0 disables auto exposure but doesn't change the last applied automatic values.
/// Setting to 1-100 disables auto mode and sets exposure to the chosen value.
///
EXPOSURE,
///
/// Auto-exposure and auto gain. Setting this to true switches on both. Assigning a specifc value to GAIN or EXPOSURE will set this to 0.
///
AEC_AGC,
///
/// ROI for auto exposure/gain. ROI defines the target where the AEC/AGC will be calculated
/// Use overloaded function for this enum
///
AEC_AGC_ROI,
///
/// Color temperature control. Value should be between 2800 and 6500 with a step of 100.
///
WHITEBALANCE,
///
/// Defines if the white balance is in automatic mode or not.
///
AUTO_WHITEBALANCE,
///
/// front LED status (1==enable, 0 == disable)
///
LED_STATUS
};
///
/// Lists available measure types retrieved from the camera, used for creating precise measurement maps
/// (Measure-type textures).
/// Based on the MEASURE enum in the ZED C++ SDK. For more info, see:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/group__Depth__group.html#ga798a8eed10c573d759ef7e5a5bcd545d
///
public enum MEASURE
{
///
/// Disparity map. As a ZEDMat, MAT_TYPE is set to MAT_32F_C1.
///
DISPARITY,
///
/// Depth map. As a ZEDMat, MAT_TYPE is set to MAT_32F_C1.
///
DEPTH,
///
/// Certainty/confidence of the disparity map. As a ZEDMat, MAT_TYPE is set to MAT_32F_C1.
///
CONFIDENCE,
///
/// 3D coordinates of the image points. Used for point clouds in ZEDPointCloudManager.
/// As a ZEDMat, MAT_TYPE is set to MAT_32F_C4. The 4th channel may contain the colors.
///
XYZ,
///
/// 3D coordinates and color of the image. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// The 4th channel encodes 4 UCHARs for colors in R-G-B-A order.
///
XYZRGBA,
///
/// 3D coordinates and color of the image. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// The 4th channel encode 4 UCHARs for colors in B-G-R-A order.
///
XYZBGRA,
///
/// 3D coordinates and color of the image. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// The 4th channel encodes 4 UCHARs for color in A-R-G-B order.
///
XYZARGB,
///
/// 3D coordinates and color of the image. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 contains color in A-B-G-R order.
///
XYZABGR,
///
/// 3D coordinates and color of the image. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// The 4th channel encode 4 UCHARs for color in A-B-G-R order.
///
NORMALS,
///
/// Disparity map for the right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C1.
///
DISPARITY_RIGHT,
///
/// Depth map for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C1.
///
DEPTH_RIGHT,
///
/// Point cloud for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4. Channel 4 is empty.
///
XYZ_RIGHT,
///
/// Colored point cloud for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 contains colors in R-G-B-A order.
///
XYZRGBA_RIGHT,
///
/// Colored point cloud for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 contains colors in B-G-R-A order.
///
XYZBGRA_RIGHT,
///
/// Colored point cloud for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 contains colors in A-R-G-B order.
///
XYZARGB_RIGHT,
///
/// Colored point cloud for right sensor. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 contains colors in A-B-G-R order.
///
XYZABGR_RIGHT,
///
/// Normals vector for right view. As a ZEDMat, MAT_TYPE is set to MAT_32F_C4.
/// Channel 4 is empty (set to 0).
///
NORMALS_RIGHT,
///
/// Depth map in millimeter. Each pixel contains 1 unsigned short. As a ZEDMat, MAT_TYPE is set to MAT_U16_C1.
///
DEPTH_U16_MM,
///
/// Depth map in millimeter for right sensor. Each pixel contains 1 unsigned short. As a ZEDMat, MAT_TYPE is set to MAT_U16_C1.
///
DEPTH_U16_MM_RIGHT
};
///
/// Categories indicating when a timestamp is captured.
///
public enum TIME_REFERENCE
{
///
/// Timestamp from when the image was received over USB from the camera, defined
/// by when the entire image was available in memory.
///
IMAGE,
///
/// Timestamp from when the relevant function was called.
///
CURRENT
};
///
/// Reference frame (world or camera) for tracking and depth sensing.
///
public enum REFERENCE_FRAME
{
///
/// Matrix contains the total displacement from the world origin/the first tracked point.
///
WORLD,
///
/// Matrix contains the displacement from the previous camera position to the current one.
///
CAMERA
};
///
/// Possible states of the ZED's Tracking system.
///
public enum TRACKING_STATE
{
///
/// Tracking is searching for a match from the database to relocate to a previously known position.
///
TRACKING_SEARCH,
///
/// Tracking is operating normally; tracking data should be correct.
///
TRACKING_OK,
///
/// Tracking is not enabled.
///
TRACKING_OFF,
///
/// This is the last searching state of the track, the track will be deleted in the next retreiveObject
///
TRACKING_TERMINATE
}
///
/// SVO compression modes.
///
public enum SVO_COMPRESSION_MODE
{
///
/// Lossless compression based on png/zstd. Average size = 42% of RAW.
///
LOSSLESS_BASED,
///
/// H264(AVCHD) GPU based compression : avg size = 1% (of RAW). Requires a NVIDIA GPU
///
H264_BASED,
///
/// H265(HEVC) GPU based compression: avg size = 1% (of RAW). Requires a NVIDIA GPU, Pascal architecture or newer
///
H265_BASED,
///
/// H264 Lossless GPU/Hardware based compression: avg size = 25% (of RAW). Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Requires a NVIDIA GPU
///
H264_LOSSLESS_BASED,
///
/// H265 Lossless GPU/Hardware based compression: avg size = 25% (of RAW). Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Requires a NVIDIA GPU
///
H265_LOSSLESS_BASED,
}
///
/// Streaming codecs
///
public enum STREAMING_CODEC
{
///
/// AVCHD Based compression (H264)
///
AVCHD_BASED,
///
/// HEVC Based compression (H265)
///
HEVC_BASED
}
///
/// Spatial Mapping type (default is mesh)
///
public enum SPATIAL_MAP_TYPE
{
///
/// Represent a surface with faces, 3D points are linked by edges, no color information
///
MESH,
///
/// Geometry is represented by a set of 3D colored points.
///
FUSED_POINT_CLOUD
};
///
/// Mesh formats that can be saved/loaded with spatial mapping.
///
public enum MESH_FILE_FORMAT
{
///
/// Contains only vertices and faces.
///
PLY,
///
/// Contains only vertices and faces, encoded in binary.
///
BIN,
///
/// Contains vertices, normals, faces, and texture information (if possible).
///
OBJ
}
///
/// Presets for filtering meshes scannedw ith spatial mapping. Higher values reduce total face count by more.
///
public enum FILTER
{
///
/// Soft decimation and smoothing.
///
LOW,
///
/// Decimate the number of faces and apply a soft smooth.
///
MEDIUM,
///
/// Drastically reduce the number of faces.
///
HIGH,
}
///
/// Possible states of the ZED's Spatial Mapping system.
///
public enum SPATIAL_MAPPING_STATE
{
///
/// Spatial mapping is initializing.
///
SPATIAL_MAPPING_STATE_INITIALIZING,
///
/// Depth and tracking data were correctly integrated into the fusion algorithm.
///
SPATIAL_MAPPING_STATE_OK,
///
/// Maximum memory dedicated to scanning has been reached; the mesh will no longer be updated.
///
SPATIAL_MAPPING_STATE_NOT_ENOUGH_MEMORY,
///
/// EnableSpatialMapping() wasn't called (or the scanning was stopped and not relaunched).
///
SPATIAL_MAPPING_STATE_NOT_ENABLED,
///
/// Effective FPS is too low to give proper results for spatial mapping.
/// Consider using performance-friendly parameters (DEPTH_MODE_PERFORMANCE, VGA or HD720 camera resolution,
/// and LOW spatial mapping resolution).
///
SPATIAL_MAPPING_STATE_FPS_TOO_LOW
}
///
/// Units used by the SDK for measurements and tracking. METER is best to stay consistent with Unity.
///
public enum UNIT
{
///
/// International System, 1/1000 meters.
///
MILLIMETER,
///
/// International System, 1/100 meters.
///
CENTIMETER,
///
/// International System, 1/1 meters.
///
METER,
///
/// Imperial Unit, 1/12 feet.
///
INCH,
///
/// Imperial Unit, 1/1 feet.
///
FOOT
}
///
/// Struct containing all parameters passed to the SDK when initializing the ZED.
/// These parameters will be fixed for the whole execution life time of the camera.
/// For more details, see the InitParameters class in the SDK API documentation:
/// https://www.stereolabs.com/developers/documentation/API/v2.5.1/structsl_1_1InitParameters.html
///
public class InitParameters
{
public sl.INPUT_TYPE inputType;
///
/// Resolution the ZED will be set to.
///
public sl.RESOLUTION resolution;
///
/// Requested FPS for this resolution. Setting it to 0 will choose the default FPS for this resolution.
///
public int cameraFPS;
///
/// ID for identifying which of multiple connected ZEDs to use.
///
public int cameraDeviceID;
///
/// Path to a recorded SVO file to play, including filename.
///
public string pathSVO = "";
///
/// In SVO playback, this mode simulates a live camera and consequently skipped frames if the computation framerate is too slow.
///
public bool svoRealTimeMode;
///
/// Define a unit for all metric values (depth, point clouds, tracking, meshes, etc.) Meters are recommended for Unity.
///
public UNIT coordinateUnit;
///
/// This defines the order and the direction of the axis of the coordinate system.
/// LEFT_HANDED_Y_UP is recommended to match Unity's coordinates.
///
public COORDINATE_SYSTEM coordinateSystem;
///
/// Quality level of depth calculations. Higher settings improve accuracy but cost performance.
///
public sl.DEPTH_MODE depthMode;
///
/// Minimum distance from the camera from which depth will be computed, in the defined coordinateUnit.
///
public float depthMinimumDistance;
///
/// When estimating the depth, the SDK uses this upper limit to turn higher values into \ref TOO_FAR ones.
/// The current maximum distance that can be computed in the defined \ref UNIT.
/// Changing this value has no impact on performance and doesn't affect the positional tracking nor the spatial mapping. (Only the depth, point cloud, normals)
///
public float depthMaximumDistance;
///
/// Defines if images are horizontally flipped.
///
public int cameraImageFlip;
///
/// Defines if measures relative to the right sensor should be computed (needed for MEASURE__RIGHT).
///
public bool enableRightSideMeasure;
///
/// True to disable self-calibration and use the optional calibration parameters without optimizing them.
/// False is recommended, so that calibration parameters can be optimized.
///
public bool cameraDisableSelfCalib;
///
/// True for the SDK to provide text feedback.
///
public int sdkVerbose;
///
/// ID of the graphics card on which the ZED's computations will be performed.
///
public int sdkGPUId;
///
/// If set to verbose, the filename of the log file into which the SDK will store its text output.
///
public string sdkVerboseLogFile = "";
///
/// True to stabilize the depth map. Recommended.
///
public bool depthStabilization;
///
/// Optional path for searching configuration (calibration) file SNxxxx.conf. (introduced in ZED SDK 2.6)
///
public string optionalSettingsPath = "";
///
/// True to stabilize the depth map. Recommended.
///
public bool sensorsRequired;
///
/// Path to a recorded SVO file to play, including filename.
///
public string ipStream = "";
///
/// Path to a recorded SVO file to play, including filename.
///
public ushort portStream = 30000;
///
/// Whether to enable improved color/gamma curves added in ZED SDK 3.0.
///
public bool enableImageEnhancement = true;
///
/// Set an optional file path where the SDK can find a file containing the calibration information of the camera computed by OpenCV.
/// Using this will disable the factory calibration of the camera.
/// Erroneous calibration values can lead to poor SDK modules accuracy.
///
public string optionalOpencvCalibrationFile = "";
///
/// Define a timeout in seconds after which an error is reported if the \ref open() command fails.
/// Set to '-1' to try to open the camera endlessly without returning error in case of failure.
/// Set to '0' to return error in case of failure at the first attempt.
/// This parameter only impacts the LIVE mode.
///
public float openTimeoutSec;
///
/// Constructor. Sets default initialization parameters recommended for Unity.
///
public InitParameters()
{
this.inputType = sl.INPUT_TYPE.INPUT_TYPE_USB;
this.resolution = RESOLUTION.HD720;
this.cameraFPS = 60;
this.cameraDeviceID = 0;
this.pathSVO = "";
this.svoRealTimeMode = false;
this.coordinateUnit = UNIT.METER;
this.coordinateSystem = COORDINATE_SYSTEM.LEFT_HANDED_Y_UP;
this.depthMode = DEPTH_MODE.PERFORMANCE;
this.depthMinimumDistance = -1;
this.depthMaximumDistance = -1;
this.cameraImageFlip = 2;
this.cameraDisableSelfCalib = false;
this.sdkVerbose = 0;
this.sdkGPUId = -1;
this.sdkVerboseLogFile = "";
this.enableRightSideMeasure = false;
this.depthStabilization = true;
this.optionalSettingsPath = "";
this.sensorsRequired = false;
this.ipStream = "";
this.portStream = 30000;
this.enableImageEnhancement = true;
this.optionalOpencvCalibrationFile = "";
this.openTimeoutSec = 5.0f;
}
}
///
/// List of available coordinate systems. Left-Handed, Y Up is recommended to stay consistent with Unity.
/// consistent with Unity.
///
public enum COORDINATE_SYSTEM
{
///
/// Standard coordinates system used in computer vision.
/// Used in OpenCV. See: http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
///
IMAGE,
///
/// Left-Handed with Y up and Z forward. Recommended. Used in Unity with DirectX.
///
LEFT_HANDED_Y_UP,
///
/// Right-Handed with Y pointing up and Z backward. Used in OpenGL.
///
RIGHT_HANDED_Y_UP,
///
/// Right-Handed with Z pointing up and Y forward. Used in 3DSMax.
///
RIGHT_HANDED_Z_UP,
///
/// Left-Handed with Z axis pointing up and X forward. Used in Unreal Engine.
///
LEFT_HANDED_Z_UP
}
///
/// Possible states of the ZED's spatial memory area export, for saving 3D features used
/// by the tracking system to relocalize the camera. This is used when saving a mesh generated
/// by spatial mapping when Save Mesh is enabled - a .area file is saved as well.
///
public enum AREA_EXPORT_STATE
{
///
/// Spatial memory file has been successfully created.
///
AREA_EXPORT_STATE_SUCCESS,
///
/// Spatial memory file is currently being written to.
///
AREA_EXPORT_STATE_RUNNING,
///
/// Spatial memory file export has not been called.
///
AREA_EXPORT_STATE_NOT_STARTED,
///
/// Spatial memory contains no data; the file is empty.
///
AREA_EXPORT_STATE_FILE_EMPTY,
///
/// Spatial memory file has not been written to because of a bad file name.
///
AREA_EXPORT_STATE_FILE_ERROR,
///
/// Spatial memory has been disabled, so no file can be created.
///
AREA_EXPORT_STATE_SPATIAL_MEMORY_DISABLED
};
///
/// Runtime parameters used by the ZEDCamera.Grab() function, and its Camera::grab() counterpart in the SDK.
///
[StructLayout(LayoutKind.Sequential)]
public struct RuntimeParameters {
///
/// Defines the algorithm used for depth map computation, more info : \ref SENSING_MODE definition.
///
public sl.SENSING_MODE sensingMode;
///
/// Provides 3D measures (point cloud and normals) in the desired reference frame (default is REFERENCE_FRAME_CAMERA).
///
public sl.REFERENCE_FRAME measure3DReferenceFrame;
///
/// Defines whether the depth map should be computed.
///
[MarshalAs(UnmanagedType.U1)]
public bool enableDepth;
///
/// Defines the confidence threshold for the depth. Based on stereo matching score.
///
public int confidenceThreshold;
///
/// Defines texture confidence threshold for the depth. Based on textureness confidence.
///
public int textureConfidenceThreshold;
///
/// Defines if the saturated area (Luminance>=255) must be removed from depth map estimation
///
public bool removeSaturatedAreas;
}
///
///brief Lists available compression modes for SVO recording.
///
public enum FLIP_MODE
{
OFF = 0, /// default behavior.
ON = 1, /// Images and camera sensors data are flipped, useful when your camera is mounted upside down.
AUTO = 2, /// in live mode: use the camera orientation (if an IMU is available) to set the flip mode, in SVO mode, read the state of this enum when recorded
};
///
/// Part of the ZED (left/right sensor, center) that's considered its center for tracking purposes.
///
public enum TRACKING_FRAME
{
///
/// Camera's center is at the left sensor.
///
LEFT_EYE,
///
/// Camera's center is in the camera's physical center, between the sensors.
///
CENTER_EYE,
///
/// Camera's center is at the right sensor.
///
RIGHT_EYE
};
///
/// Types of USB device brands.
///
public enum USB_DEVICE
{
///
/// Oculus device, eg. Oculus Rift VR Headset.
///
USB_DEVICE_OCULUS,
///
/// HTC device, eg. HTC Vive.
///
USB_DEVICE_HTC,
///
/// Stereolabs device, eg. ZED/ZED Mini.
///
USB_DEVICE_STEREOLABS
};
////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// Object Detection /////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// sets batch trajectory parameters
/// The default constructor sets all parameters to their default settings.
/// Parameters can be user adjusted.
///
[StructLayout(LayoutKind.Sequential)]
public struct BatchParameters
{
///
/// Defines if the Batch option in the object detection module is enabled. Batch queueing system provides:
/// - Deep-Learning based re-identification
/// - Trajectory smoothing and filtering
///
///
/// To activate this option, enable must be set to true.
///
[MarshalAs(UnmanagedType.U1)]
public bool enable;
///
/// Max retention time in seconds of a detected object. After this time, the same object will mostly have a different ID.
///
public float idRetentionTime;
///
/// Trajectories will be output in batch with the desired latency in seconds.
/// During this waiting time, re-identification of objects is done in the background.
/// Specifying a short latency will limit the search (falling in timeout) for previously seen object IDs but will be closer to real time output.
/// Specifying a long latency will reduce the change of timeout in Re-ID but increase difference with live output.
///
public float latency;
}
///
/// Contains AI model status.
///
[StructLayout(LayoutKind.Sequential)]
public struct AI_Model_status
{
///
/// the model file is currently present on the host.
///
[MarshalAs(UnmanagedType.U1)]
public bool downloaded;
///
/// an engine file with the expected architecure is found.
///
[MarshalAs(UnmanagedType.U1)]
public bool optimized;
};
///
/// Sets the object detection parameters.
///
[StructLayout(LayoutKind.Sequential)]
public struct dll_ObjectDetectionParameters
{
///
/// Defines if the object detection is synchronized to the image or runs in a separate thread.
///
[MarshalAs(UnmanagedType.U1)]
public bool imageSync;
///
/// Defines if the object detection will track objects across multiple images, instead of an image-by-image basis.
///
[MarshalAs(UnmanagedType.U1)]
public bool enableObjectTracking;
///
/// Defines if the SDK will calculate 2D masks for each object. Requires more performance, so don't enable if you don't need these masks.
///
[MarshalAs(UnmanagedType.U1)]
public bool enable2DMask;
///
/// Defines the AI model used for detection
///
public sl.DETECTION_MODEL detectionModel;
///
/// Defines if the body fitting will be applied
///
[MarshalAs(UnmanagedType.U1)]
public bool enableBodyFitting;
///
/// Body Format. BODY_FORMAT.POSE_34 automatically enables body fitting.
///
public sl.BODY_FORMAT bodyFormat;
///
/// Defines a upper depth range for detections.
/// Defined in UNIT set at sl.Camera.Open.
/// Default value is set to sl.Initparameters.depthMaximumDistance (can not be higher).
///
public float maxRange;
///
/// Batching system parameters.
/// Batching system(introduced in 3.5) performs short-term re-identification with deep learning and trajectories filtering.
/// BatchParameters.enable need to be true to use this feature (by default disabled)
///
public BatchParameters batchParameters;
/**
\brief Defines the filtering mode that should be applied to raw detections.
*/
public OBJECT_FILTERING_MODE filteringMode;
};
[StructLayout(LayoutKind.Sequential)]
public struct dll_ObjectDetectionRuntimeParameters
{
///
/// The detection confidence threshold between 1 and 99.
/// A confidence of 1 means a low threshold, more uncertain objects and 99 very few but very precise objects.
/// Ex: If set to 80, then the SDK must be at least 80% sure that a given object exists before reporting it in the list of detected objects.
/// If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked.
///
public float detectionConfidenceThreshold;
///
///
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = (int)sl.OBJECT_CLASS.LAST)]
public int[] objectClassFilter;
///
///
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = (int)sl.OBJECT_CLASS.LAST)]
public int[] object_confidence_threshold;
};
///
/// Lists of supported skeleton body model
///
public enum BODY_FORMAT
{
POSE_18,
POSE_34,
};
///
/// Object data structure directly from the SDK. Represents a single object detection.
/// See DetectedObject for an abstracted version with helper functions that make this data easier to use in Unity.
///
[StructLayout(LayoutKind.Sequential)]
public struct ObjectDataSDK
{
///
/// Object identification number, used as a reference when tracking the object through the frames.
///
public int id;
///
///Unique ID to help identify and track AI detections. Can be either generated externally, or using \ref ZEDCamera.generateUniqueId() or left empty
///
[MarshalAs(UnmanagedType.ByValTStr, SizeConst = 37)]
public string uniqueObjectId;
///
/// Object label, forwarded from \ref CustomBoxObjects when using DETECTION_MODEL.CUSTOM_BOX_OBJECTS
///
public int rawLabel;
///
/// Object category. Identify the object type.
///
public sl.OBJECT_CLASS objectClass;
public sl.OBJECT_SUBCLASS objectSubClass;
public sl.OBJECT_TRACK_STATE objectTrackingState;
public sl.OBJECT_ACTION_STATE actionState;
public float confidence;
public System.IntPtr mask;
///
/// Image data.
/// Note that Y in these values is relative from the top of the image, whereas the opposite is true
/// in most related Unity functions. If using this raw value, subtract Y from the
/// image height to get the height relative to the bottom.
///
/// 0 ------- 1
/// | obj |
/// 3-------- 2
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 4)]
public Vector2[] imageBoundingBox;
///
/// 3D space data (Camera Frame since this is what we used in Unity)
///
public Vector3 rootWorldPosition; //object root position
public Vector3 headWorldPosition; //object head position (only for HUMAN detectionModel)
public Vector3 rootWorldVelocity; //object root velocity
///
/// The 3D space bounding box. given as array of vertices
///
/// 1 ---------2
/// /| /|
/// 0 |--------3 |
/// | | | |
/// | 5--------|-6
/// |/ |/
/// 4 ---------7
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)]
public Vector3[] worldBoundingBox; // 3D Bounding Box of object
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)]
public Vector3[] headBoundingBox;// 3D Bounding Box of head (only for HUMAN detectionModel)
///
/// The 2D position of skeleton joints
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 34)]
public Vector2[] skeletonJointPosition2D;// 2D position of the joints of the skeleton
///
/// The 3D position of skeleton joints
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 34)]
public Vector3[] skeletonJointPosition;// 3D position of the joints of the skeleton
// Full covariance matrix for position (3x3). Only 6 values are necessary
// [p0, p1, p2]
// [p1, p3, p4]
// [p2, p4, p5]
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 6)]
public float[] positionCovariance;// covariance matrix of the 3d position, represented by its upper triangular matrix value
///
/// Per keypoint detection confidence, can not be lower than the ObjectDetectionRuntimeParameters.detection_confidence_threshold.
/// Not available with DETECTION_MODEL.MULTI_CLASS_BOX.
/// in some cases, eg. body partially out of the image or missing depth data, some keypoint can not be detected, they will have non finite values.
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 34)]
public float[] keypointConfidence;
///
/// Global position per joint in the coordinate frame of the requested skeleton format.
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 34)]
public Vector3[] localPositionPerJoint;
///
/// Local orientation per joint in the coordinate frame of the requested skeleton format.
/// The orientation is represented by a quaternion.
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 34)]
public Quaternion[] localOrientationPerJoint;
///
/// Global root rotation.
///
public Quaternion globalRootOrientation;
};
///
/// Container to store the externally detected objects. The objects can be ingested using IngestCustomBoxObjects() function to extract 3D information and tracking over time.
///
[StructLayout(LayoutKind.Sequential)]
public struct CustomBoxObjectData
{
///
///Unique ID to help identify and track AI detections. Can be either generated externally, or using \ref ZEDCamera.generateUniqueId() or left empty
///
[MarshalAs(UnmanagedType.ByValTStr, SizeConst = 37)]
public string uniqueObjectID;
///
/// 2D bounding box represented as four 2D points starting at the top left corner and rotation clockwise.
///
/// 0 ------- 1
/// | obj |
/// 3-------- 2
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 4)]
public Vector2[] boundingBox2D;
///
/// Object label, this information is passed-through and can be used to improve object tracking
///
public int label;
///
/// Detection confidence. Should be [0-1]. It can be used to improve the object tracking
///
public float probability;
///
/// Provide hypothesis about the object movements(degrees of freedom) to improve the object tracking
/// true: means 2 DoF projected alongside the floor plane, the default for object standing on the ground such as person, vehicle, etc
/// false : 6 DoF full 3D movements are allowed
///
[MarshalAs(UnmanagedType.U1)]
public bool isGrounded;
}
///
/// Object Scene data directly from the ZED SDK. Represents all detections given during a single image frame.
/// See DetectionFrame for an abstracted version with helper functions that make this data easier to use in Unity.
/// Contains the number of object in the scene and the objectData structure for each object.
/// Since the data is transmitted from C++ to C#, the size of the structure must be constant. Therefore, there is a limitation of 200 (MAX_OBJECT constant) objects in the image.
/// This number cannot be changed.
///
[StructLayout(LayoutKind.Sequential)]
public struct ObjectsFrameSDK
{
///
/// How many objects were detected this frame. Use this to iterate through the top of objectData; objects with indexes greater than numObject are empty.
///
public int numObject;
///
/// Timestamp of the image where these objects were detected.
///
public ulong timestamp;
///
/// Defines if the object frame is new (new timestamp)
///
public int isNew;
///
/// Defines if the object is tracked
///
public int isTracked;
///
/// Current detection model used.
///
public sl.DETECTION_MODEL detectionModel;
///
/// Array of objects
///
[MarshalAs(UnmanagedType.ByValArray, SizeConst = (int)(Constant.MAX_OBJECTS))]
public ObjectDataSDK[] objectData;
};
///
/// Lists available object class
///
public enum OBJECT_CLASS
{
PERSON = 0,
VEHICLE = 1,
BAG = 2,
ANIMAL = 3,
ELECTRONICS = 4,
FRUIT_VEGETABLE = 5,
SPORT = 6,
LAST = 7
};
///
/// Lists available object subclass.
///
public enum OBJECT_SUBCLASS
{
PERSON = 0,
// VEHICLES
BICYCLE = 1,
CAR = 2,
MOTORBIKE = 3,
BUS = 4,
TRUCK = 5,
BOAT = 6,
// BAGS
BACKPACK = 7,
HANDBAG = 8,
SUITCASE = 9,
// ANIMALS
BIRD = 10,
CAT = 11,
DOG = 12,
HORSE = 13,
SHEEP = 14,
COW = 15,
// ELECTRONICS
CELLPHONE = 16,
LAPTOP = 17,
// FRUITS/VEGETABLES
BANANA = 18,
APPLE = 19,
ORANGE = 20,
CARROT = 21,
PERSON_HEAD = 22,
SPORTSBALL = 23,
LAST = 24
};
///
/// Tracking state of an individual object.
///
public enum OBJECT_TRACK_STATE
{
OFF, /**< The tracking is not yet initialized, the object ID is not usable */
OK, /**< The object is tracked */
SEARCHING,/**< The object couldn't be detected in the image and is potentially occluded, the trajectory is estimated */
TERMINATE/**< This is the last searching state of the track, the track will be deleted in the next retreiveObject */
};
public enum OBJECT_ACTION_STATE
{
IDLE = 0, /**< The object is staying static. */
MOVING = 1, /**< The object is moving. */
LAST = 2
};
///
/// List available models for detection
///
public enum DETECTION_MODEL {
///
/// Any objects, bounding box based.
///
MULTI_CLASS_BOX,
///
/// Any objects, bounding box based.
///
MULTI_CLASS_BOX_ACCURATE,
///
/// Keypoints based, specific to human skeleton, real time performance even on Jetson or low end GPU cards.
///
HUMAN_BODY_FAST,
///
/// Keypoints based, specific to human skeleton, state of the art accuracy, requires powerful GPU.
///
HUMAN_BODY_ACCURATE,
///
/// Any objects, bounding box based.
///
MULTI_CLASS_BOX_MEDIUM,
///
/// Keypoints based, specific to human skeleton, real time performance even on Jetson or low end GPU cards.
///
HUMAN_BODY_MEDIUM,
///
/// Bounding Box detector specialized in person heads, particulary well suited for crowded environement, the person localization is also improved
///
PERSON_HEAD_BOX,
///
/// For external inference, using your own custom model and/or frameworks. This mode disable the internal inference engine, the 2D bounding box detection must be provided
///
CUSTOM_BOX_OBJECTS,
LAST
};
///
/// Lists of supported bounding box preprocessing
///
public enum OBJECT_FILTERING_MODE
{
///
/// SDK will not apply any preprocessing to the detected objects
///
NONE,
///
/// SDK will remove objects that are in the same 3D position as an already tracked object (independant of class ID). Default value
///
NMS3D,
///
/// SDK will remove objects that are in the same 3D position as an already tracked object of the same class ID
///
NMS3D_PER_CLASS
};
public enum AI_MODELS
{
///
/// related to sl.DETECTION_MODEL.MULTI_CLASS_BOX
///
MULTI_CLASS_DETECTION,
///
/// related to sl.DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM
///
MULTI_CLASS_MEDIUM_DETECTION,
///
/// related to sl.DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE
///
MULTI_CLASS_ACCURATE_DETECTION,
///
/// related to sl.DETECTION_MODEL.HUMAN_BODY_FAST
///
HUMAN_BODY_FAST_DETECTION,
///
/// related to sl.DETECTION_MODEL.HUMAN_BODY_MEDIUM
///
HUMAN_BODY_MEDIUM_DETECTION,
///
/// related to sl.DETECTION_MODEL.HUMAN_BODY_ACCURATE
///
HUMAN_BODY_ACCURATE_DETECTION, //
///
/// related to sl.DETECTION_MODEL.PERSON_HEAD
///
PERSON_HEAD_DETECTION,
///
/// related to sl.BatchParameters.enable
///
REID_ASSOCIATION, // related to
///
/// related to sl.DETECTION_MODEL.NEURAL
///
NEURAL_DEPTH,
LAST
};
///
/// semantic and order of human body keypoints.
///
public enum BODY_PARTS {
NOSE = 0,
NECK = 1,
RIGHT_SHOULDER = 2,
RIGHT_ELBOW= 3,
RIGHT_WRIST = 4,
LEFT_SHOULDER = 5,
LEFT_ELBOW = 6,
LEFT_WRIST = 7,
RIGHT_HIP = 8,
RIGHT_KNEE = 9,
RIGHT_ANKLE = 10,
LEFT_HIP = 11,
LEFT_KNEE = 12,
LEFT_ANKLE = 13,
RIGHT_EYE = 14,
LEFT_EYE = 15,
RIGHT_EAR = 16,
LEFT_EAR = 17,
LAST = 18
};
///
/// Contains batched data of a detected object
///
///
/// Contains batched data of a detected object
///
public class ObjectsBatch
{
///
/// How many data were stored. Use this to iterate through the top of position/velocity/bounding_box/...; objects with indexes greater than numData are empty.
///
public int numData = 0;
///
/// The trajectory id
///
public int id = 0;
///
/// Object Category. Identity the object type
///
public OBJECT_CLASS label = OBJECT_CLASS.LAST;
///
/// Object subclass
///
public OBJECT_SUBCLASS sublabel = OBJECT_SUBCLASS.LAST;
///
/// Defines the object tracking state
///
public TRACKING_STATE trackingState = TRACKING_STATE.TRACKING_TERMINATE;
///
/// A sample of 3d position
///
public Vector3[] positions = new Vector3[(int)Constant.MAX_BATCH_SIZE];
///
/// a sample of the associated position covariance
///
public float[,] positionCovariances = new float[(int)Constant.MAX_BATCH_SIZE, 6];
///
/// A sample of 3d velocity
///
public Vector3[] velocities = new Vector3[(int)Constant.MAX_BATCH_SIZE];
///
/// The associated position timestamp
///
public ulong[] timestamps = new ulong[(int)Constant.MAX_BATCH_SIZE];
///
/// A sample of 3d bounding boxes
///
public Vector3[,] boundingBoxes = new Vector3[(int)Constant.MAX_BATCH_SIZE, 8];
///
/// 2D bounding box of the person represented as four 2D points starting at the top left corner and rotation clockwise.
/// Expressed in pixels on the original image resolution, [0, 0] is the top left corner.
/// A ------ B
/// | Object |
/// D ------ C
///
public Vector2[,] boundingBoxes2D = new Vector2[(int)Constant.MAX_BATCH_SIZE, 4];
///
/// a sample of object detection confidence
///
public float[] confidences = new float[(int)Constant.MAX_BATCH_SIZE];
///
/// a sample of the object action state
///
public OBJECT_ACTION_STATE[] actionStates = new OBJECT_ACTION_STATE[(int)Constant.MAX_BATCH_SIZE];
///
/// a sample of 2d person keypoints.
/// Not available with DETECTION_MODEL::MULTI_CLASS_BOX.
/// in some cases, eg. body partially out of the image or missing depth data, some keypoint can not be detected, they will have non finite values.
///
public Vector2[,] keypoints2D = new Vector2[(int)Constant.MAX_BATCH_SIZE, 18];
///
/// a sample of 3d person keypoints
/// Not available with DETECTION_MODEL::MULTI_CLASS_BOX.
/// in some cases, eg. body partially out of the image or missing depth data, some keypoint can not be detected, they will have non finite values.
///
public Vector3[,] keypoints = new Vector3[(int)Constant.MAX_BATCH_SIZE, 18];
///
/// bounds the head with four 2D points.
/// Expressed in pixels on the original image resolution.
/// Not available with DETECTION_MODEL.MULTI_CLASS_BOX.
///
public Vector2[,] headBoundingBoxes2D = new Vector2[(int)Constant.MAX_BATCH_SIZE, 8];
///
/// bounds the head with eight 3D points.
/// Defined in sl.InitParameters.UNIT, expressed in RuntimeParameters.measure3DReferenceFrame.
/// Not available with DETECTION_MODEL.MULTI_CLASS_BOX.
///
public Vector3[,] headBoundingBoxes = new Vector3[(int)Constant.MAX_BATCH_SIZE, 8];
///
/// 3D head centroid.
/// Defined in sl.InitParameters.UNIT, expressed in RuntimeParameters.measure3DReferenceFrame.
/// Not available with DETECTION_MODEL.MULTI_CLASS_BOX.
///
public Vector3[] headPositions = new Vector3[(int)Constant.MAX_BATCH_SIZE];
///
/// Per keypoint detection confidence, can not be lower than the ObjectDetectionRuntimeParameters.detectionConfidenceThreshold.
/// Not available with DETECTION_MODEL.MULTI_CLASS_BOX.
/// in some cases, eg. body partially out of the image or missing depth data, some keypoint can not be detected, they will have non finite values.
///
public float[,] keypointConfidences = new float[(int)Constant.MAX_BATCH_SIZE, 18];
}
}// end namespace sl