Commit f5a372a7 authored by Retenua AB

initial commit

cmake_minimum_required(VERSION 2.8.3)
project(emitrace_robotics)
## Compile as C++11, supported in ROS Kinetic and newer
add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  roscpp
  std_msgs
  sensor_msgs
  geometry_msgs
  image_transport
  message_generation
  cv_bridge
  camera_info_manager
)
## System dependencies are found with CMake's conventions
find_package(OpenCV REQUIRED )
include_directories( ${OpenCV_INCLUDE_DIRS} )
MESSAGE( STATUS " OpenCV_DIR: " ${OpenCV_DIR} )
MESSAGE( STATUS " OpenCV_INCLUDE_DIRS: " ${OpenCV_INCLUDE_DIRS} )
MESSAGE( STATUS " OpenCV_LIBS: " ${OpenCV_LIBS} )
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a run_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
# Generate messages in the 'msg' folder
add_message_files(
  FILES
  CameraUnitInfo.msg
  SystemStatus.msg
  ApplicationStatus.msg
  SensorInfo.msg
  SensorConfiguration.msg
  SensorStatus.msg
  Data.msg
  Image.msg
  Object.msg
  Reflector.msg
  Contour.msg
)
# Generate services in the 'srv' folder
add_service_files(
  FILES
  SetOperationState.srv
  SetCameraPose.srv
  SetSensorShutter.srv
  SetSensorGain.srv
  SetSensorState.srv
  SetFlashTrigger.srv
  SetAmbientLightSuppression.srv
  SetFrameRate.srv
  RebootCameraUnit.srv
  SaveSensorConfiguration.srv
  SetNetworkSettings.srv
)
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
# Generate added messages and services with any dependencies listed here
generate_messages(
  DEPENDENCIES
  std_msgs
  sensor_msgs
  geometry_msgs
)
##############################################################################
# emitrace library
##############################################################################
include_directories( include/ )
link_directories( lib/ )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a run_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
  INCLUDE_DIRS include
  # LIBRARIES
  # CATKIN_DEPENDS roscpp std_msgs
  # DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/test_node.cpp)
add_executable(${PROJECT_NAME}_node src/emitrace_robotics_driver_node.cpp)
target_link_libraries(${PROJECT_NAME}_node
  emitrace_core_cv
  emitrace_robotics_cv
  emitrace_driver_assistance_cv
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
add_dependencies(${PROJECT_NAME}_node ${PROJECT_NAME}_generate_messages_cpp)
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_test.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)
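
The add_message_files and add_service_files lists above are compiled by generate_messages into C++ headers under the emitrace_robotics package. As a hedged illustration only (the topic name, service name, and callback contents below are hypothetical; the real interface is defined by the driver node, which is not reproduced in this excerpt), a client could use the generated types like this:

#include <ros/ros.h>
#include <emitrace_robotics/Data.h>          // generated from Data.msg
#include <emitrace_robotics/SetFrameRate.h>  // generated from SetFrameRate.srv

// Hypothetical callback: field access is omitted because the .msg bodies
// are not part of this excerpt.
void dataCallback(const emitrace_robotics::Data::ConstPtr& msg)
{
    ROS_INFO("received a Data message");
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "emitrace_robotics_client_sketch");
    ros::NodeHandle nh;

    // Hypothetical topic and service names.
    ros::Subscriber sub = nh.subscribe("emitrace/data", 10, dataCallback);
    ros::ServiceClient client =
        nh.serviceClient<emitrace_robotics::SetFrameRate>("emitrace/set_frame_rate");

    ros::spin();
    return 0;
}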
emitrace-core.hpp
#ifndef EMITRACE_CORE_H
#define EMITRACE_CORE_H
#include "emitrace.hpp"
#include <opencv2/opencv.hpp>
#include <cstdint>
#include <string>
#include <vector>
namespace emitrace
{
struct CameraUnitInfo
{
std::string ip_address;
std::string serial_number;
std::string type_name;
int8_t type = -1;
int8_t nb_sensors = -1;
};
struct SystemStatus
{
double timestamp = -1.;
std::vector<float> temperature;
std::vector<float> voltage;
std::vector<float> humidity;
std::vector<float> heater_activity;
std::vector<float> light_intensity;
std::vector<float> cpu_usage;
std::vector<int32_t> disk_space; // idx 0: total, idx 1: free, idx 2: available
std::vector<int32_t> memory; // idx 0: total, idx 1: free
std::vector<float> network_traffic; // idx 0: internal received, idx 1: internal transmitted, idx 2: external received, idx 3: external transmitted
std::vector<uint8_t> errors;
};
struct SensorInfo
{
int8_t image_type = IMAGE_TYPE_UNDEFINED;
int16_t resolution[2] = { -1, -1 };
float fov[2] = { -1.f, -1.f };
int32_t shutter_min = -1;
int32_t shutter_max = -1;
float gain_min = -1.f;
float gain_max = -1.f;
};
struct SensorConfiguration
{
std::string name;
int8_t exposure_mode = SENSOR_EXPOSURE_MODE_UNDEFINED;
uint32_t shutter = 0;
float gain = -1.;
int8_t sync_mode = SENSOR_SYNC_MODE_UNDEFINED;
bool flash_trigger = false;
int8_t subsampling[2] = { -1, -1 };
int8_t binning[2] = { -1, -1 };
int16_t roi[4] = { -1, -1, -1, -1 };
};
struct SensorStatus
{
SensorConfiguration configuration;
double timestamp = -1.;
bool active = true;
int8_t sensor_id = (int8_t)-1;
int16_t error = ERROR_NONE;
uint32_t frame_id = 0;
float framerate = -1.f;
int16_t resolution[2] = { -1, -1 };
};
struct DetectorInfo
{
std::string api_version;
};
struct ApplicationStatus
{
double timestamp = -1.;
float framerate = -1.f;
std::vector<uint8_t> errors;
int8_t mode = -1;
std::string configuration_name;
};
struct Image
{
double timestamp = -1;
uint32_t frame_id = 0;
uint32_t shutter = 0;
float gain = -1.;
int8_t exposure_mode = -1;
uint16_t width = -1;
uint16_t height = -1;
int8_t type = -1;
int8_t sensor_id = -1;
uint8_t* channel1 = NULL;
uint8_t* channel2 = NULL;
uint8_t* channel3 = NULL;
uint32_t step = -1;
};
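// Illustrative sketch, not part of the original header: assuming each
// channelN pointer refers to a planar 8-bit image with `height` rows and
// `step` bytes per row (an assumption; the layout is not documented in this
// commit), a channel can be wrapped in a cv::Mat header without copying.
// The function name is hypothetical.
inline cv::Mat wrap_image_channel_sketch(const Image& image, uint8_t* channel)
{
    // The Mat does not own the buffer; it stays valid only as long as the
    // Image's channel memory does.
    return cv::Mat(image.height, image.width, CV_8UC1, channel, image.step);
}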
struct ImageCV
{
double timestamp;
uint32_t frame_id;
uint32_t shutter;
float gain;
int8_t exposure_mode;
int8_t type;
int8_t sensor_id;
cv::Mat img;
ImageCV()
{
timestamp = -1.;
frame_id = 0;
shutter = 0;
gain = -1.;
exposure_mode = -1;
type = -1;
sensor_id = -1;
}
};
struct DetectionRange
{
float range_min;
float range_max;
float angle_min[2];
float angle_max[2];
std::vector<float> range_factor_angle[2];
std::vector<float> range_factor_min[2];
std::vector<float> range_factor_max[2];
};
struct GPIOConfiguration
{
int8_t config_id = -1;
std::vector<int8_t> pin_assignment;
};
struct NetworkSettings
{
std::string ip_address;
std::string netmask;
std::string gateway;
bool dhcp = false;
};
struct CameraIntrinsics
{
int calibration_id = -1;
std::string configuration_name;
int16_t image_size[2] = { -1, -1 };
double camera_matrix[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
std::vector<double> dist_coeffs;
};
struct CameraExtrinsics
{
int calibration_id = -1;
double rotation[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
double translation[3] = { -1.,-1.,-1. };
double essential_matrix[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
double fundamental_matrix[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
};
struct UndistortionParameters
{
int calibration_id = -1;
std::string configuration_name;
int16_t image_size[2] = { -1, -1 };
double rotation[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
int16_t valid_roi[4] = { -1, -1, -1, -1 };
};
struct StereoRectificationParameters
{
int calibration_id = -1;
std::string configuration_name;
int16_t image_size[2] = { -1, -1 };
double rotation1[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
double rotation2[3][3] = { {-1.,-1.,-1.}, {-1.,-1.,-1.}, {-1.,-1.,-1.} };
double projection1[3][4] = { {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.} };
double projection2[3][4] = { {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.} };
double disparity_to_depth_map[4][4] = { {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.}, {-1.,-1.,-1.,-1.} };
int16_t valid_roi1[4] = { -1, -1, -1, -1 };
int16_t valid_roi2[4] = { -1, -1, -1, -1 };
};
struct CalibrationData
{
int calibration_id = -1;
std::vector<CameraIntrinsics> intrinsics;
std::vector<CameraExtrinsics> extrinsics;
std::vector<StereoRectificationParameters> stereo_rectification;
};
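// Illustrative sketch, not part of the original header: the calibration
// fields above map directly onto OpenCV types, assuming row-major storage
// of camera_matrix and OpenCV-style distortion coefficients in dist_coeffs.
// The function name is hypothetical.
inline cv::Mat undistort_with_intrinsics_sketch(const cv::Mat& src,
                                                const CameraIntrinsics& intr)
{
    // Wrap the stored arrays without copying; cv::undistort only reads them.
    cv::Mat K(3, 3, CV_64F, const_cast<double*>(&intr.camera_matrix[0][0]));
    cv::Mat dist(intr.dist_coeffs, true);
    cv::Mat dst;
    cv::undistort(src, dst, K, dist);
    return dst;
}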
int get_camera_unit_info(const std::string& ip_address, CameraUnitInfo& info);
int get_calibration_data(const std::string& ip_address, const std::string& path, const std::string& sensor_config = std::string(""));
int get_calibration_data(const std::string& ip_address, CalibrationData& data, const std::string& sensor_config = std::string(""));
int get_time_stamp(const std::string& ip_address, double& timestamp);
int get_camera_pose(const std::string& ip_address, std::vector<float>& pose);
int get_detection_range(const std::string& ip_address, DetectionRange& range);
int get_system_status(const std::string& ip_address, SystemStatus& status);
int get_application_status(const std::string& ip_address, ApplicationStatus& status);
int get_sensor_info(const std::string& ip_address, int8_t sensor_id, SensorInfo& info);
int get_sensor_status(const std::string& ip_address, int8_t sensor_id, SensorStatus& status);
int get_sensor_configuration(const std::string& ip_address, int8_t sensor_id, SensorConfiguration& configuration);
int get_sensor_configurations(const std::string& ip_address, std::vector<std::string>& configurations);
int get_gpio_configuration(const std::string& ip_address, GPIOConfiguration& configuration);
int get_network_settings(const std::string& ip_address, NetworkSettings& settings);
int get_application_configurations(const std::string& ip_address, std::vector<std::string>& configurations);
int get_image(const std::string& ip_address, int8_t sensor_id, ImageCV& image);
int get_images(const std::string& ip_address, const std::vector<int8_t>& sensor_ids, std::vector<ImageCV>& images);
int get_images(const std::string& ip_address, const std::vector<int8_t>& sensor_ids, std::vector<ImageCV>& images_sig, std::vector<ImageCV>& images_ref);
int set_camera_pose(const std::string& ip_address, const std::vector<float>& pose);
int set_operation_mode(const std::string& ip_address, int mode);
int set_sensor_configuration(const std::string& ip_address, int8_t sensor_id, const SensorConfiguration& configuration);
int set_sensor_configuration(const std::string& ip_address, int8_t sensor_id, const std::string& name);
int set_sensor_state(const std::string& ip_address, int8_t sensor_id, bool active);
int set_sensor_shutter(const std::string& ip_address, int8_t sensor_id, uint32_t shutter);
int set_sensor_gain(const std::string& ip_address, int8_t sensor_id, float gain);
int set_sensor_mode(const std::string& ip_address, int8_t sensor_id, int8_t mode);
int set_flash_trigger(const std::string& ip_address, int8_t sensor_id, bool active);
int set_ambient_light_suppression(const std::string& ip_address, bool active);
int set_gpio_configuration(const std::string& ip_address, const GPIOConfiguration& configuration);
int set_network_settings(const std::string& ip_address, const NetworkSettings& settings);
int set_application_configuration(const std::string& ip_address, const std::string& name);
int set_frame_rate(const std::string& ip_address, float framerate);
int save_sensor_configuration(const std::string& ip_address, int8_t sensor_id); // under which name???
int reboot_camera_unit(const std::string& ip_address);
int transmit_command(const std::string& ip_address, const char* command, int timeout_ms, char** data, int32_t* data_package_size);
// set application parameters:
// -ambient light suppression
// -framerate (mode dependent)
int _decode_image(char* data, int32_t data_size, const std::string& name, Image& image);
} // end of namespace
#endif
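
The header above only declares the camera-unit API; nothing in this commit documents the return-value convention, so the sketch below assumes (without confirmation) that 0 indicates success, and uses a hypothetical camera address.

#include "emitrace-core.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>

int main()
{
    const std::string ip = "192.168.0.10";  // hypothetical camera unit address

    emitrace::CameraUnitInfo info;
    if (emitrace::get_camera_unit_info(ip, info) != 0)  // assumed: 0 == success
    {
        std::cerr << "camera unit not reachable" << std::endl;
        return 1;
    }
    std::cout << info.type_name << " (" << info.serial_number << "), "
              << static_cast<int>(info.nb_sensors) << " sensors" << std::endl;

    emitrace::ImageCV image;
    if (emitrace::get_image(ip, 0, image) == 0 && !image.img.empty())
    {
        cv::imshow("sensor 0", image.img);
        cv::waitKey(0);
    }
    return 0;
}

emitrace-driver-assistance.hpp (file name inferred from its include guard)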
#ifndef EMITRACE_DRIVER_ASSISTANCE_H
#define EMITRACE_DRIVER_ASSISTANCE_H
#include "emitrace-core.hpp"
namespace emitrace
{
struct Contour
{
std::vector<int16_t> x;
std::vector<int16_t> y;
};
struct Reflector
{
Contour contour;
float pos2d[2];
float pos3d[3];
float area;
float circularity;
float score;
Reflector()
{
pos2d[0] = 0.;
pos2d[1] = 0.;
pos3d[0] = 0.;
pos3d[1] = 0.;
pos3d[2] = 0.;
area = -1.;
circularity = -1.;
score = -1.;
}
};
struct Object
{
float pos2d[2];
float pos3d[3];
float vel3d[3];
float score;
int16_t id;
uint32_t lifetime;
Object()
{
pos2d[0] = 0.;
pos2d[1] = 0.;
pos3d[0] = 0.;
pos3d[1] = 0.;
pos3d[2] = 0.;
vel3d[0] = 0.;
vel3d[1] = 0.;
vel3d[2] = 0.;
score = -1.;
id = -1;
lifetime = 0;
}
};
struct Data
{
double timestamp = -1.;
float framerate = -1.;
int16_t error = -1;
uint32_t frame_id = 0;
int8_t risk_level = -1;
std::vector<Reflector> reflectors;
std::vector<Object> objects;
};
struct RiskZone
{
int8_t level = -1;
int8_t type = -1;
std::vector<float> params;
};
struct RiskSource
{
int8_t type = -1;
std::vector<float> params;
};
struct RiskEnvironment
{
std::vector<std::vector<RiskZone>> zones;
std::vector<RiskSource> risk_sources;
};
int get_data(const std::string& ip_address, Data& data);
int get_data_and_image(const std::string& ip_address, Data& data, int8_t sensor_id, ImageCV& image);
int get_data_and_images(const std::string& ip_address, Data& data, const std::vector<int8_t>& sensor_ids, std::vector<ImageCV>& images);
int get_risk_environment(const std::string& ip_address, RiskEnvironment& environment);
int add_risk_zone(const std::string& ip_address, const RiskZone& zone);
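// Illustrative usage sketch, not part of the original header: fetch one
// detection result together with the image of sensor 0 (sensor id chosen
// arbitrarily) and overlay the reported 2D positions. As above, a return
// value of 0 is assumed, without confirmation, to indicate success.
inline void draw_detections_sketch(const std::string& ip_address, cv::Mat& canvas)
{
    Data data;
    ImageCV image;
    if (get_data_and_image(ip_address, data, 0, image) != 0)
        return;
    canvas = image.img.clone();
    for (const Reflector& r : data.reflectors)
        cv::circle(canvas, cv::Point(cvRound(r.pos2d[0]), cvRound(r.pos2d[1])),
                   4, cv::Scalar(0, 255, 0), 2);
    for (const Object& o : data.objects)
        cv::circle(canvas, cv::Point(cvRound(o.pos2d[0]), cvRound(o.pos2d[1])),
                   8, cv::Scalar(0, 0, 255), 2);
}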