Commit 0d35f7fe authored by Iván del Pino

Added launch and params files for the ONA2 robot

parent f60e336c
ONA2.rviz 0 → 100644
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /Grid1
- /PointCloud21
Splitter Ratio: 0.5
Tree Height: 617
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.019999999552965164
Style: Flat Squares
Topic: /iri_ground_segmentation/lidar_points_ground_segmented
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/MarkerArray
Enabled: false
Marker Topic: /iri_ground_segmentation/roadmap_markers
Name: MarkerArray
Namespaces:
{}
Queue Size: 100
Value: false
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: ona2/base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Angle: 1.1175870895385742e-8
Class: rviz/TopDownOrtho
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Scale: 96.46293640136719
Target Frame: <Fixed Frame>
Value: TopDownOrtho (rviz)
X: 0.03260824829339981
Y: -0.33336418867111206
Saved: ~
Window Geometry:
Displays:
collapsed: true
Height: 914
Hide Left Dock: true
Hide Right Dock: false
QMainWindow State: 000000ff00000000fd000000040000000000000156000002f4fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000003d000002f4000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002f4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d000002f4000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000064d0000003efc0100000002fb0000000800540069006d006501000000000000064d000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000538000002f400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: false
Width: 1613
X: 67
Y: 27
<?xml version="1.0"?>
<launch>
<env name="LIBGL_ALWAYS_SOFTWARE" value="1" /> <!-- for running rviz inside VirtualBox; comment this line out if you do not use a VM -->
<rosparam command="load" file="$(find iri_ground_segmentation)/params/ONA2_iri_ground_segmentation.yaml" />
<remap from="/iri_ground_segmentation/lidar_points" to="/ona2/traversability/robosense_fused_ext_points"/>
<node pkg="iri_ground_segmentation" type="iri_ground_segmentation" name="iri_ground_segmentation" output="screen" respawn="false">
</node>
</launch>
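To sanity-check the remapping above without the real sensor, a small standalone publisher can feed the ONA2 input topic directly. The sketch below is illustrative only and not part of this commit (the node name, the dummy flat-ground cloud and the pcl::PointXYZ point type are assumptions); it publishes a sensor_msgs/PointCloud2 on /ona2/traversability/robosense_fused_ext_points in the ona2/base_link frame used by ONA2.rviz.

// Illustrative test publisher (not part of this commit): feeds the topic that the
// ONA2 launch file remaps to /iri_ground_segmentation/lidar_points.
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>

int main(int argc, char **argv)
{
  ros::init(argc, argv, "ona2_dummy_cloud_publisher");  // assumed node name
  ros::NodeHandle nh;
  ros::Publisher pub = nh.advertise<sensor_msgs::PointCloud2>(
      "/ona2/traversability/robosense_fused_ext_points", 1);

  // Flat dummy "ground" at z = -0.3 m, i.e. -1*sensor_height in the ONA2 params file below.
  pcl::PointCloud<pcl::PointXYZ> cloud;
  for (float x = -5.0f; x <= 5.0f; x += 0.5f)
    for (float y = -5.0f; y <= 5.0f; y += 0.5f)
      cloud.push_back(pcl::PointXYZ(x, y, -0.3f));

  sensor_msgs::PointCloud2 msg;
  pcl::toROSMsg(cloud, msg);
  msg.header.frame_id = "ona2/base_link";  // fixed frame configured in ONA2.rviz

  ros::Rate rate(10.0);  // matches the 10 Hz rate in the ONA2 params file
  while (ros::ok())
  {
    msg.header.stamp = ros::Time::now();
    pub.publish(msg);
    rate.sleep();
  }
  return 0;
}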
@@ -3,7 +3,7 @@
<env name="LIBGL_ALWAYS_SOFTWARE" value="1" /> <!-- for running rviz inside VirtualBox; comment this line out if you do not use a VM -->
<rosparam command="load" file="$(find iri_ground_segmentation)/params/iri_ground_segmentation.yaml" />
<remap from="/iri_ground_segmentation/lidar_points" to="/kitti/velo/pointcloud"/>
<remap from="/iri_ground_segmentation/ground_truth_lidar_points" to="/velodyne_points"/>
<remap from="/iri_ground_segmentation/ground_truth_lidar_points" to="/velodyne_points"/>
<node pkg="iri_ground_segmentation" type="iri_ground_segmentation" name="iri_ground_segmentation" output="screen" respawn="false">
</node>
</launch>
iri_ground_segmentation: {
rate: 10.00,
# Robot related geometric parameters
sensor_height: 0.3, ## Ground is expected to be at z = -1*sensor_height
robot_height: 1.5, ## All obstacle points above this value (w.r.t. the ground z predicted by the algorithm) are treated as overhanging obstacles
# Parameters affecting the exploration of the pointcloud
ROI_delta_x_and_y: 3.0, #4.0, ## This value sets the size of the ROIs: ROI_size = (2*ROI_delta_x_and_y)^2
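## (e.g. with ROI_delta_x_and_y = 3.0 each ROI is a 6.0 m x 6.0 m square, i.e. 36 m^2)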
ROI_shadow_area: 3.0, #6.5, ## This value has the same meaning as ROI_delta_x_and_y, but it is only used in the root vertex to overcome the shadow area
ground_reference_search_resolution_deg: 12.00, ## Angular resolution used when generating new ground references (graph nodes);
## it affects the number of nodes in the graph: lower values generate more nodes
elevation_grid_resolution: 2.1, #1.2, #1.5, #1.0 #0.5, ## This value is used to create the "elevation point cloud" which is a reduction of the original pointcloud, where
## only the lowest z values in each cell are stored (along with a vector of indexes pointing to the remaining points
## in the cell, so that the original pointcloud can be recovered or labeled using the info in the "elevation cloud").
## Large values can speed up the algorithm but generate a lower-resolution ground model; on the other hand, small values
## can produce outliers, because we would like to have only ground points in the elevation cloud (however, these outliers
## are usually successfully rejected by the Mahalanobis threshold and the "prior" information)
# Kalman filter noise parameters
## initial uncertainties
z_initial_std_dev: 0.05, ## Uncertainty in z = -1*sensor_height
initial_angular_std_dev_deg: 1.5, ## Used to initialize x_slope and y_slope variances
## propagation additive noises
propagation_z_additive_noise_per_meter: 0.01, ## Uncertainty added to var_z as a function of the distance to the parent vertex
propagation_additive_noise_deg_per_meter: 0.4, ## Uncertainty added to x_slope and y_slope as a function of the distance to the parent vertex
## (it is expressed in degrees per meter, but inside the code it is converted to slope per meter)
# measurement noise
z_observation_std_dev: 0.3, #0.15, #0.3, #0.45, for elevation_grid_resolution = 0.5 ## Uncertainty in the Lidar observations
# threshold for outlier rejection and classification
mahalanobis_threshold: 3.0, #2.7, #3.0, ## To classify points as obstacles or ground (a small value will cause false obstacle points,
## and a big value will increase the number of false ground points)
# Neural Network related parameters
use_neural_network: false,
extract_data_to_external_training_of_the_network: false,
dataset_filename_with_global_path: '/home/idelpino/Documentos/dataset.csv', # This is the filename with path where the data for neural net training is dumped
neural_net_filename_with_global_path: '/home/idelpino/iri-lab/ground_segmentation_ws/src/iri_ground_segmentation/shallow_neural_nets/veg_terrain_and_obs_13_features_39_neurons.csv',
neural_net_number_of_features: 13,
neural_net_number_of_neurons_in_hidden_layer: 39,
neural_net_number_of_neurons_in_output_layer: 2,
# labeling parameters
max_pred_std_dev_for_labelling: 0.333, ## ONLY IN USE TO GIVE COLOUR TO DENSE RECONSTRUCTION
score_threshold: 0.0, #0.0 when using neural net ## for assigning the ground class label: a score of one means Mahalanobis distance equal to zero, a score of zero means Mahalanobis distance equal to mahalanobis_threshold
classify_not_labeled_points_as_obstacles: false, ## when a point has no reference satisfying the max_pred_std_dev_for_labelling threshold, we can leave it as unknown or label it as an obstacle
ground_threshold_in_not_analyzed_areas: 0.1, ## when it is not possible to make a local analysis of the data, we will use the lowest point (the one in elevation_cloud) as ground height, and
## all the points above it and below this threshold will be classified as ground
# low obstacle detection parameters
use_low_obstacle_detection: true,
low_obstacle_min_height: 0.07,
sensor_best_resolution_deg: 0.2, # Best resolution, either horizontal or vertical
max_navigation_slope: 0.3,
# visualization and debug parameters
evaluate_segmentation: false,
only_road_is_traversable: true, # If set to false, terrain and vegetation will be considered non-traversable, but sidewalks, parking areas, etc. will be considered traversable
measure_performance: false, ## (feature not yet debugged) Flag to measure the number of executions and the execution times of the different functions of the algorithm
show_dense_reconstruction: false, ## To show a dense ground surface reconstruction using the predictions of the ground model (colored using the std_dev of the z coordinate)
## or, alternatively, the "elevation point cloud" (useful for parameter tuning)
}
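Since the launch file loads this YAML with a plain rosparam tag, the values end up on the parameter server under /iri_ground_segmentation. The snippet below is only a minimal sketch of how a roscpp node could read a few of them; it is not how the actual iri_ground_segmentation node is implemented (that code is not shown in this commit).

// Minimal parameter-lookup sketch (illustrative only).
#include <ros/ros.h>

int main(int argc, char **argv)
{
  ros::init(argc, argv, "ona2_params_check");  // assumed node name
  // The rosparam load in the launch file puts the YAML under /iri_ground_segmentation.
  ros::NodeHandle nh("/iri_ground_segmentation");

  double sensor_height, mahalanobis_threshold;
  bool use_neural_network;
  nh.param("sensor_height", sensor_height, 0.3);                  // ground expected at z = -sensor_height
  nh.param("mahalanobis_threshold", mahalanobis_threshold, 3.0);  // ground/obstacle classification threshold
  nh.param("use_neural_network", use_neural_network, false);

  ROS_INFO("sensor_height=%.2f  mahalanobis_threshold=%.2f  use_neural_network=%d",
           sensor_height, mahalanobis_threshold, static_cast<int>(use_neural_network));
  return 0;
}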
@@ -55,10 +55,10 @@ iri_ground_segmentation: {
use_low_obstacle_detection: true,
low_obstacle_min_height: 0.1,
sensor_best_resolution_deg: 0.2, # Best resolution, either horizontal or vertical
max_navigation_slope: 0.2,
max_navigation_slope: 0.15,
# visualization and debug parameters
evaluate_segmentation: true,
evaluate_segmentation: false,
only_road_is_traversable: true, # If set to false, terrain and vegetation will be considered non-traversable, but sidewalks, parking areas, etc. will be considered traversable
measure_performance: false, ## (feature not yet debugged) Flag to measure the number of executions and the execution times of the different functions of the algorithm
show_dense_reconstruction: false, ## To show a dense ground surface reconstruction using the predictions of the ground model (colored using the std_dev of the z coordinate)
@@ -471,7 +471,7 @@ void GroundSegmentationAlgNode::convertInputsToSuitableFormats(void)
//
// std::cout << "DATA_C_0_RGB_CAST_INTO_FLOAT = " << lidar_pcl_cloud_.points[100].data_c[DATA_C_0_RGB_CAST_INTO_FLOAT] << std::endl;
// std::cout << "DATA_C_1_ID_CLASS = " << lidar_pcl_cloud_.points[100].data_c[DATA_C_1_ID_CLASS] << std::endl;
// std::cout << "DATA_C_2_MEAN_INTENSITY = " << lidar_pcl_cloud_.points[100].data_c[DATA_C_2_MEAN_INTENSITY] << std::endl;
// std::cout << "DATA_C_2_MEAN_INTENSITY = " << lidar_pcl_cloud_.points[100].data_c[DATA_C_2_INDEX_OF_GROUND_REF_THAT_MADE_THE_LABEL] << std::endl;
// std::cout << "DATA_C_3_ORIGINAL_INDEX = " << lidar_pcl_cloud_.points[100].data_c[DATA_C_3_ORIGINAL_INDEX] << std::endl;
if (flag_new_ground_truth_lidar_data_)
@@ -728,11 +728,11 @@ void GroundSegmentationAlgNode::detectLowObstacles(float h, float res_deg, float
float cell_side = r_max / sqrt(8.0); // This comes from here: sqrt((r_max/2)^2 / 2)
int num_of_cells = (d_max * 2.0 / cell_side);
//std::cout << "r_max = " << r_max << std::endl;
//std::cout << "K = " << K << std::endl;
//std::cout << "d_max = " << d_max << std::endl;
//std::cout << "cell_side = " << cell_side << std::endl;
//std::cout << "num_of_cells = " << num_of_cells << std::endl;
// std::cout << "r_max = " << r_max << std::endl;
// std::cout << "K = " << K << std::endl;
// std::cout << "d_max = " << d_max << std::endl;
// std::cout << "cell_side = " << cell_side << std::endl;
// std::cout << "num_of_cells = " << num_of_cells << std::endl;
std::vector < std::vector < Cell >> grid(num_of_cells, std::vector < Cell > (num_of_cells));
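The cell_side expression in detectLowObstacles above, sqrt((r_max/2)^2 / 2) = r_max / sqrt(8), is the side of a square cell whose diagonal equals r_max / 2. The standalone check below just verifies that arithmetic; the sample values of r_max and d_max are assumptions chosen for illustration, not values taken from this commit.

// Standalone check of the grid sizing (illustrative only; sample values are assumptions).
#include <cmath>
#include <cstdio>

int main()
{
  const float r_max = 2.0f;   // assumed maximum ring spacing at the relevant range
  const float d_max = 10.0f;  // assumed half-size of the analysed square area

  const float cell_side = r_max / std::sqrt(8.0f);          // == sqrt((r_max/2)^2 / 2)
  const float cell_diagonal = cell_side * std::sqrt(2.0f);  // == r_max / 2
  const int num_of_cells = static_cast<int>(d_max * 2.0f / cell_side);

  std::printf("cell_side = %.3f, cell_diagonal = %.3f, num_of_cells = %d\n",
              cell_side, cell_diagonal, num_of_cells);
  return 0;
}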
@@ -853,7 +853,7 @@ void GroundSegmentationAlgNode::evaluateSegmentation(void)
else
{
if (detected_class == VEGETATION || detected_class == CLASS_UNKNOWN || detected_class == CLASS_OBSTACLE
|| detected_class == CLASS_OVERHANGING_OBSTACLE || detected_class == TERRAIN)
|| detected_class == CLASS_OVERHANGING_OBSTACLE || detected_class == TERRAIN || detected_class == CLASS_LOW_OBSTACLE)
detected_class = CLASS_OBSTACLE;
else
detected_class = CLASS_GROUND;
@@ -977,7 +977,7 @@ void GroundSegmentationAlgNode::cb_lidar(const sensor_msgs::PointCloud2::ConstPt
{
this->mutex_enter();
lidar_ros_cloud_ = *msg;
//std::cout << "GroundSegmentationAlgNode::cb_lidar --> lidar msg received!" << std::endl;
//std::cout << "GroundSegmentationAlgNode::cb_lidar --> lidar msg received!" << std::endl;
assert(msg != NULL && "Null pointer!!! in function cb_lidar!");
flag_new_lidar_data_ = true;