moveit2
The MoveIt Motion Planning Framework for ROS 2.
depth_image_octomap_updater.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2011, Willow Garage, Inc.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of Willow Garage nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Ioan Sucan, Suat Gedikli */

#include <cmath>
#include <tf2_geometry_msgs/tf2_geometry_msgs.hpp>
// TODO: Remove conditional includes when released to all active distros.
#if __has_include(<tf2/LinearMath/Vector3.hpp>)
#include <tf2/LinearMath/Vector3.hpp>
#else
#include <tf2/LinearMath/Vector3.h>
#endif
#if __has_include(<tf2/LinearMath/Transform.hpp>)
#include <tf2/LinearMath/Transform.hpp>
#else
#include <tf2/LinearMath/Transform.h>
#endif
#include <geometric_shapes/shape_operations.h>
#include <sensor_msgs/image_encodings.hpp>
#include <stdint.h>
#include <moveit/utils/logger.hpp>

#include <memory>

namespace occupancy_map_monitor
{

DepthImageOctomapUpdater::DepthImageOctomapUpdater()
  : OccupancyMapUpdater("DepthImageUpdater")
  , image_topic_("depth")
  , queue_size_(5)
  , near_clipping_plane_distance_(0.3)
  , far_clipping_plane_distance_(5.0)
  , shadow_threshold_(0.04)
  , padding_scale_(0.0)
  , padding_offset_(0.02)
  , max_update_rate_(0)
  , skip_vertical_pixels_(4)
  , skip_horizontal_pixels_(6)
  , image_callback_count_(0)
  , average_callback_dt_(0.0)
  , good_tf_(5)  // start optimistically, so we do not output warnings right from the beginning
  , failed_tf_(0)
  , K0_(0.0)
  , K2_(0.0)
  , K4_(0.0)
  , K5_(0.0)
  , logger_(moveit::getLogger("moveit.ros.depth_image_octomap_updater"))
{
}

DepthImageOctomapUpdater::~DepthImageOctomapUpdater()
{
  sub_depth_image_.shutdown();
}

bool DepthImageOctomapUpdater::setParams(const std::string& name_space)
{
  try
  {
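    // Note: the '&&' chain below short-circuits, so if one parameter lookup
    // fails the remaining lookups are skipped and those members keep their
    // current (default) values.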
    node_->get_parameter(name_space + ".image_topic", image_topic_) &&
        node_->get_parameter(name_space + ".queue_size", queue_size_) &&
        node_->get_parameter(name_space + ".near_clipping_plane_distance", near_clipping_plane_distance_) &&
        node_->get_parameter(name_space + ".far_clipping_plane_distance", far_clipping_plane_distance_) &&
        node_->get_parameter(name_space + ".shadow_threshold", shadow_threshold_) &&
        node_->get_parameter(name_space + ".padding_scale", padding_scale_) &&
        node_->get_parameter(name_space + ".padding_offset", padding_offset_) &&
        node_->get_parameter(name_space + ".max_update_rate", max_update_rate_) &&
        node_->get_parameter(name_space + ".skip_vertical_pixels", skip_vertical_pixels_) &&
        node_->get_parameter(name_space + ".skip_horizontal_pixels", skip_horizontal_pixels_) &&
        node_->get_parameter(name_space + ".filtered_cloud_topic", filtered_cloud_topic_) &&
        node_->get_parameter(name_space + ".ns", ns_);
    return true;
  }
  catch (const rclcpp::exceptions::InvalidParameterTypeException& e)
  {
    RCLCPP_ERROR_STREAM(logger_, e.what() << '\n');
    return false;
  }
}

116 
117 bool DepthImageOctomapUpdater::initialize(const rclcpp::Node::SharedPtr& node)
118 {
119  node_ = node;
120  input_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
121  model_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
122  filtered_depth_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
123  filtered_label_transport_ = std::make_unique<image_transport::ImageTransport>(node_);
124 
125  tf_buffer_ = monitor_->getTFClient();
126  free_space_updater_ = std::make_unique<LazyFreeSpaceUpdater>(tree_);
127 
128  // create our mesh filter
129  mesh_filter_ = std::make_unique<mesh_filter::MeshFilter<mesh_filter::StereoCameraModel>>(
131  mesh_filter_->parameters().setDepthRange(near_clipping_plane_distance_, far_clipping_plane_distance_);
132  mesh_filter_->setShadowThreshold(shadow_threshold_);
133  mesh_filter_->setPaddingOffset(padding_offset_);
134  mesh_filter_->setPaddingScale(padding_scale_);
135  mesh_filter_->setTransformCallback(
136  [this](mesh_filter::MeshHandle mesh, Eigen::Isometry3d& tf) { return getShapeTransform(mesh, tf); });
137 
138  return true;
139 }

void DepthImageOctomapUpdater::start()
{
  pub_model_depth_image_ = model_depth_transport_->advertiseCamera("model_depth", 1);

  std::string prefix = "";
  if (!ns_.empty())
    prefix = ns_ + "/";

  pub_model_depth_image_ = model_depth_transport_->advertiseCamera(prefix + "model_depth", 1);
  if (!filtered_cloud_topic_.empty())
  {
    pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix + filtered_cloud_topic_, 1);
  }
  else
  {
    pub_filtered_depth_image_ = filtered_depth_transport_->advertiseCamera(prefix + "filtered_depth", 1);
  }

  pub_filtered_label_image_ = filtered_label_transport_->advertiseCamera(prefix + "filtered_label", 1);

  sub_depth_image_ = image_transport::create_camera_subscription(
      node_.get(), image_topic_,
      [this](const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
             const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg) {
        return depthImageCallback(depth_msg, info_msg);
      },
      "raw", rmw_qos_profile_sensor_data);
}

void DepthImageOctomapUpdater::stop()
{
  sub_depth_image_.shutdown();
}

ShapeHandle DepthImageOctomapUpdater::excludeShape(const shapes::ShapeConstPtr& shape)
{
  ShapeHandle h = 0;
  if (mesh_filter_)
  {
    if (shape->type == shapes::MESH)
    {
      h = mesh_filter_->addMesh(static_cast<const shapes::Mesh&>(*shape));
    }
    else
    {
      std::unique_ptr<shapes::Mesh> m(shapes::createMeshFromShape(shape.get()));
      if (m)
        h = mesh_filter_->addMesh(*m);
    }
  }
  else
    RCLCPP_ERROR(logger_, "Mesh filter not yet initialized!");
  return h;
}

void DepthImageOctomapUpdater::forgetShape(ShapeHandle handle)
{
  if (mesh_filter_)
    mesh_filter_->removeMesh(handle);
}

bool DepthImageOctomapUpdater::getShapeTransform(mesh_filter::MeshHandle h, Eigen::Isometry3d& transform) const
{
  ShapeTransformCache::const_iterator it = transform_cache_.find(h);
  if (it == transform_cache_.end())
  {
    RCLCPP_ERROR(logger_, "Internal error. Mesh filter handle %u not found", h);
    return false;
  }
  transform = it->second;
  return true;
}

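// Detect host endianness once at static-initialization time: store the 32-bit
// pattern 0x01020304 and inspect its first byte (1 on big-endian hosts). The
// flag is used below to compare incoming image data against the host byte order.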
namespace
{
const bool HOST_IS_BIG_ENDIAN = []() {
  union
  {
    uint32_t i;
    char c[sizeof(uint32_t)];
  } bint = { 0x01020304 };
  return bint.c[0] == 1;
}();
}  // namespace

void DepthImageOctomapUpdater::depthImageCallback(const sensor_msgs::msg::Image::ConstSharedPtr& depth_msg,
                                                  const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg)
{
  RCLCPP_DEBUG(logger_, "Received a new depth image message (frame = '%s', encoding='%s')",
               depth_msg->header.frame_id.c_str(), depth_msg->encoding.c_str());
  rclcpp::Time start = node_->now();

  if (max_update_rate_ > 0)
  {
    // ensure we are not updating the octomap representation too often
    if (node_->now() - last_update_time_ <= rclcpp::Duration::from_seconds(1.0 / max_update_rate_))
      return;
    last_update_time_ = node_->now();
  }

  // measure the frequency at which we receive updates
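  // (the average inter-arrival time is maintained as an incremental mean,
  //  avg_n = ((n - 1) * avg_{n-1} + dt) / n, so no sample history is stored)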
  if (image_callback_count_ < 1000)
  {
    if (image_callback_count_ > 0)
    {
      const double dt_start = (start - last_depth_callback_start_).seconds();
      if (image_callback_count_ < 2)
      {
        average_callback_dt_ = dt_start;
      }
      else
      {
        average_callback_dt_ = ((image_callback_count_ - 1) * average_callback_dt_ + dt_start) /
                               static_cast<double>(image_callback_count_);
      }
    }
  }
  else
  {
    // every 1000 updates we reset the counter almost to the beginning (use 2 so we don't have so much of a ripple in
    // the measured average)
    image_callback_count_ = 2;
  }
  last_depth_callback_start_ = start;
  ++image_callback_count_;

  if (monitor_->getMapFrame().empty())
    monitor_->setMapFrame(depth_msg->header.frame_id);

  /* get transform for cloud into map frame */
  tf2::Stamped<tf2::Transform> map_h_sensor;
  if (monitor_->getMapFrame() == depth_msg->header.frame_id)
  {
    map_h_sensor.setIdentity();
  }
  else
  {
    if (tf_buffer_)
    {
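      // Retry the TF lookup a bounded number of times before giving up; the
      // retry budget scales with the measured callback period and the
      // subscriber queue size, so slower image streams get a longer wait.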
      // wait at most 50ms
      static const double TEST_DT = 0.005;
      const int nt =
          static_cast<int>((0.5 + average_callback_dt_ / TEST_DT) * std::max(1, (static_cast<int>(queue_size_) / 2)));
      bool found = false;
      std::string err;
      for (int t = 0; t < nt; ++t)
      {
        try
        {
          tf2::fromMsg(tf_buffer_->lookupTransform(monitor_->getMapFrame(), depth_msg->header.frame_id,
                                                   depth_msg->header.stamp),
                       map_h_sensor);
          found = true;
          break;
        }
        catch (tf2::TransformException& ex)
        {
          std::chrono::duration<double, std::nano> tmp_duration(TEST_DT);
          static const rclcpp::Duration D(tmp_duration);
          err = ex.what();
          std::this_thread::sleep_for(D.to_chrono<std::chrono::seconds>());
        }
      }
      static const unsigned int MAX_TF_COUNTER = 1000;  // so we avoid int overflow
      if (found)
      {
        good_tf_++;
        if (good_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
      }
      else
      {
        failed_tf_++;
        if (failed_tf_ > good_tf_)
        {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
          RCLCPP_WARN_THROTTLE(logger_, *node_->get_clock(), 1000,
                               "More than half of the image messages discarded due to TF being unavailable (%u%%). "
                               "Transform error of sensor data: %s; quitting callback.",
                               (100 * failed_tf_) / (good_tf_ + failed_tf_), err.c_str());
#pragma GCC diagnostic pop
        }
        else
        {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
          RCLCPP_DEBUG_THROTTLE(logger_, *node_->get_clock(), 1000,
                                "Transform error of sensor data: %s; quitting callback", err.c_str());
#pragma GCC diagnostic pop
        }
        if (failed_tf_ > MAX_TF_COUNTER)
        {
          const unsigned int div = MAX_TF_COUNTER / 10;
          good_tf_ /= div;
          failed_tf_ /= div;
        }
        return;
      }
    }
    else
      return;
  }

  if (!updateTransformCache(depth_msg->header.frame_id, depth_msg->header.stamp))
    return;

  if (depth_msg->is_bigendian && !HOST_IS_BIG_ENDIAN)
  {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
    RCLCPP_ERROR_THROTTLE(logger_, *node_->get_clock(), 1000, "endian problem: received image data does not match host");
#pragma GCC diagnostic pop
  }

  const int w = depth_msg->width;
  const int h = depth_msg->height;

  // call the mesh filter
  mesh_filter::StereoCameraModel::Parameters& params = mesh_filter_->parameters();
  params.setCameraParameters(info_msg->k[0], info_msg->k[4], info_msg->k[2], info_msg->k[5]);
  params.setImageSize(w, h);

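  // Hand the raw depth buffer to the mesh filter; the GL type tag tells the
  // filter how to interpret each pixel (16-bit unsigned millimeters vs.
  // 32-bit float meters).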
  const bool is_u_short = depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1;
  if (is_u_short)
  {
    mesh_filter_->filter(&depth_msg->data[0], GL_UNSIGNED_SHORT);
  }
  else
  {
    if (depth_msg->encoding != sensor_msgs::image_encodings::TYPE_32FC1)
    {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
      RCLCPP_ERROR_THROTTLE(logger_, *node_->get_clock(), 1000, "Unexpected encoding type: '%s'. Ignoring input.",
                            depth_msg->encoding.c_str());
#pragma GCC diagnostic pop
      return;
    }
    mesh_filter_->filter(&depth_msg->data[0], GL_FLOAT);
  }

  // the mesh filter runs in background; compute extra things in the meantime

  // Use correct principal point from calibration
  const double px = info_msg->k[2];
  const double py = info_msg->k[5];

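  // Cache the per-column / per-row back-projection factors of the pinhole model:
  // a pixel (u, v) with depth z maps to x = (u - cx) / fx * z and y = (v - cy) / fy * z,
  // so x_cache_[u] = (u - cx) / fx and y_cache_[v] = (v - cy) / fy.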
  // if the camera parameters have changed at all, recompute the cache we had
  if (w >= static_cast<int>(x_cache_.size()) || h >= static_cast<int>(y_cache_.size()) || K2_ != px || K5_ != py ||
      K0_ != info_msg->k[0] || K4_ != info_msg->k[4])
  {
    K2_ = px;
    K5_ = py;
    K0_ = info_msg->k[0];
    K4_ = info_msg->k[4];

    inv_fx_ = 1.0 / K0_;
    inv_fy_ = 1.0 / K4_;

    // if there are any NaNs, discard data
    if (isnan(px) || isnan(py) || isnan(inv_fx_) || isnan(inv_fy_))
      return;

    // Pre-compute some constants
    if (static_cast<int>(x_cache_.size()) < w)
      x_cache_.resize(w);
    if (static_cast<int>(y_cache_.size()) < h)
      y_cache_.resize(h);

    for (int x = 0; x < w; ++x)
      x_cache_[x] = (x - px) * inv_fx_;

    for (int y = 0; y < h; ++y)
      y_cache_[y] = (y - py) * inv_fy_;
  }

  const octomap::point3d sensor_origin(map_h_sensor.getOrigin().getX(), map_h_sensor.getOrigin().getY(),
                                       map_h_sensor.getOrigin().getZ());

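  // The key sets are heap-allocated because ownership is handed to the
  // LazyFreeSpaceUpdater (pushLazyUpdate) at the end of this callback, which
  // clears the corresponding free space asynchronously.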
  octomap::KeySet* occupied_cells_ptr = new octomap::KeySet();
  octomap::KeySet* model_cells_ptr = new octomap::KeySet();
  octomap::KeySet& occupied_cells = *occupied_cells_ptr;
  octomap::KeySet& model_cells = *model_cells_ptr;

  // allocate memory if needed
  std::size_t img_size = h * w;
  if (filtered_labels_.size() < img_size)
    filtered_labels_.resize(img_size);

  // get the labels of the filtered data
  const unsigned int* labels_row = &filtered_labels_[0];
  mesh_filter_->getFilteredLabels(&filtered_labels_[0]);

  // publish debug information if needed
  if (debug_info_)
  {
    sensor_msgs::msg::Image debug_msg;
    debug_msg.header = depth_msg->header;
    debug_msg.height = h;
    debug_msg.width = w;
    debug_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    debug_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    debug_msg.step = w * sizeof(float);
    debug_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getModelDepth(reinterpret_cast<float*>(&debug_msg.data[0]));
    pub_model_depth_image_.publish(debug_msg, *info_msg);

    sensor_msgs::msg::Image filtered_depth_msg;
    filtered_depth_msg.header = depth_msg->header;
    filtered_depth_msg.height = h;
    filtered_depth_msg.width = w;
    filtered_depth_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    filtered_depth_msg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    filtered_depth_msg.step = w * sizeof(float);
    filtered_depth_msg.data.resize(img_size * sizeof(float));
    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_depth_msg.data[0]));
    pub_filtered_depth_image_.publish(filtered_depth_msg, *info_msg);

    sensor_msgs::msg::Image label_msg;
    label_msg.header = depth_msg->header;
    label_msg.height = h;
    label_msg.width = w;
    label_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    label_msg.encoding = sensor_msgs::image_encodings::RGBA8;
    label_msg.step = w * sizeof(unsigned int);
    label_msg.data.resize(img_size * sizeof(unsigned int));
    mesh_filter_->getFilteredLabels(reinterpret_cast<unsigned int*>(&label_msg.data[0]));

    pub_filtered_label_image_.publish(label_msg, *info_msg);
  }

  if (!filtered_cloud_topic_.empty())
  {
    sensor_msgs::msg::Image filtered_msg;
    filtered_msg.header = depth_msg->header;
    filtered_msg.height = h;
    filtered_msg.width = w;
    filtered_msg.is_bigendian = HOST_IS_BIG_ENDIAN;
    filtered_msg.encoding = sensor_msgs::image_encodings::TYPE_16UC1;
    filtered_msg.step = w * sizeof(unsigned short);
    filtered_msg.data.resize(img_size * sizeof(unsigned short));

    // reuse float buffer across callbacks
    static std::vector<float> filtered_data;
    if (filtered_data.size() < img_size)
      filtered_data.resize(img_size);

    mesh_filter_->getFilteredDepth(reinterpret_cast<float*>(&filtered_data[0]));
    unsigned short* msg_data = reinterpret_cast<unsigned short*>(&filtered_msg.data[0]);
    for (std::size_t i = 0; i < img_size; ++i)
    {
      // rescale depth to millimeter to work with `unsigned short`
      msg_data[i] = static_cast<unsigned short>(filtered_data[i] * 1000 + 0.5);
    }
    pub_filtered_depth_image_.publish(filtered_msg, *info_msg);
  }

  // figure out occupied cells and model cells
  tree_->lockRead();

  try
  {
    const int h_bound = h - skip_vertical_pixels_;
    const int w_bound = w - skip_horizontal_pixels_;

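    // Pixels labelled BACKGROUND by the mesh filter are real sensor returns and
    // become candidate occupied cells; pixels at or beyond the far clip, or on
    // the robot model, mark space the ray passed through (model cells). For
    // 16UC1 input the depth is in millimeters and is scaled to meters before
    // back-projection.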
    if (is_u_short)
    {
      const uint16_t* input_row = reinterpret_cast<const uint16_t*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
      {
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          // not filtered
          if (labels_row[x] == mesh_filter::MeshFilterBase::BACKGROUND)
          {
            float zz = static_cast<float>(input_row[x]) * 1e-3;  // scale from mm to m
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          // on far plane or a model point -> remove
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FAR_CLIP)
          {
            float zz = input_row[x] * 1e-3;
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            // add to the list of model cells
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
      }
    }
    else
    {
      const float* input_row = reinterpret_cast<const float*>(&depth_msg->data[0]);

      for (int y = skip_vertical_pixels_; y < h_bound; ++y, labels_row += w, input_row += w)
      {
        for (int x = skip_horizontal_pixels_; x < w_bound; ++x)
        {
          if (labels_row[x] == mesh_filter::MeshFilterBase::BACKGROUND)
          {
            float zz = input_row[x];
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            occupied_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
          else if (labels_row[x] >= mesh_filter::MeshFilterBase::FAR_CLIP)
          {
            float zz = input_row[x];
            float yy = y_cache_[y] * zz;
            float xx = x_cache_[x] * zz;
            /* transform to map frame */
            tf2::Vector3 point_tf = map_h_sensor * tf2::Vector3(xx, yy, zz);
            // add to the list of model cells
            model_cells.insert(tree_->coordToKey(point_tf.getX(), point_tf.getY(), point_tf.getZ()));
          }
        }
      }
    }
  }
  catch (...)
  {
    tree_->unlockRead();
    RCLCPP_ERROR(logger_, "Internal error while parsing depth data");
    return;
  }
  tree_->unlockRead();

  /* cells that overlap with the model are not occupied */
  for (const octomap::OcTreeKey& model_cell : model_cells)
    occupied_cells.erase(model_cell);

  // mark occupied cells
  tree_->lockWrite();
  try
  {
    /* now mark all occupied cells */
    for (const octomap::OcTreeKey& occupied_cell : occupied_cells)
      tree_->updateNode(occupied_cell, true);
  }
  catch (...)
  {
    RCLCPP_ERROR(logger_, "Internal error while updating octree");
  }
  tree_->unlockWrite();
  tree_->triggerUpdateCallback();

  // at this point we still have not freed the space
  free_space_updater_->pushLazyUpdate(occupied_cells_ptr, model_cells_ptr, sensor_origin);

  RCLCPP_DEBUG(logger_, "Processed depth image in %lf ms", (node_->now() - start).seconds() * 1000.0);
}
}  // namespace occupancy_map_monitor