diff --git a/get_obj_dist/launch/node.launch b/get_obj_dist/launch/node.launch
index ffc5dbe483d9b90839cc4faa4fc80734716a43e5..1012a037ab0e15c0029acfbd117de21d5eb083d1 100644
--- a/get_obj_dist/launch/node.launch
+++ b/get_obj_dist/launch/node.launch
@@ -2,14 +2,25 @@
 <!-- -->
 <launch>
 
-  <arg name="node_name"        default="objetc_to_distance"/>
-  <arg name="output"           default="screen"/>
+  <arg name="node_name"        default="person_bbox_to_distance"/>
+  <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
+
+  <arg name="bbox_in_topic"      default="/human_detection/bounding_box"/>
+  <arg name="detection_image_in" default="/human_detection/detection_image"/>
+  <arg name="depth_image_in"     default="/camera/aligned_depth_to_color/image_raw"/>
+  <arg name="detection_distance_image_out" default="/$(arg node_name)/detection_distance_image"/>
+  <arg name="people_marker_array_out"      default="/$(arg node_name)/people_marker_array"/>
+  <arg name="output"             default="screen"/>
 
   <node pkg="get_obj_dist"
         name="$(arg node_name)"
         type="object_to_distance.py"
         output="$(arg output)"
         args="">
+    <remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
+    <remap from="~detection_image_in"        to="$(arg detection_image_in)"/>
+    <remap from="~bounding_box_in"           to="$(arg bbox_in_topic)"/>
+    <remap from="~depth_image_in"           to="$(arg depth_image_in)"/>
+    <remap from="~depth_image_in"           to="$(arg depth_image_in)"/>
+    <remap from="~detection_distance_image_out"  to="$(arg detection_distance_image_out)"/>
+    <remap from="~people_marker_array_out"       to="$(arg people_marker_array_out)"/>
   </node>
 
 </launch>
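
Note on the "~" names in the remaps above: a leading "~" makes a topic private
to the node, so it resolves under the node's own name and each <remap> tag can
rewire it per instance. A minimal sketch of how that resolution behaves
(assumes a running roscore and the default node name from this launch file):

    import rospy

    rospy.init_node('person_bbox_to_distance')
    # A private name expands under the node's namespace...
    print(rospy.resolve_name('~bounding_box_in'))
    # -> /person_bbox_to_distance/bounding_box_in
    # ...which <remap from="~bounding_box_in" to="$(arg bbox_in_topic)"/>
    # redirects to /human_detection/bounding_box at launch time.
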
diff --git a/get_obj_dist/src/object_to_distance.py b/get_obj_dist/src/object_to_distance.py
index e382af0ac5bc5d1999ade10a7cd9fd04ffdb3311..6bd6f07ef8e66c0079928b058d55da0a5c94b824 100755
--- a/get_obj_dist/src/object_to_distance.py
+++ b/get_obj_dist/src/object_to_distance.py
@@ -30,13 +30,13 @@ class obj_dist:
         self.set_marker_array(marker_size, 'camera_color_optical_frame', 'package://get_obj_dist/human_model.STL')
 
-        # Initiale publishers
+        # Initialize publishers
-        self.dist_pub = rospy.Publisher('/obj_to_dist/human_distance', Image, queue_size=10)
-        self.render_pub = rospy.Publisher('/obj_to_dist/show_people_marker_array', MarkerArray, queue_size=10)
+        self.dist_pub = rospy.Publisher('~detection_distance_image_out', Image, queue_size=10)
+        self.render_pub = rospy.Publisher('~people_marker_array_out', MarkerArray, queue_size=10)
 
         # Initialize subscribers
-        self.bbx_sub = message_filters.Subscriber('/human_detected_image/bounding_box', box_list)
-        self.human_image_sub = message_filters.Subscriber('/human_detected_image/image', Image)
-        self.depth_image_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
+        self.bbx_sub = message_filters.Subscriber('~bounding_box_in', box_list)
+        self.human_image_sub = message_filters.Subscriber('~detection_image_in', Image)
+        self.depth_image_sub = message_filters.Subscriber('~depth_image_in', Image)
         # self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
 
         ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
@@ -121,7 +121,7 @@ class obj_dist:
 
 def main(args):
     rospy.init_node('obj_dist', anonymous=True)
-    print("object_to_distance.py is running")
+    #print("object_to_distance.py is running")
     _ = obj_dist()
     rospy.spin()
     cv2.destroyAllWindows()
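
The "~" subscribers above feed the node's existing ApproximateTimeSynchronizer,
so the callback only fires when a bounding-box list, detection image, and depth
image carry near-identical timestamps. A hedged sketch of that wiring
(queue_size and slop are illustrative values, and the box_list import path is
an assumption, not taken from the repo):

    import message_filters
    import rospy
    from sensor_msgs.msg import Image
    from human_detection.msg import box_list  # assumed location of the custom msg

    def synced_cb(boxes, detection_img, depth_img):
        # All three stamps agree to within `slop` seconds here.
        pass

    rospy.init_node('person_bbox_to_distance')
    bbx_sub = message_filters.Subscriber('~bounding_box_in', box_list)
    img_sub = message_filters.Subscriber('~detection_image_in', Image)
    dep_sub = message_filters.Subscriber('~depth_image_in', Image)
    ts = message_filters.ApproximateTimeSynchronizer(
        [bbx_sub, img_sub, dep_sub], queue_size=10, slop=0.1)
    ts.registerCallback(synced_cb)
    rospy.spin()
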
diff --git a/human_detection/launch/node.launch b/human_detection/launch/node.launch
index f6149f428495187d7ccebcdfbbdc2c92b8572279..efdbeb6dfb977ac9d5f786ef49ee155aef1e5447 100644
--- a/human_detection/launch/node.launch
+++ b/human_detection/launch/node.launch
@@ -3,6 +3,10 @@
 <launch>
 
   <arg name="node_name"        default="human_detection"/>
+  <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
+  <arg name="image_out_topic"  default="/$(arg node_name)/detection_image"/>
+  <arg name="bbox_out_topic"   default="/$(arg node_name)/bounding_box"/>
+
   <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
   <arg name="output"           default="screen"/>
 
@@ -11,6 +15,9 @@
         type="human_detector.py"
         output="$(arg output)"
         args="--venv $(arg virtual_env_path)">
+    <remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
+    <remap from="~detection_image"        to="$(arg image_out_topic)"/>
+    <remap from="~bounding_box"           to="$(arg bbox_out_topic)"/>
   </node>
 
 </launch>
diff --git a/human_detection/launch/test_realsense.launch b/human_detection/launch/test_realsense.launch
index 7afacf9f0151f4ff52619aa1e834934a2dd20b4d..793511516bcf71bcd06f97f06d7005ab3499d74d 100644
--- a/human_detection/launch/test_realsense.launch
+++ b/human_detection/launch/test_realsense.launch
@@ -8,13 +8,11 @@
   </include>
 
   <include file="$(find human_detection)/launch/node.launch">
-    <arg name="node_name"        value="human_detection"/>
     <arg name="virtual_env_path" value="$(arg virtual_env_path)"/>
     <arg name="output"           value="$(arg output)"/>
   </include>
 
   <include file="$(find get_obj_dist)/launch/node.launch">
-    <arg name="node_name"        value="object_to_distance"/>
     <arg name="output"           value="$(arg output)"/>
   </include>
 
diff --git a/human_detection/rviz/test.rviz b/human_detection/rviz/test.rviz
index 5da8adfccee24438c6e640f2c41627890205e9af..10b7d63fe9930952d633650f3940c0733e710f14 100644
--- a/human_detection/rviz/test.rviz
+++ b/human_detection/rviz/test.rviz
@@ -116,7 +116,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /human_detected_image/image
+      Image Topic: /human_detection/detection_image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -128,7 +128,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /obj_to_dist/human_distance
+      Image Topic: /person_bbox_to_distance/detection_distance_image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -140,7 +140,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/MarkerArray
       Enabled: true
-      Marker Topic: /obj_to_dist/show_people_marker_array
+      Marker Topic: /person_bbox_to_distance/people_marker_array
       Name: MarkerArray
       Namespaces:
         "": true
@@ -189,10 +189,10 @@ Visualization Manager:
       Invert Z Axis: false
       Name: Current View
       Near Clip Distance: 0.009999999776482582
-      Pitch: 1.5697963237762451
+      Pitch: 1.3447965383529663
       Target Frame: <Fixed Frame>
       Value: Orbit (rviz)
-      Yaw: 3.145402669906616
+      Yaw: 3.165403366088867
     Saved: ~
 Window Geometry:
   DetectionDistanceImage:
diff --git a/human_detection/src/human_detector.py b/human_detection/src/human_detector.py
index ccefd91c7b15d2bc67a6240e73c6f8ca0ba74e24..992eb813739ef7c4089ea687c28308d30d207be7 100755
--- a/human_detection/src/human_detector.py
+++ b/human_detection/src/human_detector.py
@@ -70,17 +70,17 @@ num_detections = detection_graph.get_tensor_by_name('num_detections:0')
 class human_detector:
     def __init__(self):
         self.bridge = CvBridge()
-        self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.callback)
+        self.image_sub = rospy.Subscriber("~camera/color/image_raw", Image, self.callback)
 
-        self.image_pub = rospy.Publisher("/human_detected_image/image", Image, queue_size=10)
-        self.bbx_pub = rospy.Publisher("/human_detected_image/bounding_box", box_list, queue_size=10)
+        self.image_pub = rospy.Publisher("~detection_image", Image, queue_size=10)
+        self.bbx_pub = rospy.Publisher("~bounding_box", box_list, queue_size=10)
 
     def callback(self, data):
         t0 = time.time()
         try:
             cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
         except CvBridgeError as e:
-            print(e)
+            rospy.logerr("CvBridgeError: " +e)
+            #print(e)
             return
 
         frame_expanded = np.expand_dims(cv_image, axis=0)
@@ -155,7 +155,7 @@ class human_detector:
             cvt_msg.header.stamp = data.header.stamp
             self.image_pub.publish(cvt_msg)
         except CvBridgeError as e:
-            print(e)
+            rospy.logerr("CvBridgeError: " + e)
+            #print(e)
 
         # Append Accuracy
         # if scores[0][0] > 0.01:
@@ -163,13 +163,13 @@
 
 
 def main():
-    _ = human_detector()
     rospy.init_node('human_detector', anonymous=True)
+    rospy.loginfo("Current Tensorflow Version: %s", tf.__version__)
+    _ = human_detector()
     rospy.spin()
     # graph_average_percentage(prediction_level_list)
 
 
 if __name__ == '__main__':
-    print('Current Tensorflow Version: ' + str(tf.__version__))
-    rospy.set_param('use_sim_time', 'True')
+    #rospy.set_param('use_sim_time', 'True')
     main()
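
Why the logerr calls above use "%s" instead of concatenation: the expression
"CvBridgeError: " + e raises its own TypeError (str plus an exception object),
masking the original error, whereas rospy loggers accept printf-style
arguments and stringify them lazily. A self-contained illustration (the node
name here is arbitrary):

    import rospy

    rospy.init_node('log_demo')
    try:
        raise ValueError('bad encoding')
    except ValueError as e:
        rospy.logerr('CvBridgeError: %s', e)  # the logger calls str(e) for us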