diff --git a/get_obj_dist/launch/node.launch b/get_obj_dist/launch/node.launch
index 2574542f9af88b31ef1c759b742e77049262513a..d8fc353993517010bc28114593509d62066f62d6 100644
--- a/get_obj_dist/launch/node.launch
+++ b/get_obj_dist/launch/node.launch
@@ -2,28 +2,33 @@
 <!-- -->
 <launch>
 
-  <arg name="node_name"        default="human_bbox_to_distance"/>
-  <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
+  <arg name="camera_in_topic"      default="/camera/color/image_raw"/>
+  <arg name="depth_image_in"       default="/camera/aligned_depth_to_color/image_raw"/>
 
-  <arg name="bbox_in_topic"      default="/human_detection/bounding_box"/>
-  <arg name="detection_image_in" default="/human_detection/detection_image"/>
-  <arg name="depth_image_in"     default="/camera/aligned_depth_to_color/image_raw"/>
-  <arg name="detection_distance_image_out" default="/$(arg node_name)/detection_distance_image"/>
+  <arg name="node_name"        default="human_tensorflow_detection_2d_to_3d"/>
+  <arg name="bbox_in_topic"        default="/human_tensorflow_detection/bounding_box"/>
+  <arg name="detection_image_in"   default="/human_tensorflow_detection/image"/>
+  <arg name="detection2d_array_in" default="/human_tensorflow_detection/detection2d_array"/>
+
+  <arg name="detection_distance_image_out" default="/$(arg node_name)/image"/>
   <arg name="people_marker_array_out"      default="/$(arg node_name)/people_marker_array"/>
   <arg name="output"             default="screen"/>
+  <arg name="required"           default="true"/>
 
   <node pkg="get_obj_dist"
         name="$(arg node_name)"
-        type="object_to_distance.py"
+        type="human_2d_to_3d.py"
         output="$(arg output)"
+        required="$(arg required)"
         args="">
     <remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
-    <remap from="~detection_image_in"        to="$(arg detection_image_in)"/>
-    <remap from="~bounding_box_in"           to="$(arg bbox_in_topic)"/>
-    <remap from="~depth_image_in"           to="$(arg depth_image_in)"/>
-    <remap from="~depth_image_in"           to="$(arg depth_image_in)"/>
+    <remap from="~detection_image_in"     to="$(arg detection_image_in)"/>
+    <remap from="~bounding_box_in"        to="$(arg bbox_in_topic)"/>
+    <remap from="~detection2d_array"      to="$(arg detection2d_array_in)"/>
+    <remap from="~depth_image_in"         to="$(arg depth_image_in)"/>
     <remap from="~detection_distance_image_out"  to="$(arg detection_distance_image_out)"/>
     <remap from="~people_marker_array_out"       to="$(arg people_marker_array_out)"/>
   </node>
 
 </launch>
diff --git a/get_obj_dist/src/object_to_distance.py b/get_obj_dist/src/human_2d_to_3d.py
similarity index 65%
rename from get_obj_dist/src/object_to_distance.py
rename to get_obj_dist/src/human_2d_to_3d.py
index 97c62d9a89d03156280f7a8bcd05ba88c3ad4588..c533f72ad292b1d847ad85c4a79def4bd0761e0f 100755
--- a/get_obj_dist/src/object_to_distance.py
+++ b/get_obj_dist/src/human_2d_to_3d.py
@@ -12,6 +12,7 @@ from human_detection.msg import box_list
 from utils import *
 
 from spencer_tracking_msgs.msg import DetectedPersons, DetectedPerson
+from vision_msgs.msg import Detection2DArray
 
 text_position = (10, 50)
 font = cv2.FONT_HERSHEY_SIMPLEX
@@ -37,20 +38,32 @@ class obj_dist:
         self.detections_pub = rospy.Publisher('~detected_persons', DetectedPersons, queue_size=1)
 
         # Initialize subscribers
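+        # Detection2DArray (vision_msgs) provides per-detection scores that box_list lacks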
+        self.det2d_sub = message_filters.Subscriber('~detection2d_array', Detection2DArray)
         self.bbx_sub = message_filters.Subscriber('~bounding_box_in', box_list)
         self.human_image_sub = message_filters.Subscriber('~detection_image_in', Image)
         self.depth_image_sub = message_filters.Subscriber('~depth_image_in', Image)
         # self.depth_image_sub = message_filters.Subscriber('/camera/depth/image_rect_raw', Image)
 
+
+        """ from previous subscription to bounding_box, missing confidence, replaced with callback2
         ts = message_filters.ApproximateTimeSynchronizer([self.bbx_sub, self.human_image_sub,
+                                                          self.depth_image_sub],
+                                                         queue_size=10,
+                                                         slop=1)
+        ts.registerCallback(self.callback)
+        """
+
+        ts2 = message_filters.ApproximateTimeSynchronizer([self.det2d_sub, self.human_image_sub,
                                                           self.depth_image_sub],
                                                          queue_size=10,
                                                          slop=1)
-        ts.registerCallback(self.callback)
+        ts2.registerCallback(self.callback2)
 
+    """ from previous subscription to bounding_box, missing confidence, replaced with callback2
     def callback(self, bbx, image, depth):
 
         detections = DetectedPersons()
         detections.header = depth.header
 
         #print('working lol')
@@ -78,7 +91,41 @@
 
         self.marker_pub.publish(self.marker_array)
         self.detections_pub.publish(detections)
+    """
+
+    def callback2(self, det2d, image, depth):
+
+        detections = DetectedPersons()
+        detections.header = depth.header
+
+        if len(det2d.detections) > 0:
+            cv_depth = self.bridge.imgmsg_to_cv2(depth, 'passthrough')
+            cv_image = self.bridge.imgmsg_to_cv2(image, 'bgr8')
+
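+            # Only 5 marker/moving-average slots exist; unused slots are reset below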
+            for i in range(5):
+                if i < len(det2d.detections):
+                    cood = self.get_human_distance2(cv_depth, cv_image, det2d.detections[i], i)
+                    self.set_model_coordinates(cood, i)
+
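+                    # Fill a spencer_tracking_msgs DetectedPerson with the 3D position and detector confidence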
+                    det = DetectedPerson()
+                    det.confidence = det2d.detections[i].results[0].score
+                    det.pose.pose.position.x = cood[0]
+                    det.pose.pose.position.y = cood[1]
+                    det.pose.pose.position.z = cood[2]
+                    det.pose.pose.orientation.w = 1.0
+                    det.modality = "rgbd"
+
+                    detections.detections.append(det)
+
+                else:
+                    self.set_model_coordinates((-1, -1, -1), i)
+
+        self.marker_pub.publish(self.marker_array)
+        self.detections_pub.publish(detections)
 
+    """ from previous subscription to bounding_box, missing confidence, replaced with callback2
     def get_human_distance(self, cv_depth, cv_image, box, person_id):
         roi_depth = cv_depth[box.ymin:box.ymax, box.xmin:box.xmax]
         # filtered_depth, _size = filter_background(roi_depth)
@@ -98,6 +145,37 @@
                         line_type)
             self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
 
+            # TODO: Add dynamic find floor
+            return x_meter, 0.0, current_avg
+        else:
+            return -1, -1, -1
+    """
+
+    def get_human_distance2(self, cv_depth, cv_image, det, person_id):
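+        # BoundingBox2D stores center + size; derive the pixel corners of the depth ROI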
+        ymin = int(det.bbox.center.y - det.bbox.size_y/2)
+        ymax = int(det.bbox.center.y + det.bbox.size_y/2)
+        xmin = int(det.bbox.center.x - det.bbox.size_x/2)
+        xmax = int(det.bbox.center.x + det.bbox.size_x/2)
+        roi_depth = cv_depth[ymin:ymax, xmin:xmax]
+        # filtered_depth, _size = filter_background(roi_depth)
+        filtered_depth, _size = dynamic_background(roi_depth)
+        if _size:
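+            # mean ROI depth is in millimetres; /1000.0 converts it to metres before averaging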
+            self.moving_average[person_id].pop()
+            self.moving_average[person_id].insert(0, filtered_depth.sum() / _size / 1000.0)
+
+            current_avg = sum(self.moving_average[person_id]) / size_of_moving_avg
+
+            x_meter = get_x_in_meters(xmin, xmax, current_avg)
+            cv2.putText(cv_image, '{} meters / Person {}'.format(round(current_avg, 2), person_id),
+                        (xmin, ymax - 100),
+                        font,
+                        font_scale,
+                        font_color,
+                        line_type)
+            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image))
+
             # TODO: Add dynamic find floor
             return x_meter, 0.0, current_avg
         else:
diff --git a/human_detection/launch/node.launch b/human_detection/launch/node.launch
index efdbeb6dfb977ac9d5f786ef49ee155aef1e5447..60a848e9d65dbed62a4c6ccf6ee2fd74f9b98528 100644
--- a/human_detection/launch/node.launch
+++ b/human_detection/launch/node.launch
@@ -2,17 +2,20 @@
 <!-- -->
 <launch>
 
-  <arg name="node_name"        default="human_detection"/>
   <arg name="camera_in_topic"  default="/camera/color/image_raw"/>
-  <arg name="image_out_topic"  default="/$(arg node_name)/detection_image"/>
-  <arg name="bbox_out_topic"   default="/$(arg node_name)/bounding_box"/>
 
+  <arg name="node_name"        default="human_tensorflow_detection"/>
+  <arg name="image_out_topic"  default="/$(arg node_name)/image"/>
+  <arg name="bbox_out_topic"   default="/$(arg node_name)/bounding_box"/>
   <arg name="virtual_env_path" default="$(env HOME)/iri-lab/virtualenv/raadical_venv"/>
   <arg name="output"           default="screen"/>
+  <arg name="required"         default="true"/>
 
   <node pkg="human_detection"
         name="$(arg node_name)"
         type="human_detector.py"
+        required="$(arg required)"
         output="$(arg output)"
         args="--venv $(arg virtual_env_path)">
     <remap from="~camera/color/image_raw" to="$(arg camera_in_topic)"/>
diff --git a/human_detection/rviz/test.rviz b/human_detection/rviz/test.rviz
index 0b5f0eb93df498eaf831f92a92bfbb0ac3d114e9..25f9bfe395b26c4327e7dc6947f39137fa402869 100644
--- a/human_detection/rviz/test.rviz
+++ b/human_detection/rviz/test.rviz
@@ -6,8 +6,6 @@ Panels:
       Expanded:
         - /Global Options1
         - /Status1
-        - /DetectionDistanceImage1
-        - /MarkerArray1
       Splitter Ratio: 0.5
     Tree Height: 617
   - Class: rviz/Selection
@@ -118,7 +116,7 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /human_detection/detection_image
+      Image Topic: /human_tensorflow_detection/image
       Max Value: 1
       Median window: 5
       Min Value: 0
@@ -130,11 +128,11 @@ Visualization Manager:
       Value: true
     - Class: rviz/Image
       Enabled: true
-      Image Topic: /human_bbox_to_distance/detection_distance_image
+      Image Topic: /human_tensorflow_detection_2d_to_3d/image
       Max Value: 1
       Median window: 5
       Min Value: 0
-      Name: DetectionDistanceImage
+      Name: DistanceImage
       Normalize Range: true
       Queue Size: 2
       Transport Hint: raw
@@ -142,8 +140,8 @@ Visualization Manager:
       Value: true
     - Class: rviz/MarkerArray
       Enabled: true
-      Marker Topic: /human_bbox_to_distance/people_marker_array
-      Name: MarkerArray
+      Marker Topic: /human_tensorflow_detection_2d_to_3d/people_marker_array
+      Name: DistanceMarker
       Namespaces:
         "": true
       Queue Size: 100
@@ -197,16 +195,16 @@ Visualization Manager:
       Yaw: 3.165403366088867
     Saved: ~
 Window Geometry:
-  DetectionDistanceImage:
-    collapsed: false
   DetectionImage:
     collapsed: false
   Displays:
     collapsed: false
+  DistanceImage:
+    collapsed: false
   Height: 846
   Hide Left Dock: false
   Hide Right Dock: false
-  QMainWindow State: 000000ff00000000fd000000040000000000000156000002f4fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002f4000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001b3000002f4fc0200000005fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001c0044006500740065006300740069006f006e0049006d006100670065010000003d000001780000001600fffffffb0000002c0044006500740065006300740069006f006e00440069007300740061006e006300650049006d00610067006501000001bb000001760000001600fffffffb0000000a0056006900650077007300000000ff000001ee000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000006510000003efc0100000002fb0000000800540069006d0065000000000000000651000004f300fffffffb0000000800540069006d006501000000000000045000000000000000000000033c000002f400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  QMainWindow State: 000000ff00000000fd000000040000000000000156000002f4fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002f4000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001b3000002f4fc0200000005fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001c0044006500740065006300740069006f006e0049006d006100670065010000003d000001780000001600fffffffb0000001a00440069007300740061006e006300650049006d00610067006501000001bb000001760000001600fffffffb0000000a0056006900650077007300000000ff000001ee000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000006510000003efc0100000002fb0000000800540069006d0065000000000000000651000004f300fffffffb0000000800540069006d006501000000000000045000000000000000000000033c000002f400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
   Selection:
     collapsed: false
   Time:
diff --git a/human_detection/src/human_detector.py b/human_detection/src/human_detector.py
index 6895e5852eafa09e8dd6101e5de8d2db6ca05cb8..2f3c201ebc49af40de2167bb3ae445614a144105 100755
--- a/human_detection/src/human_detector.py
+++ b/human_detection/src/human_detector.py
@@ -71,6 +71,11 @@ num_detections = detection_graph.get_tensor_by_name('num_detections:0')
 # time_origin = time.time()
 class human_detector:
     def __init__(self):
+
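+        # logged here rather than at module scope, where "str = ..." shadowed the builtin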
+        text = "Current Tensorflow Version: " + str(tf.__version__)
+        rospy.loginfo(rospy.get_name() + ": " + text)
+
         self.bridge = CvBridge()
         self.image_sub = rospy.Subscriber("~camera/color/image_raw", Image, self.callback)
 
@@ -83,7 +88,7 @@ class human_detector:
         try:
             cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
         except CvBridgeError as e:
-            rospy.logerr("CvBridgeError: " +e)
+            rospy.logerr(rospy.get_name() + ": CvBridgeError: " + str(e))
             #print(e)
             return
 
@@ -178,7 +183,7 @@ class human_detector:
             cvt_msg.header.stamp = data.header.stamp
             self.image_pub.publish(cvt_msg)
         except CvBridgeError as e:
-            rospy.logerr("CvBridgeError: " + e)
+            rospy.logerr(rospy.get_name() + ": CvBridgeError: " + str(e))
             #print(e)
 
         # Append Accuracy
@@ -195,7 +200,5 @@ def main():
 
 if __name__ == '__main__':
     #print('Current Tensorflow Version: ' + str(tf.__version__))
-    str = "Current Tensorflow Version: " + str(tf.__version__)
-    rospy.loginfo(str)
     #rospy.set_param('use_sim_time', 'True')
     main()