Post Go back to editing

How to increase performance of AD-FXTOF1-EBZ on Raspberry Pi 3

Category: Software
Product Number: AD-FXTOF1-EBZ

I am working on a Raspberry Pi 3 model B v1.2 and I'm using the FXTOF1 camera to publish the depth images on a ROS topic. I am using the latest SD card image and the Melodic ROS distribution. I followed all the installation guides on the official repository and I have built the OpenCV and ROS packages too. Earlier I had problems with the original ROS camera_node source, so I replaced it with my own code (below) and it's working as intended.

I'd like to eventually use this setup with a TurtleBot to navigate the environment, but the ROS node publishes with a frequency of 6 Hz, which isn't exactly a lot; is the low frequency normal on a Raspberry Pi? Is there any way to increase the performance without changing the setup? Keep in mind that I will use the point cloud to navigate, and I still have to figure out a method that can convert the depth matrix to a point cloud.

I have already tried to resize the frame, and it did boost the frequency from 4 Hz to 6 Hz, but I think I messed up the numbers because the frames are warped. If you need any further information, just ask.

Sorry in advance, but I am a student and new to robotics.

#include <aditof/camera.h>
#include <aditof/depth_sensor_interface.h>
#include <aditof/frame.h>
#include <aditof/system.h>
#ifndef JS_BINDINGS
#include <glog/logging.h>
#else
#include <aditof/log_cout.h>
#endif

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#ifdef OPENCV2
#include <opencv2/contrib/contrib.hpp>
#endif

#include "../../../opencv/aditof_opencv.h"

//added ros lib
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <iostream>

using namespace aditof;

int main(int argc, char *argv[]) {

#ifndef JS_BINDINGS
google::InitGoogleLogging(argv[0]);
FLAGS_alsologtostderr = 1;
#endif

Status status = Status::OK;

System system;

std::vector<std::shared_ptr<Camera>> cameras;
system.getCameraList(cameras);
if (cameras.empty()) {
LOG(WARNING) << "No cameras found!";
return 0;
}

auto camera = cameras.front();
status = camera->initialize();
if (status != Status::OK) {
LOG(ERROR) << "Could not initialize camera!";
return 0;
}

std::vector<std::string> frameTypes;
camera->getAvailableFrameTypes(frameTypes);
if (frameTypes.empty()) {
LOG(ERROR) << "No frame type available!";
return 0;
}

status = camera->setFrameType(frameTypes.front());
if (status != Status::OK) {
LOG(ERROR) << "Could not set camera frame type!";
return 0;
}

std::vector<std::string> modes;
camera->getAvailableModes(modes);
if (modes.empty()) {
LOG(ERROR) << "No camera modes available!";
return 0;
}

status = camera->setMode(modes[0]);
if (status != Status::OK) {
LOG(ERROR) << "Could not set camera mode!";
return 0;
}

aditof::CameraDetails cameraDetails;
camera->getDetails(cameraDetails);
int cameraRange = cameraDetails.depthParameters.maxDepth;
aditof::Frame frame;


aditof::FrameDetails myframe;

const int smallSignalThreshold = 50;
camera->setControl("noise_reduction_threshold",
std::to_string(smallSignalThreshold));

//cv::namedWindow("Display Image", cv::WINDOW_AUTOSIZE);


//added ros init
ros::init(argc, argv, "image_publisher");
ros::NodeHandle nh;
image_transport::ImageTransport it(nh);
image_transport::Publisher pub = it.advertise("camera/image", 1);
ros::Rate loop_rate(30);


while (nh.ok()) {
/* Request frame from camera */
status = camera->requestFrame(&frame);
if (status != Status::OK) {
LOG(ERROR) << "Could not request frame!";
return 0;
}

//frame resizing
status = frame.setDetails(myframe = (FrameDetails) {
400, // width
300, // heigth
400, // The width of the actual full frame
300, // heigth.2 vedi sopra
400, // rgbwidth
300, // rgbheigth
});
if (status != Status::OK) {
std::cout << "My error frame resizing";
return 0;
}


/* Convert from frame to depth mat */
cv::Mat mat;
status = fromFrameToDepthMat(frame, mat);
if (status != Status::OK) {
LOG(ERROR) << "Could not convert from frame to mat!";
return 0;
}

/* Distance factor */
double distance_scale = 255.0 / cameraRange;

/* Convert from raw values to values that opencv can understand */
mat.convertTo(mat, CV_8U, distance_scale);

/* Apply a rainbow color map to the mat to better visualize the
* depth data */
applyColorMap(mat, mat, cv::COLORMAP_RAINBOW);


sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", mat).toImageMsg();
pub.publish(msg);
ros::spinOnce();
loop_rate.sleep();

}

return 0;

}