#include "veyeimx287m.h"

// NOTE(review): the original include list was garbled in extraction (targets
// stripped); reconstructed from usage below — verify against VCS history.
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <unistd.h>

#include <linux/videodev2.h>

#include <array>
#include <cerrno>
#include <chrono>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>

#include <QElapsedTimer>
#include <QFuture>
#include <QThreadPool>
#include <QtConcurrent>

// orpheus
#include "camera/veye_i2c.h"
#include "constants.h"
#include "protocols/httpserver.h"
#include "veyeimx287m_types.h"

/// Lookup table mapping human-readable pixel-format names to V4L2 fourcc
/// codes and their plane counts. Used only for diagnostic printing.
static const struct v4l2_format_info {
    const char *name;
    unsigned int fourcc;
    unsigned char n_planes;
} pixel_formats[] = {
    {"RGB332", V4L2_PIX_FMT_RGB332, 1},   {"RGB444", V4L2_PIX_FMT_RGB444, 1},
    {"ARGB444", V4L2_PIX_FMT_ARGB444, 1}, {"XRGB444", V4L2_PIX_FMT_XRGB444, 1},
    {"RGB555", V4L2_PIX_FMT_RGB555, 1},   {"ARGB555", V4L2_PIX_FMT_ARGB555, 1},
    {"XRGB555", V4L2_PIX_FMT_XRGB555, 1}, {"RGB565", V4L2_PIX_FMT_RGB565, 1},
    {"RGB555X", V4L2_PIX_FMT_RGB555X, 1}, {"RGB565X", V4L2_PIX_FMT_RGB565X, 1},
    {"BGR666", V4L2_PIX_FMT_BGR666, 1},   {"BGR24", V4L2_PIX_FMT_BGR24, 1},
    {"RGB24", V4L2_PIX_FMT_RGB24, 1},     {"BGR32", V4L2_PIX_FMT_BGR32, 1},
    {"ABGR32", V4L2_PIX_FMT_ABGR32, 1},   {"XBGR32", V4L2_PIX_FMT_XBGR32, 1},
    {"RGB32", V4L2_PIX_FMT_RGB32, 1},     {"ARGB32", V4L2_PIX_FMT_ARGB32, 1},
    {"XRGB32", V4L2_PIX_FMT_XRGB32, 1},   {"HSV24", V4L2_PIX_FMT_HSV24, 1},
    {"HSV32", V4L2_PIX_FMT_HSV32, 1},     {"Y8", V4L2_PIX_FMT_GREY, 1},
    {"Y10", V4L2_PIX_FMT_Y10, 1},         {"Y12", V4L2_PIX_FMT_Y12, 1},
    {"Y16", V4L2_PIX_FMT_Y16, 1},         {"UYVY", V4L2_PIX_FMT_UYVY, 1},
    {"VYUY", V4L2_PIX_FMT_VYUY, 1},       {"YUYV", V4L2_PIX_FMT_YUYV, 1},
    {"YVYU", V4L2_PIX_FMT_YVYU, 1},       {"YUV32", V4L2_PIX_FMT_YUV32, 1},
    {"AYUV32", V4L2_PIX_FMT_AYUV32, 1},   {"XYUV32", V4L2_PIX_FMT_XYUV32, 1},
    {"VUYA32", V4L2_PIX_FMT_VUYA32, 1},   {"VUYX32", V4L2_PIX_FMT_VUYX32, 1},
    {"YUVA32", V4L2_PIX_FMT_YUVA32, 1},   {"YUVX32", V4L2_PIX_FMT_YUVX32, 1},
    {"NV12", V4L2_PIX_FMT_NV12, 1},       {"NV12M", V4L2_PIX_FMT_NV12M, 2},
    {"NV21", V4L2_PIX_FMT_NV21, 1},       {"NV21M", V4L2_PIX_FMT_NV21M, 2},
    {"NV16", V4L2_PIX_FMT_NV16, 1},       {"NV16M", V4L2_PIX_FMT_NV16M, 2},
    {"NV61", V4L2_PIX_FMT_NV61, 1},       {"NV61M", V4L2_PIX_FMT_NV61M, 2},
    {"NV24", V4L2_PIX_FMT_NV24, 1},       {"NV42", V4L2_PIX_FMT_NV42, 1},
    {"YUV420M", V4L2_PIX_FMT_YUV420M, 3}, {"YUV422M", V4L2_PIX_FMT_YUV422M, 3},
    {"YUV444M", V4L2_PIX_FMT_YUV444M, 3}, {"YVU420M", V4L2_PIX_FMT_YVU420M, 3},
    {"YVU422M", V4L2_PIX_FMT_YVU422M, 3}, {"YVU444M", V4L2_PIX_FMT_YVU444M, 3},
    {"SBGGR8", V4L2_PIX_FMT_SBGGR8, 1},   {"SGBRG8", V4L2_PIX_FMT_SGBRG8, 1},
    {"SGRBG8", V4L2_PIX_FMT_SGRBG8, 1},   {"SRGGB8", V4L2_PIX_FMT_SRGGB8, 1},
    {"SBGGR10_DPCM8", V4L2_PIX_FMT_SBGGR10DPCM8, 1},
    {"SGBRG10_DPCM8", V4L2_PIX_FMT_SGBRG10DPCM8, 1},
    {"SGRBG10_DPCM8", V4L2_PIX_FMT_SGRBG10DPCM8, 1},
    {"SRGGB10_DPCM8", V4L2_PIX_FMT_SRGGB10DPCM8, 1},
    {"SBGGR10", V4L2_PIX_FMT_SBGGR10, 1}, {"SGBRG10", V4L2_PIX_FMT_SGBRG10, 1},
    {"SGRBG10", V4L2_PIX_FMT_SGRBG10, 1}, {"SRGGB10", V4L2_PIX_FMT_SRGGB10, 1},
    {"SBGGR10P", V4L2_PIX_FMT_SBGGR10P, 1},
    {"SGBRG10P", V4L2_PIX_FMT_SGBRG10P, 1},
    {"SGRBG10P", V4L2_PIX_FMT_SGRBG10P, 1},
    {"SRGGB10P", V4L2_PIX_FMT_SRGGB10P, 1},
    {"SBGGR12", V4L2_PIX_FMT_SBGGR12, 1}, {"SGBRG12", V4L2_PIX_FMT_SGBRG12, 1},
    {"SGRBG12", V4L2_PIX_FMT_SGRBG12, 1}, {"SRGGB12", V4L2_PIX_FMT_SRGGB12, 1},
    {"SBGGR16", V4L2_PIX_FMT_SBGGR16, 1}, {"SGBRG16", V4L2_PIX_FMT_SGBRG16, 1},
    {"SGRBG16", V4L2_PIX_FMT_SGRBG16, 1}, {"SRGGB16", V4L2_PIX_FMT_SRGGB16, 1},
    {"IPU3_SBGGR10", V4L2_PIX_FMT_IPU3_SBGGR10, 1},
    {"IPU3_SGBRG10", V4L2_PIX_FMT_IPU3_SGBRG10, 1},
    {"IPU3_SGRBG10", V4L2_PIX_FMT_IPU3_SGRBG10, 1},
    {"IPU3_SRGGB10", V4L2_PIX_FMT_IPU3_SRGGB10, 1},
    {"IPU3_Y10", V4L2_PIX_FMT_IPU3_Y10, 1},
    {"DV", V4L2_PIX_FMT_DV, 1},
    {"MJPEG", V4L2_PIX_FMT_MJPEG, 1},
    {"MPEG", V4L2_PIX_FMT_MPEG, 1},
};

/// Human-readable names for the v4l2_field interlacing modes.
static const struct {
    const char *name;
    enum v4l2_field field;
} fields[] = {
    {"any", V4L2_FIELD_ANY},
    {"none", V4L2_FIELD_NONE},
    {"top", V4L2_FIELD_TOP},
    {"bottom", V4L2_FIELD_BOTTOM},
    {"interlaced", V4L2_FIELD_INTERLACED},
    {"seq-tb", V4L2_FIELD_SEQ_TB},
    {"seq-bt", V4L2_FIELD_SEQ_BT},
    {"alternate", V4L2_FIELD_ALTERNATE},
    {"interlaced-tb", V4L2_FIELD_INTERLACED_TB},
    {"interlaced-bt", V4L2_FIELD_INTERLACED_BT},
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/// Returns the printable name of a v4l2_field value, or "unknown".
static const char *v4l2_field_name(enum v4l2_field field)
{
    for (unsigned int i = 0; i < ARRAY_SIZE(fields); ++i) {
        if (fields[i].field == field)
            return fields[i].name;
    }
    return "unknown";
}

/// Looks up the format-info entry for a fourcc, or nullptr if unknown.
static const struct v4l2_format_info *v4l2_format_by_fourcc(unsigned int fourcc)
{
    for (unsigned int i = 0; i < ARRAY_SIZE(pixel_formats); ++i) {
        if (pixel_formats[i].fourcc == fourcc)
            return &pixel_formats[i];
    }
    return NULL;
}

/// Returns a printable name for a fourcc. Falls back to decoding the four
/// ASCII bytes of the code itself.
/// NOTE(review): the fallback writes into a function-local static buffer, so
/// the returned pointer is not reentrant/thread-safe — fine for the current
/// single diagnostic call site in initCam(), but do not use from two threads.
static const char *v4l2_format_name(unsigned int fourcc)
{
    const struct v4l2_format_info *info;
    static char name[5];
    unsigned int i;

    info = v4l2_format_by_fourcc(fourcc);
    if (info)
        return info->name;

    for (i = 0; i < 4; ++i) {
        name[i] = fourcc & 0xff;
        fourcc >>= 8;
    }
    name[4] = '\0';
    return name;
}

#define LOGD(...) \
    do { \
        printf(__VA_ARGS__); \
        printf("\n"); \
    } while (0)
// Fixed: removed the trailing ';' from the expansion (it produced an empty
// extra statement at every use site, which breaks `if (x) DBG(...); else`).
#define DBG(fmt, args...) LOGD("%s:%d, " fmt, __FUNCTION__, __LINE__, ##args)

// Global profiling accumulators shared with the image-processing code.
// All are reset once per second from dequeueImageBuffer()'s stats printer.
extern uint64_t dq_elapsed_ns;
extern uint64_t get_elapsed_ns;
extern uint64_t sum_elapsed_ns;
extern uint64_t corr_elapsed_ns;
extern uint64_t max_elapsed_ns;
extern uint64_t value_elapsed_ns;
extern uint64_t rot_elapsed_ns;
extern uint64_t pix_elapsed_ns;
extern uint64_t dropped_count;

// constexpr char videoDevice[] = "/dev/video0";  // now provided by constants.h

VeyeIMX287m::VeyeIMX287m() {}

/// Stops worker threads, stops V4L2 streaming, unmaps the capture buffers and
/// closes the device. Order matters: threads touch the fd and the mappings, so
/// they must be joined first.
VeyeIMX287m::~VeyeIMX287m()
{
    for (auto &t : m_getThreads) {
        t.request_stop();
        t.join();
    }
    m_streamThread.request_stop();
    m_streamThread.join();

    // Fixed: skip STREAMOFF/munmap entirely when the device never opened
    // (m_cam_fd < 0); previously these ran unconditionally and just spammed
    // errors. VIDIOC_STREAMOFF takes a pointer to the buffer type as int.
    if (m_cam_fd >= 0) {
        int buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        if (ioctl(m_cam_fd, VIDIOC_STREAMOFF, &buf_type) == -1) {
            std::cout << "cannot stop stream" << std::endl;
        }
        for (const auto &buffer : m_rawBuffers) {
            // NOTE(review): assumes every mapping is exactly
            // radxa_raw_img_size bytes; initCam() maps with the
            // driver-reported plane length — confirm the two always agree.
            if (munmap(buffer.mem, radxa_raw_img_size) < 0) {
                DBG("Munmap failed!!.");
            }
        }
        if (close(m_cam_fd) == -1) {
            std::cout << __func__ << ": cannot close camera: "
                      << strerror(errno) << std::endl;
        }
    }
    std::cout << "camera closed" << std::endl;
}

/// Enumerates available cameras. Currently hard-coded to a single device with
/// fixed exposure/gain defaults.
/// FIXME: use saved params, get rid of hardcode.
/// NOTE(review): template arguments below were lost in extraction and
/// reconstructed; the signature must match the declaration in veyeimx287m.h.
std::vector<std::shared_ptr<VeyeIMX287m>> VeyeIMX287m::search()
{
    // return only one camera for now
    const auto cam = std::make_shared<VeyeIMX287m>();
    if (!cam->init())
        return {};
    // if (!cam->set_autoExposure(false))
    if (!cam->set_autoExposure(true))
        return {};
    if (!cam->set_exposureTime(std::chrono::microseconds(30)))
        return {};
    if (!cam->set_autoGain(false))
        return {};
    if (!cam->set_gain(0.1))
        return {};
    return {cam};
}

/// Starts V4L2 streaming and launches the frame-grabbing worker threads.
/// @return false if VIDIOC_STREAMON fails.
bool VeyeIMX287m::startStream()
{
    constexpr auto radxa_buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    const auto ret = ioctl(m_cam_fd, VIDIOC_STREAMON, &radxa_buf_type);
    if (ret != 0) {
        std::cerr << "ioctl(VIDIOC_STREAMON) failed: " << errno << " ("
                  << strerror(errno) << ")" << std::endl;
        return false;
    }
    for (auto &t : m_getThreads) {
        t = std::jthread{&VeyeIMX287m::getFramesLoop, this};
    }
    std::cout << __func__ << " - OK" << std::endl;
    return true;
}

/// Opens the video device, configures the capture buffers and opens the I2C
/// control channel. Must be called before startStream().
bool VeyeIMX287m::init()
{
    if (!openCam())
        return false;
    if (!initCam())
        return false;
    if (!initI2C()) {
        return false;
    }
    // if (!initHttpServer())
    //     return false;
    return true;
}

// NOTE(review): in all I2C accessors below the cast applied to the Register
// enumerator was lost in extraction; uint32_t is assumed — confirm against
// VeyeI2C::write/read's register-parameter type.

/// Switches the sensor between continuous auto-exposure and manual exposure.
bool VeyeIMX287m::set_autoExposure(const bool enable)
{
    using namespace veye::imx287m;
    const uint32_t value = static_cast<uint32_t>(
        enable ? ExposureMode::AutoExposureContinious : ExposureMode::Manual);
    return m_i2c->write(static_cast<uint32_t>(Register::Exposure_Mode), value);
}

/// @return true when the sensor reports continuous auto-exposure, empty
/// optional when the I2C read fails.
std::optional<bool> VeyeIMX287m::get_autoExposure()
{
    using namespace veye::imx287m;
    const auto value = m_i2c->read(static_cast<uint32_t>(Register::Exposure_Mode));
    if (!value) {
        return {};
    }
    return *value == static_cast<uint32_t>(ExposureMode::AutoExposureContinious);
}

/// Switches the sensor between continuous auto-gain and manual gain.
bool VeyeIMX287m::set_autoGain(const bool enable)
{
    using namespace veye::imx287m;
    const uint32_t value = static_cast<uint32_t>(
        enable ? GainMode::AutoGainContinious : GainMode::Manual);
    return m_i2c->write(static_cast<uint32_t>(Register::Gain_Mode), value);
}

/// @return true when the sensor reports continuous auto-gain, empty optional
/// when the I2C read fails.
std::optional<bool> VeyeIMX287m::get_autoGain()
{
    using namespace veye::imx287m;
    const auto value = m_i2c->read(static_cast<uint32_t>(Register::Gain_Mode));
    if (!value) {
        return {};
    }
    return *value == static_cast<uint32_t>(GainMode::AutoGainContinious);
}

/// Sets the manual exposure time. The register holds microseconds directly.
bool VeyeIMX287m::set_exposureTime(const std::chrono::microseconds us)
{
    using namespace veye::imx287m;
    return m_i2c->write(static_cast<uint32_t>(Register::ME_Time), us.count());
}

/// @return the manual exposure time, empty optional on I2C failure.
std::optional<std::chrono::microseconds> VeyeIMX287m::get_exposureTime()
{
    using namespace veye::imx287m;
    const auto value = m_i2c->read(static_cast<uint32_t>(Register::ME_Time));
    if (!value) {
        return {};
    }
    return std::chrono::microseconds{*value};
}

/// Sets the manual gain. The register stores tenths of the gain value.
bool VeyeIMX287m::set_gain(const float value)
{
    using namespace veye::imx287m;
    return m_i2c->write(static_cast<uint32_t>(Register::Manual_Gain),
                        static_cast<uint32_t>(value * 10));
}

/// @return the manual gain, empty optional on I2C failure.
std::optional<float> VeyeIMX287m::get_gain()
{
    using namespace veye::imx287m;
    const auto value = m_i2c->read(static_cast<uint32_t>(Register::Manual_Gain));
    if (!value) {
        return {};
    }
    // Fixed: set_gain() writes value*10 (register holds tenths), so the read
    // path must divide by 10 — the previous `*value * 10` made the round trip
    // off by a factor of 100.
    return *value / 10.f;
}

/// Opens the capture device read/write.
bool VeyeIMX287m::openCam()
{
    m_cam_fd = open(videoDevice, O_RDWR);
    if (m_cam_fd < 0) {
        fprintf(stderr, "cannot open cam '%s', error: '%s'\n", videoDevice,
                strerror(errno));
        return false;
    }
    return true;
}

/// Negotiates the capture format, requests BUFFER_COUNT mmap buffers, maps
/// them and queues them all to the driver.
/// Only single-plane multi-planar formats are supported.
bool VeyeIMX287m::initCam()
{
    int ret{-1};
    constexpr bool radxa_zero_3et{true};
    const auto radxa_buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

    // Legacy single-planar negotiation path, kept for non-Radxa targets.
    if constexpr (!radxa_zero_3et) {
        v4l2_format format;
        memset(&format, 0, sizeof(v4l2_format));
        format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        format.fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
        format.fmt.pix.width = img_width;
        format.fmt.pix.height = img_height;
        ret = ioctl(m_cam_fd, VIDIOC_TRY_FMT, &format);
        if (ret < 0) {
            fprintf(stderr, "cannot try cam format: error - '%s'\n",
                    strerror(errno));
            return false;
        }
        // TODO: remove this?
        format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        ret = ioctl(m_cam_fd, VIDIOC_S_FMT, &format);
        if (ret < 0) {
            fprintf(stderr, "cannot set cam format: error - '%s'\n",
                    strerror(errno));
            return false;
        }
    }

    // Query the driver's current multi-planar format for diagnostics and to
    // verify the single-plane assumption.
    v4l2_format fmt;
    memset(&fmt, 0, sizeof fmt);
    fmt.type = radxa_buf_type;
    if (ioctl(m_cam_fd, VIDIOC_G_FMT, &fmt) < 0) {
        printf("Unable to get format: %s (%d).\n", strerror(errno), errno);
        return false;
    }
    const int num_planes = fmt.fmt.pix_mp.num_planes;
    std::cout << "num_planes: " << num_planes << std::endl;
    if (num_planes != 1) {
        std::cerr << "multiple planes are not supported" << std::endl;
        return false;
    }
    printf("Video format: %s (%08x) %ux%u field %s, %u planes: \n",
           v4l2_format_name(fmt.fmt.pix_mp.pixelformat),
           fmt.fmt.pix_mp.pixelformat, fmt.fmt.pix_mp.width,
           fmt.fmt.pix_mp.height,
           v4l2_field_name((enum v4l2_field) fmt.fmt.pix_mp.field),
           fmt.fmt.pix_mp.num_planes);
    for (int i = 0; i < fmt.fmt.pix_mp.num_planes; i++) {
        printf(" * Stride %u, buffer size %u\n",
               fmt.fmt.pix_mp.plane_fmt[i].bytesperline,
               fmt.fmt.pix_mp.plane_fmt[i].sizeimage);
        fflush(stdout);
    }

    // Request the driver-side buffer ring.
    struct v4l2_requestbuffers rb;
    memset(&rb, 0, sizeof rb);
    rb.count = BUFFER_COUNT;
    if constexpr (radxa_zero_3et) {
        rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    } else {
        rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    }
    rb.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(m_cam_fd, VIDIOC_REQBUFS, &rb);
    if (ret < 0) {
        fprintf(stderr, "cannot set cam request buffers: ioctl error - '%s'\n",
                strerror(errno));
        return false;
    }
    if (rb.count < BUFFER_COUNT) {
        fprintf(stderr, "cannot set cam request buffers\n");
        return false;
    }
    m_rawBuffers.resize(rb.count);

    // mmap each buffer (plane 0 only — single-plane verified above) and queue
    // it so the driver can start filling as soon as streaming begins.
    std::cout << "query buffers" << std::endl;
    for (uint32_t i = 0; i < rb.count; i++) {
        std::cout << "-----------------------------------------------------"
                  << std::endl;
        struct v4l2_buffer buf;
        struct v4l2_plane planes[VIDEO_MAX_PLANES];
        memset(&buf, 0, sizeof buf);
        memset(planes, 0, sizeof planes);
        buf.index = i;
        buf.type = rb.type;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.length = VIDEO_MAX_PLANES;
        buf.m.planes = planes;
        ret = ioctl(m_cam_fd, VIDIOC_QUERYBUF, &buf);
        if (ret < 0) {
            std::cerr << "ioctl(VIDIOC_QUERYBUF) failed: " << errno << " ("
                      << strerror(errno) << ")" << std::endl;
            return false;
        }
        std::cout << "buffer.length: " << buf.length << std::endl;
        std::cout << "buffer.m.offset: " << buf.m.offset << std::endl;
        std::cout << "buffer.index: " << buf.index << " " << i << std::endl;
        const auto length = buf.m.planes[0].length;
        const auto offset = buf.m.planes[0].m.mem_offset;
        m_rawBuffers[i].mem = mmap(0, length, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, m_cam_fd, offset);
        if (m_rawBuffers[i].mem == MAP_FAILED) {
            std::cerr << "mmap() failed: " << errno << " (" << strerror(errno)
                      << ")" << std::endl;
            std::cerr << "length: " << length << std::endl;
            std::cerr << "offset: " << offset << std::endl;
            return false;
        }
        printf("Buffer mapped at address %p.\n", m_rawBuffers[i].mem);
        ret = ioctl(m_cam_fd, VIDIOC_QBUF, &buf);
        if (ret != 0) {
            std::cerr << "ioctl(VIDIOC_QBUF) failed: " << errno << " ("
                      << strerror(errno) << ")" << std::endl;
            return false;
        }
    }
    fflush(stdout);
    fflush(stderr);
    return true;
}

/// Creates and opens the I2C channel used for sensor control registers.
/// NOTE(review): the make_shared template argument was lost in extraction;
/// the concrete type must match m_i2c's declaration (camera/veye_i2c.h).
bool VeyeIMX287m::initI2C()
{
    m_i2c = std::make_shared<VeyeI2C>();  // TODO confirm type name
    return m_i2c != nullptr && m_i2c->open();
}

/// Worker loop (one per entry in m_getThreads): dequeues a filled V4L2
/// buffer, copies its pixels into an Image slot, then rotates/post-processes
/// that Image asynchronously on a private thread pool.
///
/// NOTE(review): dequeueImageBuffer() re-queues the V4L2 buffer to the driver
/// *before* the copy below reads it, so the driver may overwrite the memory
/// mid-copy (see the pre-existing TODO). Also, the copy source index
/// (bufferIdx, driver-chosen) and the destination slot (i, round-robin per
/// thread) are intentionally(?) independent — confirm this decoupling.
void VeyeIMX287m::getFramesLoop(std::stop_token stopToken)
{
    QElapsedTimer t;
    uint8_t threadIdx{0};
    // One in-flight post-processing future per Image slot; element type
    // reconstructed from the lambda's return value.
    std::array<QFuture<uint8_t>, BUFFER_COUNT> futures;
    QThreadPool threadPool{};
    threadPool.setMaxThreadCount(BUFFER_COUNT);

    while (!stopToken.stop_requested()) {
        size_t bufferIdx{std::numeric_limits<size_t>::max()};
        if (!dequeueImageBuffer(bufferIdx)) {
            continue;
        }
        const uint8_t i = threadIdx % futures.size();
        // Wait until the previous post-processing of slot i finished before
        // overwriting its Image data. (A default-constructed QFuture returns
        // immediately.)
        futures[i].waitForFinished();
        {
            t.start();
            const auto &src = *static_cast<const Image::radxa_data_t *>(
                m_rawBuffers[bufferIdx].mem);
            auto &dst = m_rawBuffers[i].image->data;
            Image::copy(dst, src);
            get_elapsed_ns += t.nsecsElapsed();
            // m_sync.rawSemQueue.enqueue(image);
        }
        futures[i] = QtConcurrent::run(&threadPool, [this, i]() {
            const auto image = m_rawBuffers[i].image;
            image->rotate();
            const auto pixels = image->sharedPixels();
            return i;
        });
        ++processedCounter;
        ++threadIdx;
    }
}

/// Dequeues one filled buffer from the driver and immediately re-queues it.
/// On success, imageIndex receives the driver-chosen buffer index.
/// Once per second also dumps FPS/latency statistics to stderr and resets the
/// global profiling accumulators.
///
/// NOTE(review): the function-local statics (curr/prev/counter/requestIdx)
/// are shared by all getFramesLoop threads; only the accesses under m_camMtx
/// are synchronized — requestIdx++ races, but its value is purely advisory
/// (the driver overwrites buf.index on DQBUF).
/// TODO: check if some of buffers are being overwritten during processing.
bool VeyeIMX287m::dequeueImageBuffer(size_t &imageIndex)
{
    static struct timeval curr, prev;
    static uint16_t counter = 0;
    gettimeofday(&curr, NULL);
    double elapsedTime = (curr.tv_sec - prev.tv_sec) * 1000.0;   // sec to ms
    elapsedTime += (curr.tv_usec - prev.tv_usec) / 1000.0;       // us to ms

    // TODO: move this stats dump to some beautiful place
    if (elapsedTime > 1000. && processedCounter != 0) {
        fprintf(stderr,
                "fps: %d\tdropped: %lu sec: %ld "
                "dq: %lu get: %lu rot: %lu pix: %lu sum: %lu corr: "
                "%lu val: %lu\n",
                counter, dropped_count, curr.tv_sec % 1000,
                dq_elapsed_ns / 1000 / processedCounter,
                get_elapsed_ns / 1000 / processedCounter,
                rot_elapsed_ns / 1000 / processedCounter,
                pix_elapsed_ns / 1000 / processedCounter,
                sum_elapsed_ns / 1000 / processedCounter,
                corr_elapsed_ns / 1000 / processedCounter,
                // max_elapsed_ns / 1000 / processedCounter,
                value_elapsed_ns / 1000 / processedCounter);
        dq_elapsed_ns = 0;
        get_elapsed_ns = 0;
        sum_elapsed_ns = 0;
        corr_elapsed_ns = 0;
        max_elapsed_ns = 0;
        value_elapsed_ns = 0;
        rot_elapsed_ns = 0;
        pix_elapsed_ns = 0;
        dropped_count = 0;
        counter = 0;
        processedCounter = 0;
        prev = curr;
    }

    int ret;
    struct v4l2_buffer buf;
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    memset(&buf, 0, sizeof(buf));
    memset(planes, 0, sizeof planes);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.length = VIDEO_MAX_PLANES;
    buf.m.planes = planes;
    static uint16_t requestIdx{0};
    buf.index = requestIdx++ % BUFFER_COUNT;

    {
        {
            std::lock_guard lock(m_camMtx);
            QElapsedTimer t;
            t.start();
            ret = ioctl(m_cam_fd, VIDIOC_DQBUF, &buf);
            ++counter;
            dq_elapsed_ns += t.nsecsElapsed();
        }
        if (ret != 0) {
            std::cerr << "ioctl(VIDIOC_DQBUF) failed: " << errno << " ("
                      << strerror(errno) << ")" << std::endl;
            return false;
        }
        // Fixed: buf.index is unsigned (__u32), so the former `buf.index < 0`
        // arm was tautologically false and has been dropped.
        if (buf.index >= BUFFER_COUNT) {
            std::cerr << "invalid buffer index: " << buf.index << std::endl;
            return false;
        }
    }
    imageIndex = buf.index;

    // Count frames the driver skipped between consecutive dequeues.
    // Fixed: the previous `value_or(buf.sequence)` form subtracted 1 from the
    // unsigned accumulator on the very first frame (underflow); skip the
    // update entirely until a previous sequence number exists.
    if (m_previousFrameCounter) {
        dropped_count += buf.sequence - *m_previousFrameCounter - 1;
    }
    m_previousFrameCounter = buf.sequence;

    // Hand the buffer straight back to the driver.
    {
        std::lock_guard lock(m_camMtx);
        ret = ioctl(m_cam_fd, VIDIOC_QBUF, &buf);
    }
    if (ret != 0) {
        std::cerr << "ioctl(VIDIOC_QBUF) failed: " << errno << " ("
                  << strerror(errno) << ")" << std::endl;
        return false;
    }
    return true;
}

/// Legacy synchronous grab path — intentionally disabled; frame delivery now
/// goes through getFramesLoop(). Always returns false.
bool VeyeIMX287m::getImage(Image *image)
{
    (void) image;
    return false;
}

/// Returns the most recently processed image, or an empty pointer.
/// NOTE(review): the hand-off (`std::swap(result, m_lastProcessedImage)`) is
/// currently commented out in the original, so this always returns null —
/// kept as-is; confirm whether the swap should be re-enabled.
std::shared_ptr<Image> VeyeIMX287m::getImage()
{
    std::shared_ptr<Image> result;
    {
        std::lock_guard l{m_lastImageMtx};
        // std::swap(result, m_lastProcessedImage);
    }
    return result;
}