Skip to content

Commit

Permalink
Merge pull request #1028 from luxonis/release_2.26.0
Browse files Browse the repository at this point in the history
Release 2.26.0
  • Loading branch information
moratom authored May 25, 2024
2 parents c21bdd3 + b2bad76 commit 9048745
Show file tree
Hide file tree
Showing 37 changed files with 1,557 additions and 73 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/main.workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@ jobs:
exclude:
- os: windows-latest
cmake: '3.10.x'
- os: macos-latest # Skip the old cmake on latest macos - doesn't handle ARM64 aarch correctly
cmake: '3.10.x'

steps:
- name: Cache .hunter folder
Expand Down Expand Up @@ -333,4 +335,4 @@ jobs:
upload_url: ${{ steps.createRelease.outputs.upload_url }}
asset_path: ${{ github.workspace }}/depthai-core-${{ steps.tag.outputs.version }}-win32-no-opencv.zip
asset_name: depthai-core-${{ steps.tag.outputs.version }}-win32-no-opencv.zip
asset_content_type: application/octet-stream
asset_content_type: application/octet-stream
5 changes: 4 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ if(WIN32)
endif()

# Create depthai project
project(depthai VERSION "2.25.1" LANGUAGES CXX C)
project(depthai VERSION "2.26.0" LANGUAGES CXX C)
get_directory_property(has_parent PARENT_DIRECTORY)
if(has_parent)
set(DEPTHAI_VERSION ${PROJECT_VERSION} PARENT_SCOPE)
Expand Down Expand Up @@ -219,6 +219,7 @@ add_library(${TARGET_CORE_NAME}
src/pipeline/node/DetectionNetwork.cpp
src/pipeline/node/Script.cpp
src/pipeline/node/SpatialDetectionNetwork.cpp
src/pipeline/node/ImageAlign.cpp
src/pipeline/node/SystemLogger.cpp
src/pipeline/node/SpatialLocationCalculator.cpp
src/pipeline/node/AprilTag.cpp
Expand All @@ -230,6 +231,7 @@ add_library(${TARGET_CORE_NAME}
src/pipeline/node/DetectionParser.cpp
src/pipeline/node/UVC.cpp
src/pipeline/node/PointCloud.cpp
src/pipeline/node/Cast.cpp
src/pipeline/datatype/Buffer.cpp
src/pipeline/datatype/ImgFrame.cpp
src/pipeline/datatype/EncodedFrame.cpp
Expand All @@ -250,6 +252,7 @@ add_library(${TARGET_CORE_NAME}
src/pipeline/datatype/EdgeDetectorConfig.cpp
src/pipeline/datatype/TrackedFeatures.cpp
src/pipeline/datatype/FeatureTrackerConfig.cpp
src/pipeline/datatype/ImageAlignConfig.cpp
src/pipeline/datatype/ToFConfig.cpp
src/pipeline/datatype/PointCloudConfig.cpp
src/pipeline/datatype/PointCloudData.cpp
Expand Down
4 changes: 2 additions & 2 deletions cmake/Depthai/DepthaiBootloaderConfig.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@ set(DEPTHAI_BOOTLOADER_MATURITY "release")
# set(DEPTHAI_BOOTLOADER_MATURITY "snapshot")

# "version if applicable"
set(DEPTHAI_BOOTLOADER_VERSION "0.0.27")
# set(DEPTHAI_BOOTLOADER_VERSION "0.0.24+57c26493754e2f00e57f6594b0b1a317f762d5f2")
set(DEPTHAI_BOOTLOADER_VERSION "0.0.28")
# set(DEPTHAI_BOOTLOADER_VERSION "0.0.27+5fb331f993adceeeda72202c233a9e3939ab3dab")
2 changes: 1 addition & 1 deletion cmake/Depthai/DepthaiDeviceSideConfig.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")

# "full commit hash of device side binary"
set(DEPTHAI_DEVICE_SIDE_COMMIT "c7127782f2da45aac89d5b5b816d04cc45ae40be")
set(DEPTHAI_DEVICE_SIDE_COMMIT "24a3b465b979de3f69410cd225914d8bd029f3ba")

# "version if applicable"
set(DEPTHAI_DEVICE_SIDE_VERSION "")
36 changes: 36 additions & 0 deletions examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,22 @@ hunter_private_data(
LOCATION concat_model
)

# blur model
hunter_private_data(
URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/blur_simplified_openvino_2021.4_6shave.blob"
SHA1 "14d543bbaceffa438071f83be58ad22a07ce33ee"
FILE "blur_simplified_openvino_2021.4_6shave.blob"
LOCATION blur_model
)

# diff model
hunter_private_data(
URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/diff_openvino_2022.1_6shave.blob"
SHA1 "fe9600e617d222f986a699f18e77e80ce2485000"
FILE "diff_openvino_2022.1_6shave.blob"
LOCATION diff_model
)

# normalization model
hunter_private_data(
URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/normalize_openvino_2021.4_4shave.blob"
Expand Down Expand Up @@ -386,3 +402,23 @@ dai_add_example(imu_video_synced Sync/imu_video_synced.cpp ON OFF)
if(DEPTHAI_HAVE_PCL_SUPPORT)
dai_add_example(visualize_pointcloud PointCloud/visualize_pointcloud.cpp ON ON)
endif()

# ImageAlign
dai_add_example(tof_align ImageAlign/tof_align.cpp OFF OFF)
dai_add_example(image_align ImageAlign/image_align.cpp ON OFF)
dai_add_example(thermal_align ImageAlign/thermal_align.cpp OFF OFF)
dai_add_example(depth_align ImageAlign/depth_align.cpp ON OFF)

# Cast
dai_add_example(blur Cast/blur.cpp ON OFF)
target_compile_definitions(blur PRIVATE BLOB_PATH="${blur_model}")
dai_add_example(concat Cast/concat.cpp ON OFF)
target_compile_definitions(concat PRIVATE BLOB_PATH="${concat_model}")
dai_add_example(diff Cast/diff.cpp ON OFF)
target_compile_definitions(diff PRIVATE BLOB_PATH="${diff_model}")

# ToF
dai_add_example(spatial_tiny_yolo_tof_v3 SpatialDetection/spatial_tiny_yolo_tof.cpp OFF OFF)
dai_add_example(spatial_tiny_yolo_tof_v4 SpatialDetection/spatial_tiny_yolo_tof.cpp OFF OFF)
target_compile_definitions(spatial_tiny_yolo_tof_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}")
target_compile_definitions(spatial_tiny_yolo_tof_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}")
13 changes: 7 additions & 6 deletions examples/Camera/thermal_cam.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,18 @@ int main() {
// Find the sensor width, height.
int width, height;
bool thermal_found = false;
for (auto &features : d.getConnectedCameraFeatures()) {
if (std::find_if(features.supportedTypes.begin(), features.supportedTypes.end(), [](const dai::CameraSensorType &type) {
return type == dai::CameraSensorType::THERMAL;
}) != features.supportedTypes.end()) {
thermal->setBoardSocket(features.socket); // Thermal will always be on CAM_E
for(auto& features : d.getConnectedCameraFeatures()) {
if(std::find_if(features.supportedTypes.begin(),
features.supportedTypes.end(),
[](const dai::CameraSensorType& type) { return type == dai::CameraSensorType::THERMAL; })
!= features.supportedTypes.end()) {
thermal->setBoardSocket(features.socket); // Thermal will always be on CAM_E
width = features.width;
height = features.height;
thermal_found = true;
}
}
if (!thermal_found) {
if(!thermal_found) {
throw std::runtime_error("Thermal camera not found!");
}
thermal->setPreviewSize(width, height);
Expand Down
50 changes: 50 additions & 0 deletions examples/Cast/blur.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
#include <depthai/depthai.hpp>
#include <opencv2/opencv.hpp>

constexpr int SHAPE = 300;

int main() {
dai::Pipeline p;

auto camRgb = p.create<dai::node::ColorCamera>();
auto nn = p.create<dai::node::NeuralNetwork>();
auto rgbOut = p.create<dai::node::XLinkOut>();
auto cast = p.create<dai::node::Cast>();
auto castXout = p.create<dai::node::XLinkOut>();

camRgb->setPreviewSize(SHAPE, SHAPE);
camRgb->setInterleaved(false);

nn->setBlobPath(BLOB_PATH);

rgbOut->setStreamName("rgb");
castXout->setStreamName("cast");

cast->setOutputFrameType(dai::ImgFrame::Type::BGR888p);

// Linking
camRgb->preview.link(nn->input);
camRgb->preview.link(rgbOut->input);
nn->out.link(cast->input);
cast->output.link(castXout->input);

dai::Device device(p);
auto qCam = device.getOutputQueue("rgb", 4, false);
auto qCast = device.getOutputQueue("cast", 4, false);

while(true) {
auto inCast = qCast->get<dai::ImgFrame>();
auto inRgb = qCam->get<dai::ImgFrame>();

if(inCast && inRgb) {
cv::imshow("Blur", inCast->getCvFrame());
cv::imshow("Original", inRgb->getCvFrame());
}

if(cv::waitKey(1) == 'q') {
break;
}
}

return 0;
}
63 changes: 63 additions & 0 deletions examples/Cast/concat.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
#include <depthai/depthai.hpp>
#include <opencv2/opencv.hpp>

constexpr int SHAPE = 300;

int main() {
dai::Pipeline p;

auto camRgb = p.create<dai::node::ColorCamera>();
auto left = p.create<dai::node::MonoCamera>();
auto right = p.create<dai::node::MonoCamera>();
auto manipLeft = p.create<dai::node::ImageManip>();
auto manipRight = p.create<dai::node::ImageManip>();
auto nn = p.create<dai::node::NeuralNetwork>();
auto cast = p.create<dai::node::Cast>();
auto castXout = p.create<dai::node::XLinkOut>();

camRgb->setPreviewSize(SHAPE, SHAPE);
camRgb->setInterleaved(false);
camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);

left->setCamera("left");
left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
manipLeft->initialConfig.setResize(SHAPE, SHAPE);
manipLeft->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);

right->setCamera("right");
right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
manipRight->initialConfig.setResize(SHAPE, SHAPE);
manipRight->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);

nn->setBlobPath(BLOB_PATH);
nn->setNumInferenceThreads(2);

castXout->setStreamName("cast");
cast->setOutputFrameType(dai::ImgFrame::Type::BGR888p);

// Linking
left->out.link(manipLeft->inputImage);
right->out.link(manipRight->inputImage);
manipLeft->out.link(nn->inputs["img1"]);
camRgb->preview.link(nn->inputs["img2"]);
manipRight->out.link(nn->inputs["img3"]);
nn->out.link(cast->input);
cast->output.link(castXout->input);

// Pipeline is defined, now we can connect to the device
dai::Device device(p);
auto qCast = device.getOutputQueue("cast", 4, false);

while(true) {
auto inCast = qCast->get<dai::ImgFrame>();
if(inCast) {
cv::imshow("Concated frames", inCast->getCvFrame());
}

if(cv::waitKey(1) == 'q') {
break;
}
}

return 0;
}
66 changes: 66 additions & 0 deletions examples/Cast/diff.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
#include <depthai/depthai.hpp>
#include <filesystem>
#include <opencv2/opencv.hpp>

constexpr int SHAPE = 720;

int main() {
    // Pipeline: an on-device Script node pairs each camera frame with the
    // previous one, a NN computes their per-pixel difference, and a Cast node
    // converts the NN output tensor into a grayscale frame for display.
    dai::Pipeline p;

    auto camRgb = p.create<dai::node::ColorCamera>();
    auto nn = p.create<dai::node::NeuralNetwork>();
    auto script = p.create<dai::node::Script>();
    auto rgbXout = p.create<dai::node::XLinkOut>();
    auto cast = p.create<dai::node::Cast>();
    auto castXout = p.create<dai::node::XLinkOut>();

    // Preview (NN input) and video (host display) share the same SHAPE x SHAPE size.
    camRgb->setVideoSize(SHAPE, SHAPE);
    camRgb->setPreviewSize(SHAPE, SHAPE);
    camRgb->setInterleaved(false);

    nn->setBlobPath(BLOB_PATH);

    // Buffer one frame so the NN always receives the current and previous frame.
    script->setScript(R"(
        old = node.io['in'].get()
        while True:
            frame = node.io['in'].get()
            node.io['img1'].send(old)
            node.io['img2'].send(frame)
            old = frame
    )");

    rgbXout->setStreamName("rgb");
    castXout->setStreamName("cast");
    // Use the ImgFrame::Type spelling for consistency with the other Cast
    // examples (blur.cpp, concat.cpp); it aliases the same enumeration.
    cast->setOutputFrameType(dai::ImgFrame::Type::GRAY8);

    // Linking
    camRgb->preview.link(script->inputs["in"]);
    script->outputs["img1"].link(nn->inputs["img1"]);
    script->outputs["img2"].link(nn->inputs["img2"]);
    camRgb->video.link(rgbXout->input);
    nn->out.link(cast->input);
    cast->output.link(castXout->input);

    // Pipeline is defined, now we can connect to the device
    dai::Device device(p);
    auto qCam = device.getOutputQueue("rgb", 4, false);
    auto qCast = device.getOutputQueue("cast", 4, false);

    while(true) {
        auto colorFrame = qCam->get<dai::ImgFrame>();
        if(colorFrame) {
            cv::imshow("Color", colorFrame->getCvFrame());
        }

        auto inCast = qCast->get<dai::ImgFrame>();
        if(inCast) {
            cv::imshow("Diff", inCast->getCvFrame());
        }

        if(cv::waitKey(1) == 'q') {
            break;
        }
    }

    return 0;
}
2 changes: 1 addition & 1 deletion examples/FeatureTracker/feature_tracker.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class FeatureTrackerDrawer {
// for how many frames the feature is tracked
static int trackedFeaturesPathLength;

using featureIdType = decltype(dai::Point2f::x);
using featureIdType = decltype(dai::TrackedFeature::id);

std::unordered_set<featureIdType> trackedIDs;
std::unordered_map<featureIdType, std::deque<dai::Point2f>> trackedFeaturesPath;
Expand Down
Loading

0 comments on commit 9048745

Please sign in to comment.