diff --git a/Changelog.md b/Changelog.md
index d4487f62c..3acaf0482 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -6,6 +6,29 @@
 
 ### Gazebo Rendering 8.X
 
+### Gazebo Rendering 8.2.0
+
+1. Backport: Adding cone primitives.
+    * [Pull request #1003](https://github.com/gazebosim/gz-rendering/pull/1003)
+
+1. Fixes deleter passed to the std::shared_ptr
+    * [Pull request #1009](https://github.com/gazebosim/gz-rendering/pull/1009)
+
+1. ogre2: Set custom projection matrix for other types of cameras
+    * [Pull request #1002](https://github.com/gazebosim/gz-rendering/pull/1002)
+
+1. Fix gz-cmake declaration on package.xml (Fix windows builds)
+    * [Pull request #1005](https://github.com/gazebosim/gz-rendering/pull/1005)
+
+1. Add package.xml
+    * [Pull request #981](https://github.com/gazebosim/gz-rendering/pull/981)
+
+1. Workaround on warnings for Ubuntu Noble
+    * [Pull request #995](https://github.com/gazebosim/gz-rendering/pull/995)
+
+1. Ogre2RenderEngine: on Windows if useCurrentGLContext is specified, set the externalWindowsHandle ogre-next option
+    * [Pull request #992](https://github.com/gazebosim/gz-rendering/pull/992)
+
 ### Gazebo Rendering 8.1.1 (2024-04-10)
 
 1. Use relative install paths for plugin shared libraries and media files
diff --git a/examples/simple_demo_qml/GzRenderer.cc b/examples/simple_demo_qml/GzRenderer.cc
index 0b55cca07..d8cf3b4fb 100644
--- a/examples/simple_demo_qml/GzRenderer.cc
+++ b/examples/simple_demo_qml/GzRenderer.cc
@@ -224,13 +224,20 @@ void GzRenderer::InitialiseOnMainThread()
 //////////////////////////////////////////////////
 void GzRenderer::Render()
 {
-  // pre-render may regenerate textureId if the size changes
-  this->camera->PreRender();
-  this->textureId = this->camera->RenderTextureGLId();
-
   // render to texture
   this->camera->Update();
 
+  GLuint texIdSrgb = this->camera->RenderTextureGLId();
+
+  if (this->textureId != texIdSrgb)
+  {
+    glBindTexture(GL_TEXTURE_2D, texIdSrgb);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SRGB_DECODE_EXT,
+        GL_SKIP_DECODE_EXT);
+  }
+
+  this->textureId = texIdSrgb;
+
   // Move camera
   this->UpdateCamera();
 }
@@ -278,10 +285,6 @@ void GzRenderer::InitEngine()
   // quick check on sizing...
   gzmsg << "imageW: " << this->camera->ImageWidth() << "\n";
   gzmsg << "imageH: " << this->camera->ImageHeight() << "\n";
-
-  // pre-render will force texture creation and may update texture id
-  this->camera->PreRender();
-  this->textureId = this->camera->RenderTextureGLId();
 }
 
 //////////////////////////////////////////////////
diff --git a/examples/simple_demo_qml/ThreadRenderer.cpp b/examples/simple_demo_qml/ThreadRenderer.cpp
index 4aff4dee6..5eb75f97c 100644
--- a/examples/simple_demo_qml/ThreadRenderer.cpp
+++ b/examples/simple_demo_qml/ThreadRenderer.cpp
@@ -321,22 +321,25 @@ void TextureNode::NewTexture(int _id, const QSize &_size)
 void TextureNode::PrepareNode()
 {
   this->mutex.lock();
+  // new render engine texture ID
   int newId = this->id;
-  QSize size = this->size;
+  QSize newSize = this->size;
   this->id = 0;
   this->mutex.unlock();
+
   if (newId)
   {
     delete this->texture;
    this->texture = nullptr;
    // note: include QQuickWindow::TextureHasAlphaChannel if the rendered content
    // has alpha.
+
 #if QT_VERSION < QT_VERSION_CHECK(5, 14, 0)
 # ifndef _WIN32
 # pragma GCC diagnostic push
 # pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 # endif
    this->texture = this->window->createTextureFromId(newId, size);
    this->texture = this->window->createTextureFromId(newId, newSize);
 # ifndef _WIN32
 # pragma GCC diagnostic pop
 # endif
@@ -346,7 +349,7 @@ void TextureNode::PrepareNode()
        QQuickWindow::NativeObjectTexture,
        static_cast<void *>(&newId),
        0,
-        size);
+        newSize);
 #endif
 
    this->setTexture(this->texture);
diff --git a/ogre/src/OgreDistortionPass.cc b/ogre/src/OgreDistortionPass.cc
index 74722de6e..e6fa6fa87 100644
--- a/ogre/src/OgreDistortionPass.cc
+++ b/ogre/src/OgreDistortionPass.cc
@@ -196,8 +196,8 @@ void OgreDistortionPass::CreateRenderPass()
      distortedLocation,
      newDistortedCoordinates,
      currDistortedCoordinates;
-  unsigned int distortedIdx,
-      distortedCol,
+  unsigned int distortedIdx;
+  int distortedCol,
      distortedRow;
  double normalizedColLocation, normalizedRowLocation;
 
@@ -223,9 +223,9 @@ void OgreDistortionPass::CreateRenderPass()
            focalLength);
 
        // compute the index in the distortion map
-        distortedCol = static_cast<int>(round(distortedLocation.X() *
+        distortedCol = static_cast<int>(round(distortedLocation.X() *
            this->dataPtr->distortionTexWidth));
-        distortedRow = static_cast<int>(round(distortedLocation.Y() *
+        distortedRow = static_cast<int>(round(distortedLocation.Y() *
            this->dataPtr->distortionTexHeight));
 
        // Note that the following makes sure that, for significant distortions,
@@ -235,8 +235,11 @@ void OgreDistortionPass::CreateRenderPass()
        // nonlegacy distortion modes.
 
        // Make sure the distorted pixel is within the texture dimensions
-        if (distortedCol < this->dataPtr->distortionTexWidth &&
-            distortedRow < this->dataPtr->distortionTexHeight)
+        if (distortedCol >= 0 && distortedRow >= 0 &&
+            static_cast<unsigned int>(distortedCol) <
+                this->dataPtr->distortionTexWidth &&
+            static_cast<unsigned int>(distortedRow) <
+                this->dataPtr->distortionTexHeight)
        {
          distortedIdx = distortedRow * this->dataPtr->distortionTexWidth +
              distortedCol;
diff --git a/ogre/src/OgreWideAngleCamera.cc b/ogre/src/OgreWideAngleCamera.cc
index b6e3ad3bc..d496d35d2 100644
--- a/ogre/src/OgreWideAngleCamera.cc
+++ b/ogre/src/OgreWideAngleCamera.cc
@@ -452,7 +452,11 @@ void OgreWideAngleCamera::CreateWideAngleTexture()
 
  double vfov = 2.0 * atan(tan(this->HFOV().Radian() / 2.0) / ratio);
  this->dataPtr->ogreCamera->setAspectRatio(ratio);
-  this->dataPtr->ogreCamera->setFOVy(Ogre::Radian(vfov));
+  // Setting the fov is likely not necessary for the ogreCamera but
+  // clamp to max fov supported by ogre to avoid issues with building the
+  // frustum
+  this->dataPtr->ogreCamera->setFOVy(Ogre::Radian(
+    Ogre::Real(std::clamp(vfov, 0.0, GZ_PI))));
 
  // create the env cameras and textures
  this->CreateEnvCameras();
diff --git a/ogre2/src/CMakeLists.txt b/ogre2/src/CMakeLists.txt
index 0111d3200..e939e103f 100644
--- a/ogre2/src/CMakeLists.txt
+++ b/ogre2/src/CMakeLists.txt
@@ -17,7 +17,13 @@ set(engine_name "ogre2")
 gz_add_component(${engine_name} SOURCES ${sources} GET_TARGET_NAME ogre2_target)
 
 set(OGRE2_RESOURCE_PATH_STR "${OGRE2_RESOURCE_PATH}")
-string(REPLACE ";" ":" OGRE2_RESOURCE_PATH_STR "${OGRE2_RESOURCE_PATH}")
+# On non-Windows, we need to convert the CMake list delimiter (;) to the
+# list delimiter used in lists of paths in code (:).
+# On Windows, the list delimiter in code is already ;, no need to change it to :
+if(NOT WIN32)
+  string(REPLACE ";" ":" OGRE2_RESOURCE_PATH_STR "${OGRE2_RESOURCE_PATH}")
+endif()
+
 set_property(
  SOURCE Ogre2RenderEngine.cc
  PROPERTY COMPILE_DEFINITIONS
@@ -65,7 +71,10 @@ if (TARGET OpenGL::EGL)
  add_definitions(-DHAVE_EGL=1)
 endif()
 
-target_compile_definitions(${ogre2_target} PRIVATE $<$<CONFIG:Debug>:DEBUG=1 _DEBUG=1>)
+# You might need to uncomment the following `target_compile_definitions`
+# if you've built Ogre from source in Debug mode
+# https://github.com/OGRECave/ogre-next/blob/003f51a0a90d1cf93fbea3c7302565b07c4f87b0/OgreMain/include/OgrePlatform.h#L350-L372
+# target_compile_definitions(${ogre2_target} PRIVATE $<$<CONFIG:Debug>:DEBUG=1 _DEBUG=1>)
 
 set (versioned ${CMAKE_SHARED_LIBRARY_PREFIX}${PROJECT_NAME_LOWER}-${engine_name}${CMAKE_SHARED_LIBRARY_SUFFIX})
diff --git a/ogre2/src/Ogre2DepthCamera.cc b/ogre2/src/Ogre2DepthCamera.cc
index f591b3c94..f9d8d329b 100644
--- a/ogre2/src/Ogre2DepthCamera.cc
+++ b/ogre2/src/Ogre2DepthCamera.cc
@@ -92,15 +92,13 @@ class Ogre2DepthGaussianNoisePass : public Ogre2GaussianNoisePass
 /// \brief Private data for the Ogre2DepthCamera class
 class gz::rendering::Ogre2DepthCameraPrivate
 {
-  /// \brief The depth buffer
+  /// \brief The depth buffer - also the outgoing point cloud data used
+  /// by newRgbPointCloud event
  public: float *depthBuffer = nullptr;
 
  /// \brief Outgoing depth data, used by newDepthFrame event.
  public: float *depthImage = nullptr;
 
-  /// \brief Outgoing point cloud data, used by newRgbPointCloud event.
-  public: float *pointCloudImage = nullptr;
-
  /// \brief maximum value used for data outside sensor range
  public: float dataMaxVal = gz::math::INF_D;
 
@@ -316,12 +314,6 @@ void Ogre2DepthCamera::Destroy()
    this->dataPtr->depthImage = nullptr;
  }
 
-  if (this->dataPtr->pointCloudImage)
-  {
-    delete [] this->dataPtr->pointCloudImage;
-    this->dataPtr->pointCloudImage = nullptr;
-  }
-
  if (!this->ogreCamera)
    return;
 
@@ -1195,10 +1187,6 @@ void Ogre2DepthCamera::PostRender()
  {
    this->dataPtr->depthImage = new float[len];
  }
-  if (!this->dataPtr->pointCloudImage)
-  {
-    this->dataPtr->pointCloudImage = new float[len * channelCount];
-  }
 
  // fill depth data
  for (unsigned int i = 0; i < height; ++i)
@@ -1216,10 +1204,8 @@ void Ogre2DepthCamera::PostRender()
  // point cloud data
  if (this->dataPtr->newRgbPointCloud.ConnectionCount() > 0u)
  {
-    memcpy(this->dataPtr->pointCloudImage,
-        this->dataPtr->depthBuffer, len * channelCount * sizeof(float));
    this->dataPtr->newRgbPointCloud(
-        this->dataPtr->pointCloudImage, width, height, channelCount,
+        this->dataPtr->depthBuffer, width, height, channelCount,
        "PF_FLOAT32_RGBA");
 
    // Uncomment to debug color output
@@ -1229,7 +1215,7 @@ void Ogre2DepthCamera::PostRender()
    // for (unsigned int j = 0; j < width; ++j)
    // {
    //   float color =
-    //       this->dataPtr->pointCloudImage[step + j*channelCount + 3];
+    //       this->dataPtr->depthBuffer[step + j*channelCount + 3];
    //   // unpack rgb data
    //   uint32_t *rgba = reinterpret_cast<uint32_t *>(&color);
    //   unsigned int r = *rgba >> 24 & 0xFF;
@@ -1246,9 +1232,9 @@ void Ogre2DepthCamera::PostRender()
    // {
    //   for (unsigned int j = 0; j < width; ++j)
    //   {
-    //     gzdbg << "[" << this->dataPtr->pointCloudImage[i*width*4+j*4] << "]"
-    //       << "[" << this->dataPtr->pointCloudImage[i*width*4+j*4+1] << "]"
-    //       << "[" << this->dataPtr->pointCloudImage[i*width*4+j*4+2] << "],";
+    //     gzdbg << "[" << this->dataPtr->depthBuffer[i*width*4+j*4] << "]"
+    //       << "[" << this->dataPtr->depthBuffer[i*width*4+j*4+1] << "]"
+    //       << "[" << this->dataPtr->depthBuffer[i*width*4+j*4+2] << "],";
    //   }
    //   gzdbg << std::endl;
    // }
diff --git a/ogre2/src/Ogre2GpuRays.cc b/ogre2/src/Ogre2GpuRays.cc
index 293fbefb3..657dae232 100644
--- a/ogre2/src/Ogre2GpuRays.cc
+++ b/ogre2/src/Ogre2GpuRays.cc
@@ -127,14 +127,11 @@ class GZ_RENDERING_OGRE2_HIDDEN gz::rendering::Ogre2GpuRaysPrivate
                  unsigned int, unsigned int, unsigned int,
                  const std::string &)> newGpuRaysFrame;
 
-  /// \brief Raw buffer of gpu rays data.
-  public: float *gpuRaysBuffer = nullptr;
-
  /// \brief Outgoing gpu rays data, used by newGpuRaysFrame event.
  public: float *gpuRaysScan = nullptr;
 
-  /// \brief Cubemap cameras
-  public: Ogre::Camera *cubeCam[6];
+  /// \brief Cubemap camera
+  public: Ogre::Camera *cubeCam{nullptr};
 
  /// \brief Texture packed with cubemap face and uv data
  public: Ogre::TextureGpu *cubeUVTexture = nullptr;
@@ -567,7 +564,6 @@ Ogre2GpuRays::Ogre2GpuRays()
 
  for (unsigned int i = 0; i < this->dataPtr->kCubeCameraCount; ++i)
  {
-    this->dataPtr->cubeCam[i] = nullptr;
    this->dataPtr->ogreCompositorWorkspace1st[i] = nullptr;
    this->dataPtr->laserRetroMaterialSwitcher[i] = nullptr;
    this->dataPtr->firstPassTextures[i] = nullptr;
@@ -598,12 +594,6 @@ void Ogre2GpuRays::Destroy()
  if (!this->dataPtr->ogreCamera)
    return;
 
-  if (this->dataPtr->gpuRaysBuffer)
-  {
-    delete [] this->dataPtr->gpuRaysBuffer;
-    this->dataPtr->gpuRaysBuffer = nullptr;
-  }
-
  if (this->dataPtr->gpuRaysScan)
  {
    delete [] this->dataPtr->gpuRaysScan;
@@ -680,13 +670,10 @@ void Ogre2GpuRays::Destroy()
      ogreSceneManager->destroyCamera(this->dataPtr->ogreCamera);
      this->dataPtr->ogreCamera = nullptr;
 
-      for (unsigned int i = 0; i < this->dataPtr->kCubeCameraCount; ++i)
+      if (this->dataPtr->cubeCam)
      {
-        if (this->dataPtr->cubeCam[i])
-        {
-          ogreSceneManager->destroyCamera(this->dataPtr->cubeCam[i]);
-          this->dataPtr->cubeCam[i] = nullptr;
-        }
+        ogreSceneManager->destroyCamera(this->dataPtr->cubeCam);
+        this->dataPtr->cubeCam = nullptr;
      }
    }
  }
@@ -765,10 +752,13 @@ void Ogre2GpuRays::ConfigureCamera()
  // Configure first pass texture size
  // Each cubemap texture covers 90 deg FOV so determine number of samples
  // within the view for both horizontal and vertical FOV
-  unsigned int hs = static_cast<unsigned int>(
-      GZ_PI * 0.5 / hfovAngle.Radian() * this->RangeCount());
-  unsigned int vs = static_cast<unsigned int>(
-      GZ_PI * 0.5 / vfovAngle * this->VerticalRangeCount());
+  unsigned int hs = (hfovAngle.Radian() < GZ_PI_2) ? this->RangeCount() :
+      static_cast<unsigned int>(
+      GZ_PI_2 / hfovAngle.Radian() * this->RangeCount());
+
+  unsigned int vs = (vfovAngle < GZ_PI_2) ? this->VerticalRangeCount() :
+      static_cast<unsigned int>(
+      GZ_PI_2 / vfovAngle * this->VerticalRangeCount());
 
  // get the max number from the two
  unsigned int v = std::max(hs, vs);
@@ -782,12 +772,12 @@ void Ogre2GpuRays::ConfigureCamera()
  v |= v >> 16;
  v++;
 
-  // limit min texture size to 128
  // This is needed for large fov with low sample count,
  // e.g. 360 degrees and only 4 samples. Otherwise the depth data returned are
  // inaccurate.
  // \todo(anyone) For small fov, we shouldn't need such a high min texture size
-  // requirement, e.g. a single ray lidar only needs 1x1 texture. Look for ways
+  // requirement, e.g. a single ray lidar only needs 1x1 texture. However,
+  // using lower res textures also gives inaccurate results. Look for ways
  // to compute the optimal min texture size
  unsigned int min1stPassSamples = 128u;
 
@@ -986,6 +976,7 @@ void Ogre2GpuRays::Setup1stPass()
      Ogre::TextureFlags::RenderToTexture, Ogre::TextureTypes::Type2D);
  this->dataPtr->colorTexture->setResolution(this->dataPtr->w1st,
      this->dataPtr->h1st);
+  this->dataPtr->colorTexture->setNumMipmaps(1u);
  this->dataPtr->colorTexture->setPixelFormat(Ogre::PFG_R16_UNORM);
  this->dataPtr->colorTexture->scheduleTransitionTo(
      Ogre::GpuResidency::Resident);
@@ -996,6 +987,7 @@ void Ogre2GpuRays::Setup1stPass()
      Ogre::TextureFlags::RenderToTexture, Ogre::TextureTypes::Type2D);
  this->dataPtr->depthTexture->setResolution(this->dataPtr->w1st,
      this->dataPtr->h1st);
+  this->dataPtr->depthTexture->setNumMipmaps(1u);
  this->dataPtr->depthTexture->setPixelFormat(Ogre::PFG_D32_FLOAT);
  this->dataPtr->depthTexture->scheduleTransitionTo(
      Ogre::GpuResidency::Resident);
@@ -1006,6 +998,7 @@ void Ogre2GpuRays::Setup1stPass()
      Ogre::TextureFlags::RenderToTexture, Ogre::TextureTypes::Type2D);
  this->dataPtr->particleTexture->setResolution(this->dataPtr->w1st / 2u,
      this->dataPtr->h1st/ 2u);
+  this->dataPtr->particleTexture->setNumMipmaps(1u);
  this->dataPtr->particleTexture->setPixelFormat(Ogre::PFG_RGBA8_UNORM);
  this->dataPtr->particleTexture->scheduleTransitionTo(
      Ogre::GpuResidency::Resident);
@@ -1016,6 +1009,7 @@ void Ogre2GpuRays::Setup1stPass()
      Ogre::TextureFlags::RenderToTexture, Ogre::TextureTypes::Type2D);
  this->dataPtr->particleDepthTexture->setResolution(this->dataPtr->w1st / 2u,
      this->dataPtr->h1st / 2u);
+  this->dataPtr->particleDepthTexture->setNumMipmaps(1u);
  this->dataPtr->particleDepthTexture->setPixelFormat(Ogre::PFG_D32_FLOAT);
  this->dataPtr->particleDepthTexture->scheduleTransitionTo(
      Ogre::GpuResidency::Resident);
@@ -1057,32 +1051,18 @@ void Ogre2GpuRays::Setup1stPass()
 
  // create cubemap cameras and render to texture using 1st pass compositor
  Ogre::SceneManager *ogreSceneManager = this->scene->OgreSceneManager();
+  this->dataPtr->cubeCam = ogreSceneManager->createCamera(
+      this->Name() + "_env");
+  this->dataPtr->cubeCam->detachFromParent();
+  this->ogreNode->attachObject(this->dataPtr->cubeCam);
+  this->dataPtr->cubeCam->setFOVy(Ogre::Degree(90));
+  this->dataPtr->cubeCam->setAspectRatio(1);
+  this->dataPtr->cubeCam->setNearClipDistance(this->dataPtr->nearClipCube);
+  this->dataPtr->cubeCam->setFarClipDistance(this->FarClipPlane());
+  this->dataPtr->cubeCam->setFixedYawAxis(false);
+
  for (auto i : this->dataPtr->cubeFaceIdx)
  {
-    this->dataPtr->cubeCam[i] = ogreSceneManager->createCamera(
-        this->Name() + "_env" + std::to_string(i));
-    this->dataPtr->cubeCam[i]->detachFromParent();
-    this->ogreNode->attachObject(this->dataPtr->cubeCam[i]);
-    this->dataPtr->cubeCam[i]->setFOVy(Ogre::Degree(90));
-    this->dataPtr->cubeCam[i]->setAspectRatio(1);
-    this->dataPtr->cubeCam[i]->setNearClipDistance(this->dataPtr->nearClipCube);
-    this->dataPtr->cubeCam[i]->setFarClipDistance(this->FarClipPlane());
-    this->dataPtr->cubeCam[i]->setFixedYawAxis(false);
-    this->dataPtr->cubeCam[i]->yaw(Ogre::Degree(-90));
-    this->dataPtr->cubeCam[i]->roll(Ogre::Degree(-90));
-
-    // orient camera to create cubemap
-    if (i == 0)
-      this->dataPtr->cubeCam[i]->yaw(Ogre::Degree(-90));
-    else if (i == 1)
-      this->dataPtr->cubeCam[i]->yaw(Ogre::Degree(90));
-    else if (i == 2)
-      this->dataPtr->cubeCam[i]->pitch(Ogre::Degree(90));
-    else if (i == 3)
-      this->dataPtr->cubeCam[i]->pitch(Ogre::Degree(-90));
-    else if (i == 5)
-      this->dataPtr->cubeCam[i]->yaw(Ogre::Degree(180));
-
    // create render texture - these textures pack the range data
    // that will be used in the 2nd pass
    texName = this->Name() + "_first_pass_" + std::to_string(i);
@@ -1111,7 +1091,7 @@ void Ogre2GpuRays::Setup1stPass()
    ogreCompMgr->addWorkspace(
        this->scene->OgreSceneManager(),
        compoChannels,
-        this->dataPtr->cubeCam[i],
+        this->dataPtr->cubeCam,
        wsDefName,
        false, -1, 0, 0, Ogre::Vector4::ZERO, 0x00,
        this->dataPtr->kGpuRaysExecutionMask);
@@ -1231,7 +1211,23 @@ void Ogre2GpuRays::UpdateRenderTarget1stPass()
  // update the compositors
  for (auto i : this->dataPtr->cubeFaceIdx)
  {
-    this->scene->UpdateAllHeightmaps(this->dataPtr->cubeCam[i]);
+    this->dataPtr->cubeCam->setOrientation(Ogre::Quaternion::IDENTITY);
+    this->dataPtr->cubeCam->yaw(Ogre::Degree(-90));
+    this->dataPtr->cubeCam->roll(Ogre::Degree(-90));
+    // orient camera to its corresponding cubemap face
+    if (i == 0)
+      this->dataPtr->cubeCam->yaw(Ogre::Degree(-90));
+    else if (i == 1)
+      this->dataPtr->cubeCam->yaw(Ogre::Degree(90));
+    else if (i == 2)
+      this->dataPtr->cubeCam->pitch(Ogre::Degree(90));
+    else if (i == 3)
+      this->dataPtr->cubeCam->pitch(Ogre::Degree(-90));
+    else if (i == 5)
+      this->dataPtr->cubeCam->yaw(Ogre::Degree(180));
+
+    this->scene->UpdateAllHeightmaps(this->dataPtr->cubeCam);
+
    this->dataPtr->ogreCompositorWorkspace1st[i]->setEnabled(true);
    this->dataPtr->ogreCompositorWorkspace1st[i]->_validateFinalTarget();
@@ -1321,12 +1317,6 @@ void Ogre2GpuRays::PostRender()
  PixelFormat format = PF_FLOAT32_RGBA;
  unsigned int rawChannelCount = PixelUtil::ChannelCount(format);
  unsigned int bytesPerChannel = PixelUtil::BytesPerChannel(format);
-  int rawLen = width * height * rawChannelCount;
-
-  if (!this->dataPtr->gpuRaysBuffer)
-  {
-    this->dataPtr->gpuRaysBuffer = new float[rawLen];
-  }
 
  // blit data from gpu to cpu
  Ogre::Image2 image;
@@ -1334,19 +1324,6 @@ void Ogre2GpuRays::PostRender()
  Ogre::TextureBox box = image.getData(0u);
  float *bufferTmp = static_cast<float *>(box.data);
 
-  // TODO(anyone): It seems wasteful to have gpuRaysBuffer at all
-  // We should be able to convert directly from bufferTmp to gpuRaysScan
-
-  // copy data row by row. The texture box may not be a contiguous region of
-  // a texture
-  for (unsigned int i = 0; i < height; ++i)
-  {
-    unsigned int rawDataRowIdx = i * box.bytesPerRow / bytesPerChannel;
-    unsigned int rowIdx = i * width * rawChannelCount;
-    memcpy(&this->dataPtr->gpuRaysBuffer[rowIdx], &bufferTmp[rawDataRowIdx],
-        width * rawChannelCount * bytesPerChannel);
-  }
-
  // Metal does not support RGB32_FLOAT so the internal texture format is
  // RGBA32_FLOAT. For backward compatibility, output data is kept in RGB
  // format instead of RGBA
@@ -1359,21 +1336,19 @@ void Ogre2GpuRays::PostRender()
  // copy data from RGBA buffer to RGB buffer
  for (unsigned int row = 0; row < height; ++row)
  {
+    unsigned int rawDataRowIdx = row * box.bytesPerRow / bytesPerChannel;
+    unsigned int rowIdx = row * width * this->Channels();
+
    // the texture box step size could be larger than our image buffer step
    // size
    for (unsigned int column = 0; column < width; ++column)
    {
-      unsigned int idx = (row * width * this->Channels()) +
-          column * this->Channels();
-      unsigned int rawIdx = (row * width * rawChannelCount) +
-          column * rawChannelCount;
-
-      this->dataPtr->gpuRaysScan[idx] =
-          this->dataPtr->gpuRaysBuffer[rawIdx];
-      this->dataPtr->gpuRaysScan[idx + 1] =
-          this->dataPtr->gpuRaysBuffer[rawIdx + 1];
-      this->dataPtr->gpuRaysScan[idx + 2] =
-          this->dataPtr->gpuRaysBuffer[rawIdx + 2];
+      unsigned int idx = rowIdx + column * this->Channels();
+      unsigned int rawIdx = rawDataRowIdx + column * rawChannelCount;
+
+      this->dataPtr->gpuRaysScan[idx] = bufferTmp[rawIdx];
+      this->dataPtr->gpuRaysScan[idx + 1] = bufferTmp[rawIdx + 1];
+      this->dataPtr->gpuRaysScan[idx + 2] = bufferTmp[rawIdx + 2];
    }
  }
diff --git a/ogre2/src/Ogre2RenderEngine.cc b/ogre2/src/Ogre2RenderEngine.cc
index dfe35d670..e152fe39e 100644
--- a/ogre2/src/Ogre2RenderEngine.cc
+++ b/ogre2/src/Ogre2RenderEngine.cc
@@ -22,6 +22,8 @@
 #endif
 #include
 #include
+#include <gz/common/StringUtils.hh>
+#include <gz/common/SystemPaths.hh>
 #include
 #include
 
@@ -153,14 +155,15 @@ Ogre2RenderEngine::Ogre2RenderEngine() :
  this->dummyWindowId = 0;
 
  std::string ogrePath = std::string(OGRE2_RESOURCE_PATH);
-  std::vector<std::string> paths = common::split(ogrePath, ":");
+  std::vector<std::string> paths = common::Split(ogrePath,
+    common::SystemPaths::Delimiter());
  for (const auto &path : paths)
    this->ogrePaths.push_back(path);
 
  const char *env = std::getenv("OGRE2_RESOURCE_PATH");
  if (env)
  {
-    paths = common::split(std::string(env), ":");
+    paths = common::Split(std::string(env), common::SystemPaths::Delimiter());
    for (const auto &path : paths)
      this->ogrePaths.push_back(path);
  }
diff --git a/ogre2/src/Ogre2WideAngleCamera.cc b/ogre2/src/Ogre2WideAngleCamera.cc
index 398ec4434..921198cf5 100644
--- a/ogre2/src/Ogre2WideAngleCamera.cc
+++ b/ogre2/src/Ogre2WideAngleCamera.cc
@@ -1344,7 +1344,12 @@ void Ogre2WideAngleCamera::PrepareForFinalPass(Ogre::Pass *_pass)
  const double ratio = static_cast<double>(this->ImageWidth()) /
                       static_cast<double>(this->ImageHeight());
  const double vfov = 2.0 * atan(tan(this->HFOV().Radian() / 2.0) / ratio);
-  this->dataPtr->ogreCamera->setFOVy(Ogre::Radian(Ogre::Real(vfov)));
+
+  // Setting the fov is likely not necessary in the final pass but
+  // clamp to max fov supported by ogre to avoid issues with building the
+  // frustum
+  this->dataPtr->ogreCamera->setFOVy(Ogre::Radian(
+    Ogre::Real(std::clamp(vfov, 0.0, GZ_PI))));
 
  const float localHfov = static_cast<float>(this->HFOV().Radian());
diff --git a/src/ShaderParam.cc b/src/ShaderParam.cc
index 4e07e274b..e28616751 100644
--- a/src/ShaderParam.cc
+++ b/src/ShaderParam.cc
@@ -117,7 +117,7 @@ void ShaderParam::InitializeBuffer(uint32_t _count)
 {
  this->dataPtr->count = _count;
  this->dataPtr->buffer.reset(new float[_count],
-      std::default_delete<float[]>());
+      [](void *ptr) { delete [] static_cast<float *>(ptr); });
 }
 
 //////////////////////////////////////////////////
diff --git a/test/integration/gpu_rays.cc b/test/integration/gpu_rays.cc
index 63413af51..a5f18b826 100644
--- a/test/integration/gpu_rays.cc
+++ b/test/integration/gpu_rays.cc
@@ -697,8 +697,8 @@ TEST_F(GpuRaysTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(SingleRay))
  GTEST_SKIP() << "Unsupported on apple, see issue #35.";
 #endif
 
-  // Test GPU single ray box intersection.
-  // Place GPU above box looking downwards
+  // Test single ray box intersection.
+  // Place ray above box looking downwards
  // ray should intersect with center of box
  const double hMinAngle = 0.0;
@@ -713,7 +713,7 @@ TEST_F(GpuRaysTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(SingleRay))
 
  VisualPtr root = scene->RootVisual();
 
-  // Create first ray caster
+  // Create ray caster
  gz::math::Pose3d testPose(gz::math::Vector3d(0, 0, 7),
      gz::math::Quaterniond(0, GZ_PI/2.0, 0));
@@ -731,7 +731,7 @@ TEST_F(GpuRaysTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(SingleRay))
 
  // box in the center
  gz::math::Pose3d box01Pose(gz::math::Vector3d(0, 0, 4.5),
-    gz::math::Quaterniond::Identity);
+      gz::math::Quaterniond::Identity);
  VisualPtr visualBox1 = scene->CreateVisual("UnitBox1");
  visualBox1->AddGeometry(scene->CreateBox());
  visualBox1->SetWorldPosition(box01Pose.Pos());
@@ -754,9 +754,17 @@ TEST_F(GpuRaysTest, GZ_UTILS_TEST_DISABLED_ON_WIN32(SingleRay))
  int mid = 0;
  double unitBoxSize = 1.0;
  double expectedRangeAtMidPointBox = testPose.Pos().Z() -
-      (abs(box01Pose.Pos().Z()) + unitBoxSize/2);
+      (std::abs(box01Pose.Pos().Z()) + unitBoxSize / 2);
 
-  // rays caster 1 should see box01 and box02
+  // ray should detect box
+  EXPECT_NEAR(scan[mid], expectedRangeAtMidPointBox, LASER_TOL);
+
+  gz::math::Pose3d newBox01Pose(gz::math::Vector3d(0, 0, 3.5),
+      gz::math::Quaterniond::Identity);
+  visualBox1->SetWorldPosition(newBox01Pose.Pos());
+  gpuRays->Update();
+  expectedRangeAtMidPointBox = testPose.Pos().Z() -
+      (std::abs(newBox01Pose.Pos().Z()) + unitBoxSize / 2);
  EXPECT_NEAR(scan[mid], expectedRangeAtMidPointBox, LASER_TOL);
 
  c.reset();
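
Note on the ShaderParam.cc hunk above: the patch swaps the deleter handed to `std::shared_ptr` for a lambda that calls `delete []` after casting back to `float *`. The snippet below is a minimal standalone sketch of that pattern, not the library's actual class; the `std::shared_ptr<void>` member and the buffer size are assumptions made only for illustration. The point it demonstrates is that an allocation made with `new float[]` must be released with `delete []`, and when ownership is type-erased the custom deleter has to recover the element type itself.

```cpp
#include <memory>

int main()
{
  const unsigned int count = 16;  // hypothetical buffer size for this sketch

  // Type-erased ownership of a float array (assumed shared_ptr<void> holder).
  std::shared_ptr<void> buffer;

  // The deleter receives the pointer, casts it back to float *, and releases
  // the array with delete [] - mirroring the lambda in the ShaderParam.cc hunk.
  buffer.reset(new float[count],
      [](void *ptr) { delete [] static_cast<float *>(ptr); });

  return 0;  // the lambda deleter frees the array when the last owner drops
}
```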
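Note on the two wide-angle camera hunks: both derive the vertical FOV from the horizontal FOV and the aspect ratio and then clamp it to [0, pi] before handing it to the camera. The sketch below reproduces only that arithmetic with plain standard-library calls; the 300-degree HFOV and 4:3 ratio are made-up inputs, and a local `pi` constant stands in for GZ_PI. It shows why the clamp matters: for a very wide HFOV the tangent goes negative and the naive formula can return a negative vertical FOV.

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>

int main()
{
  const double pi = 3.14159265358979323846;
  const double hfov = 300.0 * pi / 180.0;  // assumed horizontal FOV (radians)
  const double ratio = 4.0 / 3.0;          // assumed image aspect ratio

  // Same formula as the diff: vertical FOV from horizontal FOV and ratio.
  const double vfov = 2.0 * std::atan(std::tan(hfov / 2.0) / ratio);

  // Clamp to the range a frustum can represent before configuring the camera,
  // matching the std::clamp(vfov, 0.0, GZ_PI) calls in the patch.
  const double clamped = std::clamp(vfov, 0.0, pi);

  std::cout << "vfov = " << vfov << ", clamped = " << clamped << "\n";
  return 0;
}
```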