diff --git a/include/fcl/narrowphase/detail/convexity_based_algorithm/gjk_libccd-inl.h b/include/fcl/narrowphase/detail/convexity_based_algorithm/gjk_libccd-inl.h
index e716fc5cf..7008b8863 100644
--- a/include/fcl/narrowphase/detail/convexity_based_algorithm/gjk_libccd-inl.h
+++ b/include/fcl/narrowphase/detail/convexity_based_algorithm/gjk_libccd-inl.h
@@ -1005,148 +1005,278 @@ static int __ccdEPA(const void *obj1, const void *obj2,
   return 0;
 }
 
-/** Returns the points p1 and p2 on the original shapes that correspond to point
- * p in the given simplex.*/
-static void extractClosestPoints(ccd_simplex_t* simplex,
-                                 ccd_vec3_t* p1, ccd_vec3_t* p2, ccd_vec3_t* p)
-{
+
+/** Reports true if p and q are coincident. */
+static bool are_coincident(const ccd_vec3_t& p, const ccd_vec3_t& q) {
+  // This uses a scale-dependent basis for determining coincidence. It examines
+  // each axis independently, and only if all three axes are sufficiently close
+  // (each relative to its own scale) are the two points considered coincident.
+  //
+  // For dimension i, two values are considered the same if:
+  //   |pᵢ - qᵢ| <= ε·max(1, |pᵢ|, |qᵢ|)
+  // and the points are coincident if the previous condition holds for all
+  // `i ∈ {0, 1, 2}` (i.e. the x-, y-, *and* z-dimensions).
+  using std::abs;
+  using std::max;
+
+  const ccd_real_t eps = constants<ccd_real_t>::eps();
+  // NOTE: Wrapping "1.0" with ccd_real_t accounts for Mac builds where ccd is
+  // actually float-based.
+  for (int i = 0; i < 3; ++i) {
+    const ccd_real_t scale =
+        max({ccd_real_t{1}, abs(p.v[i]), abs(q.v[i])}) * eps;
+    const ccd_real_t delta = abs(p.v[i] - q.v[i]);
+    if (delta > scale) return false;
+  }
+  return true;
+}
+
+/** Determines if the triangle defined by the three vertices has zero area.
+ Area can be zero for one of two reasons:
+   - the triangle is so small that the vertices are functionally coincident, or
+   - the vertices are co-linear.
+ Both conditions are computed with respect to machine precision.
+ @returns true if the area is zero.  */
+static bool triangle_area_is_zero(const ccd_vec3_t& a, const ccd_vec3_t& b,
+                                  const ccd_vec3_t& c) {
+  // First coincidence condition. This doesn't *explicitly* test for b and c
+  // being coincident. That will be captured in the subsequent co-linearity
+  // test. If b and c *were* coincident, it would be cheaper to perform the
+  // coincidence test than the co-linearity test.
+  // However, the expectation is that typically the triangle will not have zero
+  // area. In that case, we want to minimize the number of tests performed on
+  // average, so we prefer to eliminate one coincidence test.
+  if (are_coincident(a, b) || are_coincident(a, c)) return true;
+
+  // We're going to compute the *sine* of the angle θ between edges (given that
+  // the vertices are *not* coincident). If sin(θ) < ε, the edges are
+  // co-linear.
+  ccd_vec3_t AB, AC, n;
+  ccdVec3Sub2(&AB, &b, &a);
+  ccdVec3Sub2(&AC, &c, &a);
+  ccdVec3Normalize(&AB);
+  ccdVec3Normalize(&AC);
+  ccdVec3Cross(&n, &AB, &AC);
+  const ccd_real_t eps = constants<ccd_real_t>::eps();
+  // Second co-linearity condition.
+  if (ccdVec3Len2(&n) < eps * eps) return true;
+  return false;
+}
+
+/** Given a single support point, `q`, extracts the points `p1` and `p2`, the
+ points on object 1 and 2, respectively, in the support data of `q`.  */
+static void extractObjectPointsFromPoint(ccd_support_t *q, ccd_vec3_t *p1,
+                                         ccd_vec3_t *p2) {
+  // TODO(SeanCurtis-TRI): Determine if I should be demanding that p1 and p2
+  // are defined.
+  // Closest points are the ones stored in the simplex
+  if (p1) *p1 = q->v1;
+  if (p2) *p2 = q->v2;
+}
+
+/** Given two support points which define a line segment (`a` and `b`), and a
+ point `p` on that line segment, computes the points `p1` and `p2`, the points
+ on object 1 and 2, respectively, in the support data which correspond to `p`.
+ @pre `p = a + s(b - a), 0 <= s <= 1`  */
+static void extractObjectPointsFromSegment(ccd_support_t *a, ccd_support_t *b,
+                                           ccd_vec3_t *p1, ccd_vec3_t *p2,
+                                           ccd_vec3_t *p) {
+  // Closest points lie on the segment defined by the points in the simplex
+  // Let the segment be defined by points A and B. We can write p as
+  //
+  // p = A + s*AB, 0 <= s <= 1
+  // p - A = s*AB
+  ccd_vec3_t AB;
+  ccdVec3Sub2(&AB, &(b->v), &(a->v));
+
+  // This defines three equations, but we only need one. Taking the i-th
+  // component gives
+  //
+  // p_i - A_i = s*AB_i.
+  //
+  // Thus, s is given by
+  //
+  // s = (p_i - A_i)/AB_i.
+  //
+  // To avoid dividing by an AB_i ≪ 1, we choose i such that |AB_i| is
+  // maximized
+  ccd_real_t abs_AB_x{std::abs(ccdVec3X(&AB))};
+  ccd_real_t abs_AB_y{std::abs(ccdVec3Y(&AB))};
+  ccd_real_t abs_AB_z{std::abs(ccdVec3Z(&AB))};
+
+  ccd_real_t A_i, AB_i, p_i;
+  if (abs_AB_x >= abs_AB_y && abs_AB_x >= abs_AB_z) {
+    A_i = ccdVec3X(&(a->v));
+    AB_i = ccdVec3X(&AB);
+    p_i = ccdVec3X(p);
+  } else if (abs_AB_y >= abs_AB_z) {
+    A_i = ccdVec3Y(&(a->v));
+    AB_i = ccdVec3Y(&AB);
+    p_i = ccdVec3Y(p);
+  } else {
+    A_i = ccdVec3Z(&(a->v));
+    AB_i = ccdVec3Z(&AB);
+    p_i = ccdVec3Z(p);
+  }
+
+  if (std::abs(AB_i) < constants<ccd_real_t>::eps()) {
+    // Points are coincident; treat as a single point.
+    extractObjectPointsFromPoint(a, p1, p2);
+    return;
+  }
+
+  auto calc_p = [](ccd_vec3_t *p_a, ccd_vec3_t *p_b, ccd_vec3_t *p,
+                   ccd_real_t s) {
+    ccd_vec3_t sAB;
+    ccdVec3Sub2(&sAB, p_b, p_a);
+    ccdVec3Scale(&sAB, s);
+    ccdVec3Copy(p, p_a);
+    ccdVec3Add(p, &sAB);
+  };
+
+  // TODO(SeanCurtis-TRI): If p1 or p2 is null, there seems little point in
+  // calling this method. It seems that both of these being non-null should be
+  // a *requirement*. Determine that this is the case and do so.
+  ccd_real_t s = (p_i - A_i) / AB_i;
+
+  if (p1) {
+    // p1 = A1 + s*A1B1
+    calc_p(&(a->v1), &(b->v1), p1, s);
+  }
+  if (p2) {
+    // p2 = A2 + s*A2B2
+    calc_p(&(a->v2), &(b->v2), p2, s);
+  }
+}
+
+/** Returns the points `p1` and `p2` on the original shapes that correspond to
+ point `p` in the given simplex.
+ @pre simplex size <= 3.
+ @pre p lies _on_ the simplex (i.e., within the triangle, line segment, or is
+ coincident with the point).  */
+static void extractClosestPoints(ccd_simplex_t *simplex, ccd_vec3_t *p1,
+                                 ccd_vec3_t *p2, ccd_vec3_t *p) {
   const int simplex_size = ccdSimplexSize(simplex);
   assert(simplex_size <= 3);
 
   if (simplex_size == 1)
   {
-    // Closest points are the ones stored in the simplex
-    if(p1) *p1 = simplex->ps[simplex->last].v1;
-    if(p2) *p2 = simplex->ps[simplex->last].v2;
+    extractObjectPointsFromPoint(&simplex->ps[0], p1, p2);
+  }
+  else if (simplex_size == 2)
+  {
+    extractObjectPointsFromSegment(&simplex->ps[0], &simplex->ps[1], p1, p2, p);
   }
-  else if (simplex_size ==2)
+  else // simplex_size == 3
   {
-    // Closest points lie on the segment defined by the points in the simplex
-    // Let the segment be defined by points A and B. We can write p as
-    //
-    // p = A + s*AB, 0 <= s <= 1
-    // p - A = s*AB
-    ccd_vec3_t AB;
+    if (triangle_area_is_zero(simplex->ps[0].v, simplex->ps[1].v,
+                              simplex->ps[2].v)) {
+      // The triangle is degenerate; compute the nearest point to a line
+      // segment. The segment is defined by the most-distant vertex pair.
+      int a_index, b_index;
+      ccd_vec3_t AB, AC, BC;
       ccdVec3Sub2(&AB, &(simplex->ps[1].v), &(simplex->ps[0].v));
-
-    // This defines three equations, but we only need one. Taking the i-th
-    // component gives
-    //
-    // p_i - A_i = s*AB_i.
-    //
-    // Thus, s is given by
-    //
-    // s = (p_i - A_i)/AB_i.
-    //
-    // To avoid dividing by an AB_i ≪ 1, we choose i such that |AB_i| is
-    // maximized
-    ccd_real_t abs_AB_x{std::abs(ccdVec3X(&AB))};
-    ccd_real_t abs_AB_y{std::abs(ccdVec3Y(&AB))};
-    ccd_real_t abs_AB_z{std::abs(ccdVec3Z(&AB))};
-    ccd_real_t s{0};
-
-    if (abs_AB_x >= abs_AB_y && abs_AB_x >= abs_AB_z) {
-      ccd_real_t A_x{ccdVec3X(&(simplex->ps[0].v))};
-      ccd_real_t AB_x{ccdVec3X(&AB)};
-      ccd_real_t p_x{ccdVec3X(p)};
-      s = (p_x - A_x) / AB_x;
-    } else if (abs_AB_y >= abs_AB_z) {
-      ccd_real_t A_y{ccdVec3Y(&(simplex->ps[0].v))};
-      ccd_real_t AB_y{ccdVec3Y(&AB)};
-      ccd_real_t p_y{ccdVec3Y(p)};
-      s = (p_y - A_y) / AB_y;
+      ccdVec3Sub2(&AC, &(simplex->ps[2].v), &(simplex->ps[0].v));
+      ccdVec3Sub2(&BC, &(simplex->ps[2].v), &(simplex->ps[1].v));
+      ccd_real_t AB_len2 = ccdVec3Len2(&AB);
+      ccd_real_t AC_len2 = ccdVec3Len2(&AC);
+      ccd_real_t BC_len2 = ccdVec3Len2(&BC);
+      if (AB_len2 >= AC_len2 && AB_len2 >= BC_len2) {
+        a_index = 0;
+        b_index = 1;
+      } else if (AC_len2 >= AB_len2 && AC_len2 >= BC_len2) {
+        a_index = 0;
+        b_index = 2;
     } else {
-      ccd_real_t A_z{ccdVec3Z(&(simplex->ps[0].v))};
-      ccd_real_t AB_z{ccdVec3Z(&AB)};
-      ccd_real_t p_z{ccdVec3Z(p)};
-      s = (p_z - A_z) / AB_z;
+        a_index = 1;
+        b_index = 2;
     }
+      extractObjectPointsFromSegment(&simplex->ps[a_index],
+                                     &simplex->ps[b_index], p1, p2, p);
+      return;
+    }
 
-    if (p1)
-    {
-      // p1 = A1 + s*A1B1
-      ccd_vec3_t sAB;
-      ccdVec3Sub2(&sAB, &(simplex->ps[1].v1), &(simplex->ps[0].v1));
-      ccdVec3Scale(&sAB, s);
-      ccdVec3Copy(p1, &(simplex->ps[0].v1));
-      ccdVec3Add(p1, &sAB);
-    }
-    if (p2)
-    {
-      // p2 = A2 + s*A2B2
-      ccd_vec3_t sAB;
-      ccdVec3Sub2(&sAB, &(simplex->ps[1].v2), &(simplex->ps[0].v2));
-      ccdVec3Scale(&sAB, s);
-      ccdVec3Copy(p2, &(simplex->ps[0].v2));
-      ccdVec3Add(p2, &sAB);
-    }
-  }
-  else // simplex_size == 3
-  {
-    // Closest points lie in the triangle defined by the points in the simplex
-    ccd_vec3_t AB, AC, n, v_prime, AB_cross_v_prime, AC_cross_v_prime;
-    // Let the triangle be defined by points A, B, and C. The triangle lies in
-    // the plane that passes through A, whose normal is given by n̂, where
+    // Compute the barycentric coordinates of point p in triangle ABC.
     //
-    // n = AB × AC
-    // n̂ = n / ‖n‖
-    ccdVec3Sub2(&AB, &(simplex->ps[1].v), &(simplex->ps[0].v));
-    ccdVec3Sub2(&AC, &(simplex->ps[2].v), &(simplex->ps[0].v));
-    ccdVec3Cross(&n, &AB, &AC);
-
-    // Since p lies in ABC, it can be expressed as
+    //             A
+    //             ╱╲             p = αA + βB + γC
+    //            ╱ |╲
+    //           ╱  | ╲           α = 1 - β - γ
+    //          ╱ p |  ╲          β = AREA(pAC) / AREA(ABC)
+    //         ╱   / \  ╲         γ = AREA(pAB) / AREA(ABC)
+    //        ╱__/_____\_╲
+    //       B            C       AREA(XYZ) = |r_XY × r_XZ| / 2
     //
-    // p = A + p'
-    // p' = s*AB + t*AC
+    // Rewrite coordinates in terms of cross products.
     //
-    // where 0 <= s, t, s+t <= 1.
-    ccdVec3Sub2(&v_prime, p, &(simplex->ps[0].v));
-
-    // To find the corresponding v1 and v2, we need
-    // values for s and t. Taking cross products with AB and AC gives the
-    // following system:
+    // β = AREA(pAC) / AREA(ABC) = |r_Ap × r_AC| / |r_AB × r_AC|
+    // γ = AREA(pAB) / AREA(ABC) = |r_AB × r_Ap| / |r_AB × r_AC|
     //
-    // AB × p' = t*(AB × AC) = t*n
-    // AC × p' = -s*(AB × AC) = -s*n
-    ccdVec3Cross(&AB_cross_v_prime, &AB, &v_prime);
-    ccdVec3Cross(&AC_cross_v_prime, &AC, &v_prime);
-
-    // To convert this to a system of scalar equations, we take the dot product
-    // with n:
+    // NOTE: There are multiple options for the cross products, these have been
+    // selected to re-use as many symbols as possible.
    //
-    // n ⋅ (AB × p') = t * ‖n‖²
-    // n ⋅ (AC × p') = -s * ‖n‖²
-    ccd_real_t norm_squared_n{ccdVec3Len2(&n)};
-
-    // Therefore, s and t are given by
+    // Solving for β and γ:
+    //
+    // β = |r_Ap × r_AC| / |r_AB × r_AC|
+    // β = |r_Ap × r_AC| / |n|            n ≙ r_AB × r_AC, n̂ = n / |n|
+    // β = n̂·(r_Ap × r_AC) / n̂·n          This step arises from the fact
+    //                                    that (r_Ap × r_AC) and n point
+    //                                    in the same direction. It
+    //                                    allows us to take a single sqrt
+    //                                    instead of three.
+    // β = (n/|n|)·(r_Ap × r_AC) / (n/|n|)·n
+    // β = n·(r_Ap × r_AC) / n·n
+    // β = n·(r_Ap × r_AC) / |n|²
     //
-    // s = -n ⋅ (AC × p') / ‖n‖²
-    // t = n ⋅ (AB × p') / ‖n‖²
-    ccd_real_t s{-ccdVec3Dot(&n, &AC_cross_v_prime) / norm_squared_n};
-    ccd_real_t t{ccdVec3Dot(&n, &AB_cross_v_prime) / norm_squared_n};
+    // A similar process to solve for gamma
+    // γ = n·(r_AB × r_Ap) / |n|²
+
+    // Compute n and |n|².
+    ccd_vec3_t r_AB, r_AC, n;
+    ccdVec3Sub2(&r_AB, &(simplex->ps[1].v), &(simplex->ps[0].v));
+    ccdVec3Sub2(&r_AC, &(simplex->ps[2].v), &(simplex->ps[0].v));
+    ccdVec3Cross(&n, &r_AB, &r_AC);
+    ccd_real_t norm_squared_n{ccdVec3Len2(&n)};
 
-    if (p1)
-    {
-      // p1 = A1 + s*A1B1 + t*A1C1
-      ccd_vec3_t sAB, tAC;
-      ccdVec3Sub2(&sAB, &(simplex->ps[1].v1), &(simplex->ps[0].v1));
-      ccdVec3Scale(&sAB, s);
-      ccdVec3Sub2(&tAC, &(simplex->ps[2].v1), &(simplex->ps[0].v1));
-      ccdVec3Scale(&tAC, t);
-      ccdVec3Copy(p1, &(simplex->ps[0].v1));
-      ccdVec3Add(p1, &sAB);
-      ccdVec3Add(p1, &tAC);
+    // Compute r_Ap.
+    ccd_vec3_t r_Ap;
+    ccdVec3Sub2(&r_Ap, p, &(simplex->ps[0].v));
+
+    // Compute the cross products in the numerators.
+    ccd_vec3_t r_Ap_cross_r_AC, r_AB_cross_r_Ap;
+    ccdVec3Cross(&r_Ap_cross_r_AC, &r_Ap, &r_AC);  // r_Ap × r_AC
+    ccdVec3Cross(&r_AB_cross_r_Ap, &r_AB, &r_Ap);  // r_AB × r_Ap
+
+    // Compute beta and gamma.
+    ccd_real_t beta{ccdVec3Dot(&n, &r_Ap_cross_r_AC) / norm_squared_n};
+    ccd_real_t gamma{ccdVec3Dot(&n, &r_AB_cross_r_Ap) / norm_squared_n};
+
+    // Evaluate barycentric interpolation (with the locally defined barycentric
+    // coordinates).
+    auto interpolate = [&beta, &gamma](const ccd_vec3_t& r_WA,
+                                       const ccd_vec3_t& r_WB,
+                                       const ccd_vec3_t& r_WC,
+                                       ccd_vec3_t* r_WP) {
+      // r_WP = r_WA + β * r_AB + γ * r_AC
+      ccdVec3Copy(r_WP, &r_WA);
+
+      ccd_vec3_t beta_r_AB;
+      ccdVec3Sub2(&beta_r_AB, &r_WB, &r_WA);
+      ccdVec3Scale(&beta_r_AB, beta);
+      ccdVec3Add(r_WP, &beta_r_AB);
+
+      ccd_vec3_t gamma_r_AC;
+      ccdVec3Sub2(&gamma_r_AC, &r_WC, &r_WA);
+      ccdVec3Scale(&gamma_r_AC, gamma);
+      ccdVec3Add(r_WP, &gamma_r_AC);
+    };
+
+    if (p1) {
+      interpolate(simplex->ps[0].v1, simplex->ps[1].v1, simplex->ps[2].v1, p1);
     }
-    if (p2)
-    {
-      // p2 = A2 + s*A2B2 + t*A2C2
-      ccd_vec3_t sAB, tAC;
-      ccdVec3Sub2(&sAB, &(simplex->ps[1].v2), &(simplex->ps[0].v2));
-      ccdVec3Scale(&sAB, s);
-      ccdVec3Sub2(&tAC, &(simplex->ps[2].v2), &(simplex->ps[0].v2));
-      ccdVec3Scale(&tAC, t);
-      ccdVec3Copy(p2, &(simplex->ps[0].v2));
-      ccdVec3Add(p2, &sAB);
-      ccdVec3Add(p2, &tAC);
+
+    if (p2) {
+      interpolate(simplex->ps[0].v2, simplex->ps[1].v2, simplex->ps[2].v2, p2);
     }
   }
 }
diff --git a/test/narrowphase/detail/convexity_based_algorithm/CMakeLists.txt b/test/narrowphase/detail/convexity_based_algorithm/CMakeLists.txt
index 65a6843b3..82f645866 100644
--- a/test/narrowphase/detail/convexity_based_algorithm/CMakeLists.txt
+++ b/test/narrowphase/detail/convexity_based_algorithm/CMakeLists.txt
@@ -1,5 +1,6 @@
 set(tests
   test_gjk_libccd-inl.cpp
+  test_gjk_libccd-inl_extractClosestPoints.cpp
 )
 
 # Build all the tests
diff --git a/test/narrowphase/detail/convexity_based_algorithm/test_gjk_libccd-inl_extractClosestPoints.cpp b/test/narrowphase/detail/convexity_based_algorithm/test_gjk_libccd-inl_extractClosestPoints.cpp
new file mode 100644
index 000000000..a8b565290
--- /dev/null
+++ b/test/narrowphase/detail/convexity_based_algorithm/test_gjk_libccd-inl_extractClosestPoints.cpp
@@ -0,0 +1,584 @@
+/*
+ * Software License Agreement (BSD License)
+ *
+ *  Copyright (c) 2018. Toyota Research Institute
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of CNRS-LAAS and AIST nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @author Sean Curtis */
+
+#include "fcl/narrowphase/detail/convexity_based_algorithm/gjk_libccd-inl.h"
+
+#include <gtest/gtest.h>
+#include <Eigen/Dense>
+
+#include "fcl/common/types.h"
+
+namespace fcl {
+namespace detail {
+
+using Vector3d = Vector3<ccd_real_t>;
+
+// Helper functions to work with libccd types -- the ccd_vec3_t is a nuisance.
+// This allows working with Eigen Vector3d's instead.
+
+ccd_vec3_t eigen_to_ccd(const Vector3d& vector) {
+  ccd_vec3_t out;
+  out.v[0] = vector(0);
+  out.v[1] = vector(1);
+  out.v[2] = vector(2);
+  return out;
+}
+
+Vector3d ccd_to_eigen(const ccd_vec3_t& vector) {
+  // TODO(SeanCurtis-TRI): When libccd is *always* double precision, this can
+  // become: `return Vector3d{vector.v};`
+  return Vector3d{vector.v[0], vector.v[1], vector.v[2]};
+}
+
+// Tests that the given vector and libccd vector are the same within a given
+// tolerance.
+::testing::AssertionResult are_same(const Vector3d& expected,
+                                    const ccd_vec3_t& tested,
+                                    double tolerance) {
+  if (tolerance < 0) {
+    return ::testing::AssertionFailure() << "Invalid tolerance: "
+                                         << tolerance;
+  }
+
+  Vector3d ccd = ccd_to_eigen(tested);
+  Vector3d error = (expected - ccd).cwiseAbs();
+
+  for (int i = 0; i < 3; ++i) {
+    if (error(i) > tolerance) {
+      return ::testing::AssertionFailure()
+          << "Values at index " << i
+          << " exceed tolerance; " << expected(i) << " vs "
+          << ccd(i) << ", diff = " << error(i)
+          << ", tolerance = " << tolerance << "\nexpected = "
+          << expected.transpose() << "\ntested = "
+          << ccd.transpose() << "\n|delta| = "
+          << error.transpose();
+    }
+  }
+  return ::testing::AssertionSuccess();
+}
+
+// These are tests on the extractClosestPoints() function (and its underlying
+// support functions).
+//
+// These functions map a simplex and a point of interest on the "surface" of
+// the simplex into a pair of points. The simplex is defined by support
+// vertices (ccd_support_t). Each support vertex contains three vectors in R3.
+// They represent:
+//   1. A vertex `v` on the boundary of the Minkowski difference of two
+//      objects: O₁ ⊕ -O₂. It is, by definition, the difference of two vertex
+//      positions on the two objects: `v = v₁ - v₂`.
+//   2. The vertex `v₁` on O₁ that contributes to the definition of `v`.
+//   3. The vertex `v₂` on O₂ that contributes to the definition of `v`.
+//
+// extractClosestPoints() supports 1-, 2- and 3-simplices.
+//
+// The corresponding points are written to the output parameters. If an output
+// parameter is null, that value is not computed. These tests exercise all
+// permutations of the two output parameters.
+
+// Simple wrapper to facilitate working with Eigen primitives in place of
+// libccd primitives.
+bool are_coincident(const Vector3d& p, const Vector3d& q) {
+  return libccd_extension::are_coincident(eigen_to_ccd(p), eigen_to_ccd(q));
+}
+
+// Tests the `are_coincident` function.
+GTEST_TEST(DegenerateGeometry, CoincidentPoints) {
+  // The coincidence test uses this as the threshold.
+  const ccd_real_t eps = constants<ccd_real_t>::eps();
+  const ccd_real_t almost_eps = eps * 0.75;
+  const ccd_real_t extra_eps = eps * 1.5;
+
+  Vector3d p{1, 1, 1};
+  Vector3d q{p};
+  // Exact coincidence at unit scale (down to the last bit)
+  EXPECT_TRUE(are_coincident(p, q));
+
+  // Coincidence within a unit-scaled epsilon.
+  EXPECT_TRUE(are_coincident(p + Vector3d{almost_eps, 0, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, almost_eps, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, 0, almost_eps}, q));
+
+  // Distinct points just outside a unit-scaled epsilon.
+  EXPECT_FALSE(are_coincident(p + Vector3d{extra_eps, 0, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, extra_eps, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, 0, extra_eps}, q));
+
+  // Coincidence within a larger-than-unit-scale scale factor.
+  const double scale = 100;
+  p << scale, scale, scale;
+  q = p;
+  // Exact coincidence at a larger-than-unit-scale (down to the last bit).
+  EXPECT_TRUE(are_coincident(p, q));
+
+  // Coincidence within a larger-than-unit-scale epsilon.
+  EXPECT_TRUE(are_coincident(p + Vector3d{almost_eps * scale, 0, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, almost_eps * scale, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, 0, almost_eps * scale}, q));
+
+  // Distinct points just outside a larger-than-unit-scaled epsilon.
+  EXPECT_FALSE(are_coincident(p + Vector3d{extra_eps * scale, 0, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, extra_eps * scale, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, 0, extra_eps * scale}, q));
+
+  // Coincidence within a smaller-than-unit-scale scale factor. NOTE: eps
+  // stays at an *absolute* tolerance as the scale gets smaller.
+  p << 0.01, 0.01, 0.01;
+  q = p;
+  // Exact coincidence at a smaller-than-unit-scale (down to the last bit).
+  EXPECT_TRUE(are_coincident(p, q));
+
+  // Coincidence within a smaller-than-unit-scale epsilon.
+  EXPECT_TRUE(are_coincident(p + Vector3d{almost_eps, 0, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, almost_eps, 0}, q));
+  EXPECT_TRUE(are_coincident(p + Vector3d{0, 0, almost_eps}, q));
+
+  // Distinct points just outside a smaller-than-unit-scaled epsilon.
+  EXPECT_FALSE(are_coincident(p + Vector3d{extra_eps, 0, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, extra_eps, 0}, q));
+  EXPECT_FALSE(are_coincident(p + Vector3d{0, 0, extra_eps}, q));
+}
+
+// Wrapper to allow invocation of triangle_area_is_zero with eigen primitives.
+bool triangle_area_is_zero(const Vector3d& a, const Vector3d& b,
+                           const Vector3d& c) {
+  return fcl::detail::libccd_extension::triangle_area_is_zero(eigen_to_ccd(a),
+                                                              eigen_to_ccd(b),
+                                                              eigen_to_ccd(c));
+}
+
+// Tests the `triangle_area_is_zero()` function. NOTE: This computation
+// makes use of `are_coincident()`. Only a single test of coincident
+// vertices is provided (to exercise the code path). The permutations for how
+// two points can be considered coincident are covered in their own tests.
+GTEST_TEST(DegenerateGeometry, ZeroAreaTriangle) {
+  using std::asin;
+  Vector3d a{0, 0, 0};
+  Vector3d b{1, 0, 0};
+  Vector3d c{0, 1, 0};
+
+  // Viable triangle
+  EXPECT_FALSE(triangle_area_is_zero(a, b, c));
+
+  // Triangle with a token pair of coincident vertices to make sure that case
+  // is detected.
+  EXPECT_TRUE(triangle_area_is_zero(a, a, c));
+
+  // Co-linearity tests.
+
+  // Obvious co-linearity
+  EXPECT_TRUE(triangle_area_is_zero(a, b, 3 * b));
+
+  // Test co-linearity
+  // The co-linear threshold is based on the angles of the triangle. If the
+  // triangle has an angle θ, such that |sin(θ)| < ε, then the triangle is
+  // considered to be degenerate. This condition implicitly defines an envelope
+  // around θ. We define θₑ such that sin(θₑ) = ε. The condition |sin(θ)| < ε
+  // implies |θ| < θₑ. So, if the smallest angle is less than θₑ, the triangle
+  // will be considered co-linear. If the smallest angle is larger than θₑ, the
+  // triangle is considered valid.
+  //
+  // The test wants to provide proof as to the boundary of the tolerance
+  // envelope. Therefore, it constructs two triangles: one whose smallest
+  // angle is θₑ - δ and one whose smallest angle is θₑ + δ, for some suitably
+  // small δ.
+  //
+  // The `triangle_area_is_zero()` function has a second failure condition:
+  // coincident points. We'll make sure that the points are all far enough away
+  // from each other that this failure condition won't be triggered.
+  //
+  // The triangle consists of three vertices: A, B, C.
+  //   A: [0, 0, 0] (for simplicity).
+  //   B: [1, 1, 1] (again, for simplicity).
+  //   C: We must define C such that the angle ∠CAB is θ = θₑ ± δ.
+  //
+  // We'll do it by construction:
+  //   1. Pick a vector v perpendicular to [1, 1, 1] (e.g., v = [1, 1, -2]).
+  //   2. C' = B + v̂ * |B|·tan(θ) (with v̂ = v / |v|). This produces a point C'
+  //      that:
+  //      a) forms a triangle where ∠C'AB < θₑ, if θ < θₑ, but
+  //      b) the points B and C' *may* be coincident.
+  //   3. Move the point C' farther away (in the AC' direction). This preserves
+  //      the angle, but moves C' away from B. So, C ≙ s·C' (for some scalar
+  //      s ≠ 1). This trick works because A is the origin and we generally
+  //      assume that |C'| >> ε.
+  //
+  // This triangle illustrates the construction (but rotated for ascii art).
+  //
+  //          A
+  //         /|        θ = ∠BAC = ∠BAC'
+  //        / |
+  //   C' /__|
+  //      /  /  B
+  //     /  /
+  //     //
+  //    C
+
+  const ccd_real_t eps = constants<ccd_real_t>::eps();
+  const ccd_real_t theta_e = asin(eps);
+
+  a << 0, 0, 0;
+  b << 1, 1, 1;
+  const Vector3d v_hat = Vector3d(1, 1, -2).normalized();
+
+  // Triangle where θ < θₑ.
+  const ccd_real_t colinear_theta = tan(theta_e * 0.75);
+  Vector3d c_prime = b + (b.norm() * colinear_theta) * v_hat;
+  c = c_prime * 2;
+  EXPECT_FALSE(are_coincident(b, c));
+  EXPECT_TRUE(triangle_area_is_zero(a, b, c));
+
+  // Triangle where θ > θₑ.
+  const ccd_real_t good_tan = tan(theta_e * 1.5);
+  c_prime = b + (b.norm() * good_tan) * v_hat;
+  c = c_prime * 2;
+  // Confirm the test doesn't report false because the points are coincident.
+  EXPECT_FALSE(are_coincident(b, c));
+  ASSERT_FALSE(triangle_area_is_zero(a, b, c));
+}
+
+// This class creates a single simplex. It is the Minkowski difference of
+// a triangle and a vertex.
+class ExtractClosestPoint : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    // Configure a 3-simplex; "last" is the index of the last valid vertex in
+    // the simplex. For an n-simplex, last is always n - 1.
+    simplex_.last = 2;
+    for (int i = 0; i < 3; ++i) {
+      const Vector3d minkowski_diff{v0_ - t1_[i]};
+      write_support(minkowski_diff, v0_, t1_[i], &simplex_.ps[i]);
+    }
+  }
+
+  // Write the three Eigen vector values into a ccd support vector.
+  static void write_support(const Vector3d& minkowski_diff, const Vector3d& v0,
+                            const Vector3d& v1, ccd_support_t* support) {
+    support->v = eigen_to_ccd(minkowski_diff);
+    support->v1 = eigen_to_ccd(v0);
+    support->v2 = eigen_to_ccd(v1);
+  }
+
+  // Performs the common work of evaluating extractClosestPoints() on a
+  // permutation of parameters.
+  void EvaluateExtractClosestPoint(ccd_simplex_t* simplex,
+                                   const Vector3d& p0_expected,
+                                   const Vector3d& p1_expected,
+                                   ccd_vec3_t* closest,
+                                   const char* message) {
+    using fcl::detail::libccd_extension::extractClosestPoints;
+
+    const Vector3d& dummy1{-1, -2, -3};
+    const Vector3d& dummy2{-2, -3, -4};
+    ccd_vec3_t p0 = eigen_to_ccd(dummy1);
+    ccd_vec3_t p1 = eigen_to_ccd(dummy2);
+
+    // Confirm the expected solutions are not the dummy values.
+    EXPECT_FALSE(are_same(p0_expected, p0, kTolerance));
+    EXPECT_FALSE(are_same(p1_expected, p1, kTolerance));
+
+    // Test extraction of neither.
+    EXPECT_NO_THROW(extractClosestPoints(simplex, nullptr, nullptr, closest))
+        << message;
+
+    // Test extraction of p0.
+    EXPECT_NO_THROW(extractClosestPoints(simplex, &p0, nullptr, closest))
+        << message;
+    EXPECT_TRUE(are_same(p0_expected, p0, kTolerance)) << message;
+
+    // Test extraction of p1.
+    EXPECT_NO_THROW(extractClosestPoints(simplex, nullptr, &p1, closest))
+        << message;
+    EXPECT_TRUE(are_same(p1_expected, p1, kTolerance)) << message;
+
+    // Test extraction of both.
+    p0 = eigen_to_ccd(dummy1);
+    p1 = eigen_to_ccd(dummy2);
+    EXPECT_NO_THROW(extractClosestPoints(simplex, &p0, &p1, closest))
+        << message;
+    EXPECT_TRUE(are_same(p0_expected, p0, kTolerance)) << message;
+    EXPECT_TRUE(are_same(p1_expected, p1, kTolerance)) << message;
+  }
+
+  // Perform linear interpolation between points a & b.
+  // @pre 0 <= s <= 1.
+  static Vector3d lerp(const Vector3d& a, const Vector3d& b, double s) {
+    return a * s + b * (1 - s);
+  };
+
+  ccd_simplex_t simplex_;
+
+  // Vertex on object 0.
+  const Vector3d v0_{0.5, 1, 0};
+
+  // Vertices 0, 1, & 2 for triangle 1.
+  const Vector3d t1_[3] = {{0, 0.25, 0.25}, {1, 0.25, 0.25}, {0.5, 0.8, 1.0}};
+
+  // TODO(SeanCurtis-TRI): Change this to 1e-15 when the mac libccd
+  // single/double precision has been worked out.
+  const double kTolerance{constants<ccd_real_t>::eps_78()};
+};
+
+// Test extraction from a 1-simplex support method.
+TEST_F(ExtractClosestPoint, ExtractFrom1SimplexSupport) {
+  using namespace fcl::detail::libccd_extension;
+  const Vector3d& dummy1{-1, -2, -3};
+  const Vector3d& dummy2{-2, -3, -4};
+  // Set up the single support *point*.
+  ccd_vec3_t p0 = eigen_to_ccd(dummy1);
+  ccd_vec3_t p1 = eigen_to_ccd(dummy2);
+
+  // Test extraction of neither.
+  EXPECT_NO_THROW(
+      extractObjectPointsFromPoint(&simplex_.ps[0], nullptr, nullptr));
+
+  // Test extraction of p1.
+  EXPECT_NO_THROW(extractObjectPointsFromPoint(&simplex_.ps[0], &p0, nullptr));
+  EXPECT_TRUE(are_same(v0_, p0, kTolerance));
+
+  // Test extraction of p2.
+  EXPECT_NO_THROW(extractObjectPointsFromPoint(&simplex_.ps[0], nullptr, &p1));
+  EXPECT_TRUE(are_same(t1_[0], p1, kTolerance));
+
+  // Test extraction of both.
+  p0 = eigen_to_ccd(dummy1);
+  p1 = eigen_to_ccd(dummy2);
+  extractObjectPointsFromPoint(&simplex_.ps[0], &p0, &p1);
+  EXPECT_TRUE(are_same(v0_, p0, kTolerance));
+  EXPECT_TRUE(are_same(t1_[0], p1, kTolerance));
+}
+
+// Test extraction from a 1-simplex through the extractClosestPoints() method.
+TEST_F(ExtractClosestPoint, ExtractFrom1Simplex) {
+  simplex_.last = 0;
+
+  // NOTE: For a 1-simplex, the closest point isn't used.
+  ccd_vec3_t closest = eigen_to_ccd({0, 0, 0});
+
+  EvaluateExtractClosestPoint(&simplex_, v0_, t1_[0], &closest,
+                              "ExtractFrom1Simplex");
+}
+
+// Test extraction from a 2-simplex support method.
+TEST_F(ExtractClosestPoint, ExtractFrom2SimplexSupport) {
+  using namespace fcl::detail::libccd_extension;
+  const Vector3d& dummy1{-1, -2, -3};
+  const Vector3d& dummy2{-2, -3, -4};
+
+  ccd_vec3_t p0 = eigen_to_ccd(dummy1);
+  ccd_vec3_t p1 = eigen_to_ccd(dummy2);
+
+  // The query point we're going to use is a simple linear combination of the
+  // two end points.
+  const Vector3d m0 = ccd_to_eigen(simplex_.ps[0].v);
+  const Vector3d m1 = ccd_to_eigen(simplex_.ps[1].v);
+
+  const double s = 1 / 3.0;
+  ASSERT_TRUE(s >= 0 && s <= 1);
+
+  ccd_vec3_t closest = eigen_to_ccd(lerp(m0, m1, s));
+  const Vector3d p0_expected = v0_;
+  const Vector3d p1_expected = lerp(t1_[0], t1_[1], s);
+
+  // Test extraction of neither.
+  EXPECT_NO_THROW(extractObjectPointsFromSegment(
+      &simplex_.ps[0], &simplex_.ps[1], nullptr, nullptr, &closest));
+
+  // Test extraction of p1.
+  EXPECT_NO_THROW(extractObjectPointsFromSegment(
+      &simplex_.ps[0], &simplex_.ps[1], &p0, nullptr, &closest));
+  EXPECT_TRUE(are_same(p0_expected, p0, kTolerance));
+
+  // Test extraction of p2.
+  EXPECT_NO_THROW(extractObjectPointsFromSegment(
+      &simplex_.ps[0], &simplex_.ps[1], nullptr, &p1, &closest));
+  EXPECT_TRUE(are_same(p1_expected, p1, kTolerance));
+
+  // Test extraction of both.
+  p0 = eigen_to_ccd(dummy1);
+  p1 = eigen_to_ccd(dummy2);
+  EXPECT_NO_THROW(extractObjectPointsFromSegment(
+      &simplex_.ps[0], &simplex_.ps[1], &p0, &p1, &closest));
+  EXPECT_TRUE(are_same(p0_expected, p0, kTolerance));
+  EXPECT_TRUE(are_same(p1_expected, p1, kTolerance));
+}
+
+// Test extraction from a 2-simplex through the extractClosestPoints() method.
+TEST_F(ExtractClosestPoint, ExtractFrom2Simplex) {
+  simplex_.last = 1;
+
+  // The query point we're going to use is a simple linear combination of the
+  // two end points.
+  const Vector3d m0 = ccd_to_eigen(simplex_.ps[0].v);
+  const Vector3d m1 = ccd_to_eigen(simplex_.ps[1].v);
+
+  const double s = 1 / 3.0;
+  ASSERT_TRUE(s >= 0 && s <= 1);
+
+  ccd_vec3_t closest = eigen_to_ccd(lerp(m0, m1, s));
+  const Vector3d p0_expected = v0_;
+  const Vector3d p1_expected = lerp(t1_[0], t1_[1], s);
+
+  EvaluateExtractClosestPoint(&simplex_, p0_expected, p1_expected, &closest,
+                              "ExtractFrom2Simplex");
+}
+
+// Tests the case where the simplex is a degenerate simplex -- i.e., it is
+// actually a line segment.
+TEST_F(ExtractClosestPoint, ExtractFrom2SimplexDegenerate) {
+  simplex_.last = 1;
+  // NOTE: This exercises the knowledge that the coincidence tolerance is eps.
+  const ccd_real_t eps = 0.5 * constants<ccd_real_t>::eps();
+
+  // Copy the first support vertex into the second support vertex and then
+  // perturb the second a small amount. We add a small amount to the
+  // x-components of the Minkowski-difference vertex *and* the object 1 vertex.
+  ccdSupportCopy(&simplex_.ps[1], &simplex_.ps[0]);
+  simplex_.ps[1].v.v[0] += eps;
+  simplex_.ps[1].v1.v[0] += eps;
+  // Confirm that the input and expected answer (v0_) match.
+  ASSERT_TRUE(are_same(v0_, simplex_.ps[0].v1, kTolerance));
+
+  // The line segment now has essentially zero length; the answer should be
+  // essentially the same as evaluating for a single point.
+  ccd_vec3_t closest = eigen_to_ccd({0, 0, 0});
+
+  EvaluateExtractClosestPoint(&simplex_, v0_, t1_[0], &closest,
+                              "ExtractFrom2SimplexDegenerate");
+}
+
+// Test extraction from a 3-simplex through the extractClosestPoints() method.
+// Note: there is no support method for the 3-simplex like there is for the 1-
+// and 2-simplices.
+TEST_F(ExtractClosestPoint, ExtractFrom3Simplex) {
+  // Compute a "closest point" based on arbitrary barycentric coordinates.
+  const double alpha = 0.25;
+  const double beta = 0.33;
+  ASSERT_TRUE(alpha >= 0 && alpha <= 1 && beta >= 0 && beta <= 1 &&
+              alpha + beta <= 1);
+
+  const Vector3d m0 = ccd_to_eigen(simplex_.ps[0].v);
+  const Vector3d m1 = ccd_to_eigen(simplex_.ps[1].v);
+  const Vector3d m2 = ccd_to_eigen(simplex_.ps[2].v);
+
+  // Interpolate three vertices via barycentric coordinates s1 and s2.
+  auto interpolate = [](const Vector3d& a, const Vector3d& b,
+                        const Vector3d& c,
+                        double s1, double s2) -> Vector3d {
+    return a * s1 + b * s2 + c * (1 - s1 - s2);
+  };
+
+  ccd_vec3_t closest = eigen_to_ccd(interpolate(m0, m1, m2, alpha, beta));
+  const Vector3d p0_expected = v0_;
+  const Vector3d p1_expected = interpolate(t1_[0], t1_[1], t1_[2], alpha, beta);
+
+  EvaluateExtractClosestPoint(&simplex_, p0_expected, p1_expected, &closest,
+                              "ExtractFrom3Simplex");
+}
+
+// Tests the case where the 3-simplex is degenerate -- the points are
+// considered coincident.
+TEST_F(ExtractClosestPoint, ExtractFrom3SimplexDegenerateCoincident) {
+  // This test essentially reproduces the *valid* result from
+  // ExtractFrom2SimplexDegenerate(). The difference is that it *claims* to be
+  // a triangle.
+
+  // NOTE: This exercises the knowledge that the coincidence tolerance is eps.
+  const ccd_real_t eps = 0.5 * constants<ccd_real_t>::eps();
+
+  // Copy the first support vertex into the second and third support vertices
+  // and then perturb the copies a small amount. We add a small amount to the
+  // x-components of the Minkowski-difference vertex *and* the object 1 vertex.
+  for (auto i : {1, 2}) {
+    ccdSupportCopy(&simplex_.ps[i], &simplex_.ps[0]);
+    simplex_.ps[i].v.v[0] += eps;
+    simplex_.ps[i].v1.v[0] += eps;
+  }
+  // Confirm that the input and expected answer (v0_) match.
+  ASSERT_TRUE(are_same(v0_, simplex_.ps[0].v1, kTolerance));
+
+  // The triangle has zero area because the vertices are all coincident;
+  // the answer should be essentially the same as evaluating for a single
+  // point.
+  ccd_vec3_t closest = eigen_to_ccd({0, 0, 0});
+
+  EvaluateExtractClosestPoint(&simplex_, v0_, t1_[0], &closest,
+                              "ExtractFrom3SimplexDegenerateCoincident");
+}
+
+// Tests the case where the 3-simplex is degenerate -- the points are
+// considered co-linear.
+TEST_F(ExtractClosestPoint, ExtractFrom3SimplexDegenerateColinear) {
+  // The query point we're going to use is a simple linear combination of the
+  // v0 and v1 (from the Minkowski difference). That means the points on the
+  // triangle should likewise be a combination of v0 and v1 from each shape.
+  const Vector3d m0 = ccd_to_eigen(simplex_.ps[0].v);
+  const Vector3d m1 = ccd_to_eigen(simplex_.ps[1].v);
+
+  const double s = 1 / 3.0;
+  ASSERT_TRUE(s >= 0 && s <= 1);
+
+  ccd_vec3_t closest = eigen_to_ccd(lerp(m0, m1, s));
+  const Vector3d p0_expected = v0_;
+  const Vector3d p1_expected = lerp(t1_[0], t1_[1], s);
+
+  // Now set up co-linear configuration. Vertex 2 will simply be a linear
+  // combination of vertex 0 and vertex 1.
+  //   v2 = 2 * (v1 - v0) + v0 = 2 * v1 - v0.
+  // Note: this puts v2 on the same line, but not inside the line segment
+  // spanned by v0 and v1. Because the closest point lies on the segment, this
+  // confirms that arbitrarily extending the degeneracy doesn't change the
+  // answer.
+  auto linearly_combine = [](const ccd_vec3_t& a, const ccd_vec3_t& b,
+                             ccd_vec3_t* dst) {
+    auto A = ccd_to_eigen(a);
+    auto B = ccd_to_eigen(b);
+    auto C = 2 * B - A;
+    *dst = eigen_to_ccd(C);
+  };
+  linearly_combine(simplex_.ps[0].v, simplex_.ps[1].v, &simplex_.ps[2].v);
+  linearly_combine(simplex_.ps[0].v1, simplex_.ps[1].v1, &simplex_.ps[2].v1);
+  linearly_combine(simplex_.ps[0].v2, simplex_.ps[1].v2, &simplex_.ps[2].v2);
+
+  EvaluateExtractClosestPoint(&simplex_, p0_expected, p1_expected, &closest,
+                              "ExtractFrom3SimplexDegenerateColinear");
+}
+
+}  // namespace detail
+}  // namespace fcl
+
+//==============================================================================
+int main(int argc, char* argv[]) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/test/test_fcl_distance.cpp b/test/test_fcl_distance.cpp
index 98b02da0e..d49dc7e1f 100644
--- a/test/test_fcl_distance.cpp
+++ b/test/test_fcl_distance.cpp
@@ -41,6 +41,9 @@
 #include "test_fcl_utility.h"
 #include "fcl_resources/config.h"
 
+// TODO(SeanCurtis-TRI): A file called `test_fcl_distance.cpp` should *not* have
+// collision tests.
+
 using namespace fcl;
 
 bool verbose = false;
@@ -305,6 +308,50 @@ GTEST_TEST(FCL_DISTANCE, mesh_distance)
   test_mesh_distance<double>();
 }
 
+template <typename S>
+void NearestPointFromDegenerateSimplex() {
+  // Tests a historical bug. In certain configurations, the distance query
+  // would terminate with a degenerate 3-simplex; the triangle was actually a
+  // line segment. As a result, nearest points were populated with NaN values.
+  // See https://github.com/flexible-collision-library/fcl/issues/293 for
+  // more discussion.
+  DistanceResult<S> result;
+  DistanceRequest<S> request;
+  request.enable_nearest_points = true;
+  request.gjk_solver_type = GJKSolverType::GST_LIBCCD;
+
+  // These values were extracted from a real-world scenario that produced NaNs.
+  std::shared_ptr<CollisionGeometry<S>> box_geometry_1(
+      new Box<S>(2.750000, 6.000000, 0.050000));
+  std::shared_ptr<CollisionGeometry<S>> box_geometry_2(
+      new Box<S>(0.424000, 0.150000, 0.168600));
+  CollisionObject<S> box_object_1(
+      box_geometry_1, Eigen::Quaterniond(1, 0, 0, 0).matrix(),
+      Eigen::Vector3d(1.625000, 0.000000, 0.500000));
+  CollisionObject<S> box_object_2(
+      box_geometry_2,
+      Eigen::Quaterniond(0.672811, 0.340674, 0.155066, 0.638138).matrix(),
+      Eigen::Vector3d(0.192074, -0.277870, 0.273546));
+
+  EXPECT_NO_THROW(fcl::distance(&box_object_1, &box_object_2, request, result));
+
+  // The values here have been visually confirmed from the computation.
+  S expected_dist{0.053516322172152138};
+  Vector3<S> expected_p0{-1.375, -0.098881502700918666, -0.025000000000000022};
+  Vector3<S> expected_p1{0.21199965773384655, 0.074999692703297122,
+                         0.084299993303443954};
+  EXPECT_TRUE(nearlyEqual(result.nearest_points[0], expected_p0));
+  EXPECT_TRUE(nearlyEqual(result.nearest_points[1], expected_p1));
+  // TODO(SeanCurtis-TRI): Change this tolerance to constants<S>::eps_34() when
+  // the mac single/double libccd problem is resolved.
+  EXPECT_NEAR(expected_dist, result.min_distance,
+              constants<ccd_real_t>::eps_34());
+}
+
+GTEST_TEST(FCL_DISTANCE, NearestPointFromDegenerateSimplex) {
+  NearestPointFromDegenerateSimplex<double>();
+}
+
 template <typename S>
 void distance_Test_Oriented(const Transform3<S>& tf,
                             const std::vector<Vector3<S>>& vertices1, const std::vector<Triangle>& triangles1,
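For reference, the barycentric witness-point extraction that the revised extractClosestPoints() performs for a non-degenerate 3-simplex can be illustrated outside of libccd. The following is a minimal standalone sketch (not part of the patch); it assumes only Eigen is available, and the names used here (A, B, C, p, witness) are hypothetical and chosen purely for the example.

#include <Eigen/Dense>
#include <iostream>

int main() {
  using Eigen::Vector3d;
  // Triangle ABC (standing in for the simplex vertices) and a query point p
  // that lies on the triangle.
  const Vector3d A(0, 0, 0), B(1, 0, 0), C(0, 1, 0);
  const Vector3d p = 0.25 * A + 0.5 * B + 0.25 * C;

  // n = r_AB × r_AC;  β = n·(r_Ap × r_AC)/|n|²;  γ = n·(r_AB × r_Ap)/|n|².
  const Vector3d r_AB = B - A, r_AC = C - A, r_Ap = p - A;
  const Vector3d n = r_AB.cross(r_AC);
  const double beta = n.dot(r_Ap.cross(r_AC)) / n.squaredNorm();
  const double gamma = n.dot(r_AB.cross(r_Ap)) / n.squaredNorm();

  // Applying the same β and γ to the per-object vertices (here simply ABC
  // again) reproduces p; in the patch they are applied to the v1/v2 members of
  // each support vertex to obtain the witness points on each object.
  const Vector3d witness = A + beta * r_AB + gamma * r_AC;
  std::cout << "beta = " << beta << ", gamma = " << gamma
            << ", witness = " << witness.transpose() << "\n";  // Expect p.
  return 0;
}

With the values above, beta evaluates to 0.5 and gamma to 0.25, so the interpolated witness point coincides with p, matching the α = 1 - β - γ convention described in the patch comments.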