image: pointcloudlibrary/env:20.04
- container: env2204
image: pointcloudlibrary/env:22.04
- - container: env2304
- image: pointcloudlibrary/env:23.04
+ - container: env2404
+ image: pointcloudlibrary/env:24.04
stages:
- stage: formatting
CC: gcc
CXX: g++
BUILD_GPU: ON
- CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON'
- 23.04 GCC: # latest Ubuntu
- CONTAINER: env2304
+ CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON -DCMAKE_CXX_STANDARD=14 -DCMAKE_CUDA_STANDARD=14'
+ 24.04 GCC: # latest Ubuntu LTS
+ CONTAINER: env2404
CC: gcc
CXX: g++
BUILD_GPU: ON
+ CMAKE_ARGS: '-DCMAKE_CXX_STANDARD=17 -DCMAKE_CUDA_STANDARD=17'
container: $[ variables['CONTAINER'] ]
timeoutInMinutes: 0
variables:
vmImage: '$(VMIMAGE)'
strategy:
matrix:
- Monterey 12:
- VMIMAGE: 'macOS-12'
- OSX_VERSION: '12'
Ventura 13:
VMIMAGE: 'macOS-13'
OSX_VERSION: '13'
+ Sonoma 14:
+ VMIMAGE: 'macOS-14'
+ OSX_VERSION: '14'
timeoutInMinutes: 0
variables:
BUILD_DIR: '$(Agent.WorkFolder)/build'
# find the commit hash on a quick non-forced update too
fetchDepth: 10
- script: |
- brew install cmake pkg-config boost eigen flann glew libusb qhull vtk glew qt5 libpcap libomp google-benchmark
+ brew install cmake pkg-config boost eigen flann glew libusb qhull vtk glew freeglut qt5 libpcap libomp suite-sparse zlib google-benchmark cjson
brew install brewsci/science/openni
git clone https://github.com/abseil/googletest.git $GOOGLE_TEST_DIR # the official endpoint changed to abseil/googletest
cd $GOOGLE_TEST_DIR && git checkout release-1.8.1
-DGTEST_INCLUDE_DIR="$GOOGLE_TEST_DIR/googletest/include" \
-DQt5_DIR=/usr/local/opt/qt5/lib/cmake/Qt5 \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DBUILD_surface_on_nurbs=ON -DUSE_UMFPACK=ON \
-DBUILD_simulation=ON \
-DBUILD_global_tests=ON \
-DBUILD_benchmarks=ON \
-DBUILD_GPU=$BUILD_GPU \
-DBUILD_cuda_io=$BUILD_GPU \
-DBUILD_gpu_tracking=$BUILD_GPU \
- -DBUILD_gpu_surface=$BUILD_GPU \
- -DBUILD_gpu_people=$BUILD_GPU
+ -DBUILD_gpu_surface=$BUILD_GPU
# Temporary fix to ensure no tests are skipped
cmake $(Build.SourcesDirectory)
displayName: 'CMake Configuration'
UBUNTU_VERSION: 20.04
VTK_VERSION: 7
TAG: 20.04
- # Test the latest LTS version of Ubuntu
Ubuntu 22.04:
UBUNTU_VERSION: 22.04
VTK_VERSION: 9
TAG: 22.04
- Ubuntu 23.04:
- UBUNTU_VERSION: 23.04
+ # Test the latest LTS version of Ubuntu
+ Ubuntu 24.04:
+ UBUNTU_VERSION: 24.04
+ VTK_VERSION: 9
+ TAG: 24.04
+ # Test the latest version of Ubuntu (non LTS)
+ Ubuntu 24.10:
+ UBUNTU_VERSION: 24.10
USE_LATEST_CMAKE: true
VTK_VERSION: 9
- TAG: 23.04
+ TAG: 24.10
steps:
- script: |
dockerBuildArgs="" ; \
PLATFORM: x86
TAG: windows2022-x86
GENERATOR: "'Visual Studio 16 2019' -A Win32"
- VCPKGCOMMIT: 8eb57355a4ffb410a2e94c07b4dca2dffbee8e50
+ VCPKGCOMMIT: f7423ee180c4b7f40d43402c2feb3859161ef625
Winx64:
PLATFORM: x64
TAG: windows2022-x64
vmImage: 'ubuntu-latest'
strategy:
matrix:
- # Only ROS Melodic works with current releases of PCL
- # Kinetic has stopped supporting any version later than PCL 1.7.2
- ROS Melodic:
- flavor: "melodic"
+ ROS Noetic:
+ flavor: "noetic"
variables:
tag: "ros"
steps:
steps:
- script: |
POSSIBLE_VTK_VERSION=("9") \
- POSSIBLE_CMAKE_CXX_STANDARD=("14" "17" "20") \
+ POSSIBLE_CMAKE_CXX_STANDARD=("14" "17" "20" "23") \
POSSIBLE_CMAKE_BUILD_TYPE=("None" "Debug" "Release" "RelWithDebInfo" "MinSizeRel") \
- POSSIBLE_COMPILER_PACKAGE=("g++" "g++-10" "g++-11" "g++-12" "g++-13" "clang libomp-dev" "clang-13 libomp-13-dev" "clang-14 libomp-14-dev" "clang-15 libomp-15-dev" "clang-16 libomp-16-dev" "clang-17 libomp-17-dev") \
- POSSIBLE_CMAKE_C_COMPILER=("gcc" "gcc-10" "gcc-11" "gcc-12" "gcc-13" "clang" "clang-13" "clang-14" "clang-15" "clang-16" "clang-17") \
- POSSIBLE_CMAKE_CXX_COMPILER=("g++" "g++-10" "g++-11" "g++-12" "g++-13" "clang++" "clang++-13" "clang++-14" "clang++-15" "clang++-16" "clang++-17") \
+ POSSIBLE_COMPILER_PACKAGE=("g++" "g++-11" "g++-12" "g++-13" "g++-14" "clang libomp-dev" "clang-14 libomp-14-dev" "clang-15 libomp-15-dev" "clang-16 libomp-16-dev" "clang-17 libomp-17-dev" "clang-18 libomp-18-dev" "clang-19 libomp-19-dev") \
+ POSSIBLE_CMAKE_C_COMPILER=("gcc" "gcc-11" "gcc-12" "gcc-13" "gcc-14" "clang" "clang-14" "clang-15" "clang-16" "clang-17" "clang-18" "clang-19") \
+ POSSIBLE_CMAKE_CXX_COMPILER=("g++" "g++-11" "g++-12" "g++-13" "g++-14" "clang++" "clang++-14" "clang++-15" "clang++-16" "clang++-17" "clang++-18" "clang++-19") \
CHOSEN_COMPILER=$[RANDOM%${#POSSIBLE_COMPILER_PACKAGE[@]}] \
- dockerBuildArgs="--build-arg VTK_VERSION=${POSSIBLE_VTK_VERSION[$[RANDOM%${#POSSIBLE_VTK_VERSION[@]}]]} --build-arg CMAKE_CXX_STANDARD=${POSSIBLE_CMAKE_CXX_STANDARD[$[RANDOM%${#POSSIBLE_CMAKE_CXX_STANDARD[@]}]]} --build-arg CMAKE_BUILD_TYPE=${POSSIBLE_CMAKE_BUILD_TYPE[$[RANDOM%${#POSSIBLE_CMAKE_BUILD_TYPE[@]}]]} --build-arg COMPILER_PACKAGE=${POSSIBLE_COMPILER_PACKAGE[$CHOSEN_COMPILER]} --build-arg CMAKE_C_COMPILER=${POSSIBLE_CMAKE_C_COMPILER[$CHOSEN_COMPILER]} --build-arg CMAKE_CXX_COMPILER=${POSSIBLE_CMAKE_CXX_COMPILER[$CHOSEN_COMPILER]}" ; \
+ dockerBuildArgs="--build-arg VTK_VERSION=${POSSIBLE_VTK_VERSION[$[RANDOM%${#POSSIBLE_VTK_VERSION[@]}]]} --build-arg CMAKE_CXX_STANDARD=${POSSIBLE_CMAKE_CXX_STANDARD[$[RANDOM%${#POSSIBLE_CMAKE_CXX_STANDARD[@]}]]} --build-arg CMAKE_BUILD_TYPE=${POSSIBLE_CMAKE_BUILD_TYPE[$[RANDOM%${#POSSIBLE_CMAKE_BUILD_TYPE[@]}]]} --build-arg COMPILER_PACKAGE=\"${POSSIBLE_COMPILER_PACKAGE[$CHOSEN_COMPILER]}\" --build-arg CMAKE_C_COMPILER=${POSSIBLE_CMAKE_C_COMPILER[$CHOSEN_COMPILER]} --build-arg CMAKE_CXX_COMPILER=${POSSIBLE_CMAKE_CXX_COMPILER[$CHOSEN_COMPILER]}" ; \
echo "##vso[task.setvariable variable=dockerBuildArgs]$dockerBuildArgs"
displayName: "Prepare docker build arguments"
- task: Docker@2
---
Checks: >
-*,
+ bugprone-copy-constructor-init,
+ bugprone-macro-parentheses,
cppcoreguidelines-pro-type-cstyle-cast,
cppcoreguidelines-pro-type-static-cast-downcast,
google-readability-casting,
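For context, a minimal standalone illustration (not taken from the PCL sources) of the kind of code the newly enabled bugprone-macro-parentheses check reports: a macro that uses its parameter without parentheses silently changes meaning under operator precedence.

// hypothetical snippet, only to illustrate the check enabled above
#define SQUARE_BAD(x) x * x           // flagged: parameter 'x' is not parenthesized
#define SQUARE_GOOD(x) ((x) * (x))    // not flagged

int main()
{
  int a = SQUARE_BAD(1 + 2);  // expands to 1 + 2 * 1 + 2 == 5, not 9
  int b = SQUARE_GOOD(1 + 2); // 9
  return a + b;
}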
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-system-dev \
+ libcjson-dev \
libflann-dev \
libglew-dev \
+ freeglut3-dev \
libgtest-dev \
libomp-dev \
libopenni-dev \
# flavor appears twice, once before the FROM and once after, since
# Dockerfile resets build arguments after a FROM appears
-ARG flavor="melodic"
+ARG flavor="noetic"
FROM ros:${flavor}-robot
-ARG flavor="melodic"
+ARG flavor="noetic"
ARG workspace="/root/catkin_ws"
COPY ${flavor}_rosinstall.yaml ${workspace}/src/.rosinstall
RUN sed -i "s/^# deb-src/deb-src/" /etc/apt/sources.list \
&& apt update \
&& apt install -y \
+ git \
+ libboost-iostreams-dev \
libeigen3-dev \
libflann-dev \
libqhull-dev \
- python-pip \
+ python3-pip \
ros-${flavor}-tf2-eigen \
&& pip install -U pip \
&& pip install catkin_tools \
--- /dev/null
+- git:
+ local-name: 'noetic/perception_pcl'
+ uri: 'https://github.com/ros-perception/perception_pcl'
+ version: 'melodic-devel'
+- git:
+ local-name: 'noetic/pcl_msgs'
+ uri: 'https://github.com/ros-perception/pcl_msgs'
+ version: 'noetic-devel'
+- git:
+ local-name: 'pcl'
+ uri: 'https://github.com/PointCloudLibrary/pcl'
+ version: 'master'
"C:\TEMP\VisualStudio.chman", `
"--add", `
"Microsoft.VisualStudio.Workload.VCTools", `
- "Microsoft.Net.Component.4.8.SDK", `
+ "Microsoft.Net.Component.4.8.SDK", `
"Microsoft.VisualStudio.Component.VC.ATLMFC", `
"--includeRecommended" `
-Wait -PassThru; `
del c:\temp\vs_buildtools.exe;
-# VCPKG requires update if Cmake version is > 3.20.5 see: https://github.com/microsoft/vcpkg-tool/pull/107
RUN iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')); `
- choco install cmake --version=3.20.5 --installargs 'ADD_CMAKE_TO_PATH=System' -y --no-progress; `
+ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' -y --no-progress; `
choco install git -y --no-progress
RUN git clone https://github.com/microsoft/vcpkg.git; cd vcpkg; git checkout $Env:VCPKGCOMMIT;
# To explicitly set VCPKG to only build Release versions of the libraries.
COPY $PLATFORM'-windows-rel.cmake' 'c:\vcpkg\triplets\'$PLATFORM'-windows-rel.cmake'
-RUN cd .\vcpkg; `
- .\bootstrap-vcpkg.bat; `
- .\vcpkg install boost flann eigen3 qhull vtk[qt,opengl] gtest benchmark openni2 --triplet $Env:PLATFORM-windows-rel --host-triplet $Env:PLATFORM-windows-rel --clean-after-build --x-buildtrees-root=C:\b; `
+RUN cd .\vcpkg; `
+ .\bootstrap-vcpkg.bat; `
+ .\vcpkg install `
+ boost-accumulators `
+ boost-asio `
+ boost-algorithm `
+ boost-filesystem `
+ boost-format `
+ boost-graph `
+ boost-interprocess `
+ boost-iostreams `
+ boost-math `
+ boost-ptr-container `
+ boost-signals2 `
+ boost-sort `
+ boost-uuid `
+ boost-cmake `
+ flann `
+ eigen3 `
+ qhull `
+ glew `
+ freeglut `
+ vtk[qt,opengl] `
+ gtest `
+ benchmark `
+ openni2 `
+ cjson `
+ --triplet $Env:PLATFORM-windows-rel --host-triplet $Env:PLATFORM-windows-rel --clean-after-build --x-buildtrees-root=C:\b;
tidy:
runs-on: ubuntu-latest
container:
- image: pointcloudlibrary/env:22.04
+ image: pointcloudlibrary/env:24.04
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Run clang-tidy
run: |
bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
- cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_COMPILER=/usr/bin/clang-14 -DCMAKE_C_COMPILER=/usr/bin/clang-14 . \
+ cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_CXX_COMPILER=/usr/bin/clang-18 -DCMAKE_C_COMPILER=/usr/bin/clang-18 . \
-DBUILD_benchmarks=ON \
-DBUILD_examples=ON \
-DBUILD_simulation=ON \
set(SUBSYS_DESC "Point cloud 2d")
set(SUBSYS_DEPS common filters)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
-PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS vtk)
+PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_ADD_DOC("${SUBSYS_NAME}")
"include/pcl/${SUBSYS_NAME}/impl/morphology.hpp"
)
-if(${VTK_FOUND})
- set(VTK_IO_TARGET_LINK_LIBRARIES vtkCommon vtkWidgets vtkIO vtkImaging)
-endif()
-
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
set(LIB_NAME "pcl_${SUBSYS_NAME}")
+PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} INCLUDES ${incs} ${impl_incs})
+target_link_libraries(${LIB_NAME} INTERFACE pcl_filters)
+
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} HEADER_ONLY)
#Install include files
const int height = edges.height;
const int width = edges.width;
- maxima.height = height;
- maxima.width = width;
- maxima.resize(height * width);
+ maxima.resize(edges.width, edges.height);
for (auto& point : maxima)
point.intensity = 0.0f;
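As a hedged sketch of what the replacement above relies on: the two-argument PointCloud::resize sets width and height and resizes the underlying storage in one call, keeping the cloud organized and consistent. The point type below is chosen only for illustration.

// sketch under the assumption that the two-argument resize overload used in
// the change above is available in the PCL version being built
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main()
{
  pcl::PointCloud<pcl::PointXYZI> maxima;
  maxima.resize(640, 480);             // width = 640, height = 480, size() == 640 * 480
  for (auto& point : maxima)
    point.intensity = 0.0f;            // same reset loop as in the patched code
  return maxima.isOrganized() ? 0 : 1; // organized, since height > 1
}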
# ChangeList
+## = 1.15.0 (22 February 2025) =
+
+### Notable changes
+
+**New features** *added to PCL*
+
+* **[filters]** UniformSampling Filter: add option for a minimum number of points required per voxel [[#5968](https://github.com/PointCloudLibrary/pcl/pull/5968)]
+* **[sample_consensus]** Torus ransac model [[#5816](https://github.com/PointCloudLibrary/pcl/pull/5816)]
+* **[common]** Allow type conversion in fromPCLPointCloud2 [[#6059](https://github.com/PointCloudLibrary/pcl/pull/6059)]
+* **[segmentation]** ExtractPolygonalPrismData and ConcaveHull bugfix [[#5168](https://github.com/PointCloudLibrary/pcl/pull/5168)]
+* Allow hidden visibility default on gcc/clang [[#5779](https://github.com/PointCloudLibrary/pcl/pull/5779)]
+
+**Deprecation** *of public APIs, scheduled to be removed after two minor releases*
+
+* **[registration]** Fix spelling error in pcl::NormalDistributionsTransform getOulierRatio/setOulierRatio [[#6140](https://github.com/PointCloudLibrary/pcl/pull/6140)]
+
+**Removal** *of the public APIs deprecated in previous releases*
+
+* Remove Deprecated Code for 1.15.0 release [[#6040](https://github.com/PointCloudLibrary/pcl/pull/6040)]
+
+**Behavior changes** *in classes, apps, or tools*
+
+* **[cmake]** Compile PCL as C++17 by default, switching back to C++14 currently still possible [[#6201](https://github.com/PointCloudLibrary/pcl/pull/6201)]
+
+**ABI changes** *that are still API compatible*
+
+* **[recognition]** C++17 filesystem for recognition module [[#5935](https://github.com/PointCloudLibrary/pcl/pull/5935)]
+* **[registration]** Add OMP based Multi-threading to IterativeClosestPoint [[#4520](https://github.com/PointCloudLibrary/pcl/pull/4520)]
+* **[segmentation]** ExtractPolygonalPrismData and ConcaveHull bugfix [[#5168](https://github.com/PointCloudLibrary/pcl/pull/5168)]
+
+### Changes grouped by module
+
+#### CMake:
+
+* Remove global includes and use target include for Apps [[#6009](https://github.com/PointCloudLibrary/pcl/pull/6009)]
+* Give users more control regarding with which point types classes are … [[#6062](https://github.com/PointCloudLibrary/pcl/pull/6062)]
+* Fix finding lib when install with relwithdebinfo or minsizerel [[#6089](https://github.com/PointCloudLibrary/pcl/pull/6089)]
+* Use system-installed cJSON, if available [[#6084](https://github.com/PointCloudLibrary/pcl/pull/6084)]
+* Remove broken support for external metslib. [[#5959](https://github.com/PointCloudLibrary/pcl/pull/5959)]
+* Require at least Boost 1.71, support Boost 1.86, switch to BoostConfig.cmake [[#6105](https://github.com/PointCloudLibrary/pcl/pull/6105)]
+* Remove global includes and use target include for GPU/CUDA [[#6010](https://github.com/PointCloudLibrary/pcl/pull/6010)]
+* Remove findGLEW and FindOpenMP as they are already present in CMake. Update minimum required cmake to 3.16.3 [[#6100](https://github.com/PointCloudLibrary/pcl/pull/6100)]
+* PCLConfig.cmake.in: Handle potentially empty ${LIB} variable [[#6134](https://github.com/PointCloudLibrary/pcl/pull/6134)]
+* Replace make_directory (deprecated since CMake version 3.0) [[#6150](https://github.com/PointCloudLibrary/pcl/pull/6150)]
+* Updates to CXX flags and removal of cmake checks less than 3.16. [[#6144](https://github.com/PointCloudLibrary/pcl/pull/6144)]
+* Fix OpenMP on macOS/homebrew [[#6114](https://github.com/PointCloudLibrary/pcl/pull/6114)]
+* Preparation for Boost 1.87, flag change for GCC 14 [[#6166](https://github.com/PointCloudLibrary/pcl/pull/6166)]
+* Print warning if incompatible alignment option chosen [[#6184](https://github.com/PointCloudLibrary/pcl/pull/6184)]
+* **[behavior change]** Compile PCL as C++17 by default, switching back to C++14 currently still possible [[#6201](https://github.com/PointCloudLibrary/pcl/pull/6201)]
+* Fix linking with pcl_io_ply [[#6205](https://github.com/PointCloudLibrary/pcl/pull/6205)]
+* Remove global includes from PCL_SUBSYS_DEPEND in PCL_TARGETS and adjust accordingly [[#6013](https://github.com/PointCloudLibrary/pcl/pull/6013)]
+
+#### libpcl_common:
+
+* **[new feature]** Allow type conversion in fromPCLPointCloud2 [[#6059](https://github.com/PointCloudLibrary/pcl/pull/6059)]
+* change `sprintf` to `snprintf` to suppress deprecated warning on macOS [[#6102](https://github.com/PointCloudLibrary/pcl/pull/6102)]
+* Remove "No data to copy" warning in conversions.h [[#6108](https://github.com/PointCloudLibrary/pcl/pull/6108)]
+* add PCL_HIGH convenience macro [[#6136](https://github.com/PointCloudLibrary/pcl/pull/6136)]
+* Remove unnecessary PCL_EXPORTS in common [[#6141](https://github.com/PointCloudLibrary/pcl/pull/6141)]
+* Print warning if incompatible alignment option chosen [[#6184](https://github.com/PointCloudLibrary/pcl/pull/6184)]
+* Update `__func__` and `__PRETTY_FUNCTION__` defines for MSVC [[#6222](https://github.com/PointCloudLibrary/pcl/pull/6222)]
+
+#### libpcl_features:
+
+* Added parallel implementation of PrincipalCurvaturesEstimation [[#6048](https://github.com/PointCloudLibrary/pcl/pull/6048)]
+* :bug: Fix RoPS total area [[#6187](https://github.com/PointCloudLibrary/pcl/pull/6187)]
+
+#### libpcl_filters:
+
+* **[new feature]** UniformSampling Filter: add option for a minimum number of points required per voxel [[#5968](https://github.com/PointCloudLibrary/pcl/pull/5968)]
+* Fix issues related to nan/invalid points [[#6036](https://github.com/PointCloudLibrary/pcl/pull/6036)]
+* Make UniformSampling inherit from FilterIndices instead of Filter [[#6064](https://github.com/PointCloudLibrary/pcl/pull/6064)]
+* Fix an integer overflow issue in the PassThrough filter. [[#6097](https://github.com/PointCloudLibrary/pcl/pull/6097)]
+* Convolution3D: use dynamic schedule for OpenMP [[#6155](https://github.com/PointCloudLibrary/pcl/pull/6155)]
+* Add OpenMP to radius outlier removal [[#6182](https://github.com/PointCloudLibrary/pcl/pull/6182)]
+
+#### libpcl_gpu:
+
+* fix int type overflow by casting int to size_t [[#6058](https://github.com/PointCloudLibrary/pcl/pull/6058)]
+
+#### libpcl_io:
+
+* Fix problem with normals in obj loader [[#6047](https://github.com/PointCloudLibrary/pcl/pull/6047)]
+* fix saveRangeImagePlanarFilePNG [[#6095](https://github.com/PointCloudLibrary/pcl/pull/6095)]
+* fix pcd io small probability bug [[#6122](https://github.com/PointCloudLibrary/pcl/pull/6122)]
+* Preparation for Boost 1.87, flag change for GCC 14 [[#6166](https://github.com/PointCloudLibrary/pcl/pull/6166)]
+* Fix linking with pcl_io_ply [[#6205](https://github.com/PointCloudLibrary/pcl/pull/6205)]
+* Fix regression in OBJ loader [[#6228](https://github.com/PointCloudLibrary/pcl/pull/6228)]
+
+#### libpcl_octree:
+
+* Faster octree nearestKSearch [[#6037](https://github.com/PointCloudLibrary/pcl/pull/6037)]
+* Faster octree radiusSearch [[#6045](https://github.com/PointCloudLibrary/pcl/pull/6045)]
+
+#### libpcl_outofcore:
+
+* Use system-installed cJSON, if available [[#6084](https://github.com/PointCloudLibrary/pcl/pull/6084)]
+
+#### libpcl_recognition:
+
+* **[ABI break]** C++17 filesystem for recognition module [[#5935](https://github.com/PointCloudLibrary/pcl/pull/5935)]
+* add border type setting in linemod gauss step [[#6103](https://github.com/PointCloudLibrary/pcl/pull/6103)]
+
+#### libpcl_registration:
+
+* **[ABI break]** Add OMP based Multi-threading to IterativeClosestPoint [[#4520](https://github.com/PointCloudLibrary/pcl/pull/4520)]
+* GICP: parallel covariance computation [[#6046](https://github.com/PointCloudLibrary/pcl/pull/6046)]
+* Fixes and improvements for FPCS and KFPCS [[#6073](https://github.com/PointCloudLibrary/pcl/pull/6073)]
+* **[deprecation]** Fix spelling error in pcl::NormalDistributionsTransform getOulierRatio/setOulierRatio [[#6140](https://github.com/PointCloudLibrary/pcl/pull/6140)]
+* fix registration iteration 1 mse print [[#6198](https://github.com/PointCloudLibrary/pcl/pull/6198)]
+* Speed up PPFRegistration by using a hash function with fewer collisions [[#6223](https://github.com/PointCloudLibrary/pcl/pull/6223)]
+
+#### libpcl_sample_consensus:
+
+* **[new feature]** Torus ransac model [[#5816](https://github.com/PointCloudLibrary/pcl/pull/5816)]
+* Circle3D: fix optimizeModelCoefficients use an uninitialized var [[#6088](https://github.com/PointCloudLibrary/pcl/pull/6088)]
+* Set better default for RRANSAC and RMSAC [[#6158](https://github.com/PointCloudLibrary/pcl/pull/6158)]
+* Circle3D: Changed segmentation collinear check precision from float to double. [[#6175](https://github.com/PointCloudLibrary/pcl/pull/6175)]
+
+#### libpcl_search:
+
+* Faster organized radius search [[#6160](https://github.com/PointCloudLibrary/pcl/pull/6160)]
+
+#### libpcl_segmentation:
+
+* fix pcl::squaredEuclideanDistance already defined in grabcut_2d.cpp.obj [[#5985](https://github.com/PointCloudLibrary/pcl/pull/5985)]
+* **[new feature][ABI break]** ExtractPolygonalPrismData and ConcaveHull bugfix [[#5168](https://github.com/PointCloudLibrary/pcl/pull/5168)]
+* fix: Initialization with correct signedness [[#6111](https://github.com/PointCloudLibrary/pcl/pull/6111)]
+
+#### libpcl_surface:
+
+* Remove deprecated readdir_r [[#6035](https://github.com/PointCloudLibrary/pcl/pull/6035)]
+* Update opennurbs VS issue (opennurbs_lookup.cpp) [[#6143](https://github.com/PointCloudLibrary/pcl/pull/6143)]
+* Add missing include for implementation header in bilateral_upsampling.h [[#6203](https://github.com/PointCloudLibrary/pcl/pull/6203)]
+
+#### libpcl_visualization:
+
+* Fix issues related to nan/invalid points [[#6036](https://github.com/PointCloudLibrary/pcl/pull/6036)]
+* Fix boost hash data type [[#6053](https://github.com/PointCloudLibrary/pcl/pull/6053)]
+
+#### PCL Apps:
+
+* Preparation for Boost 1.87, flag change for GCC 14 [[#6166](https://github.com/PointCloudLibrary/pcl/pull/6166)]
+
+#### PCL Tools:
+
+* compile missing tools/bilateral_upsampling [[#6112](https://github.com/PointCloudLibrary/pcl/pull/6112)]
+
+#### CI:
+
+* Update windows docker boost and cmake [[#6033](https://github.com/PointCloudLibrary/pcl/pull/6033)]
+* CI: Install cjson [[#6082](https://github.com/PointCloudLibrary/pcl/pull/6082)]
+* Update Windows x86 docker, install boost-cmake [[#6109](https://github.com/PointCloudLibrary/pcl/pull/6109)]
+* Update clang-tidy github action [[#6116](https://github.com/PointCloudLibrary/pcl/pull/6116)]
+* Update macOS on CI (macOS 12 is now unmaintained) [[#6147](https://github.com/PointCloudLibrary/pcl/pull/6147)]
+* CI updates [[#6153](https://github.com/PointCloudLibrary/pcl/pull/6153)]
+* Fix OpenMP on macOS/homebrew [[#6114](https://github.com/PointCloudLibrary/pcl/pull/6114)]
+
+## = 1.14.1 (03 May 2024) =
+
+### Notable changes
+
+**New features** *added to PCL*
+
+* **[cmake]** Make Boost filesystem optional for C++17 [[#5937](https://github.com/PointCloudLibrary/pcl/pull/5937)]
+* **[common]** Enhance toPCLPointCloud2 to remove padding on request [[#5913](https://github.com/PointCloudLibrary/pcl/pull/5913)]
+* **[io]** Enable writing data in binary PLY format to std::ostream [[#5975](https://github.com/PointCloudLibrary/pcl/pull/5975)]
+
+### Changes grouped by module
+
+#### CMake:
+
+* Switch latest Ubuntu CI to C++17 [[#5931](https://github.com/PointCloudLibrary/pcl/pull/5931)]
+* **[new feature]** Make Boost filesystem optional for C++17 [[#5937](https://github.com/PointCloudLibrary/pcl/pull/5937)]
+* Add OpenGL_GLU as external dependency. [[#5963](https://github.com/PointCloudLibrary/pcl/pull/5963)]
+* Preparation for default hidden visibility on gcc [[#5970](https://github.com/PointCloudLibrary/pcl/pull/5970)]
+* Cmake cuda find_package cuda is deprecated. [[#5953](https://github.com/PointCloudLibrary/pcl/pull/5953)]
+* fix build with CUDA [[#5976](https://github.com/PointCloudLibrary/pcl/pull/5976)]
+* Enable compatibility with Boost 1.85.0 [[#6014](https://github.com/PointCloudLibrary/pcl/pull/6014)]
+
+#### libpcl_common:
+
+* Rename variables with reserved names (PointXYZLAB) [[#5951](https://github.com/PointCloudLibrary/pcl/pull/5951)]
+* **[new feature]** Enhance toPCLPointCloud2 to remove padding on request [[#5913](https://github.com/PointCloudLibrary/pcl/pull/5913)]
+* Fix behaviour of eigen33 function if smallest eigenvalue is not unique [[#5956](https://github.com/PointCloudLibrary/pcl/pull/5956)]
+* Add option to choose boost filesystem over std filesystem [[#6005](https://github.com/PointCloudLibrary/pcl/pull/6005)]
+
+#### libpcl_filters:
+
+* Fix Bug in NormalSpaceSampling::findBin() [[#5936](https://github.com/PointCloudLibrary/pcl/pull/5936)]
+* VoxelGridOcclusionEstimation should always round down to go from coordinates to voxel indices. [[#5942](https://github.com/PointCloudLibrary/pcl/pull/5942)]
+* StatisticalOutlierRemoval: fix potential container overflow read [[#5980](https://github.com/PointCloudLibrary/pcl/pull/5980)]
+* fixing ignored `pcl::Indices` in `VoxelGrid` of `PCLPointCloud2` [[#5979](https://github.com/PointCloudLibrary/pcl/pull/5979)]
+
+#### libpcl_gpu:
+
+* Add missing PCL_EXPORTS (economical_download, optimizeModelCoefficients, vtkRenderWindowInteractorFixNew) [[#5926](https://github.com/PointCloudLibrary/pcl/pull/5926)]
+
+#### libpcl_io:
+
+* Real Sense 2 grabber stream fix [[#5912](https://github.com/PointCloudLibrary/pcl/pull/5912)]
+* Improve documentation in vtk_lib_io [[#5955](https://github.com/PointCloudLibrary/pcl/pull/5955)]
+* Add special implementation for raw_fallocate for OpenBSD [[#5957](https://github.com/PointCloudLibrary/pcl/pull/5957)]
+* Fix missing include in ply_parser.h (#5962) [[#5964](https://github.com/PointCloudLibrary/pcl/pull/5964)]
+* **[new feature]** Enable writing data in binary PLY format to std::ostream [[#5975](https://github.com/PointCloudLibrary/pcl/pull/5975)]
+* OBJReader: fix possible out-of-bounds access [[#5988](https://github.com/PointCloudLibrary/pcl/pull/5988)]
+* ImageGrabber: Fix potential index out of bounds [[#6016](https://github.com/PointCloudLibrary/pcl/pull/6016)]
+
+#### libpcl_registration:
+
+* NDT: allow access to target cloud distribution [[#5969](https://github.com/PointCloudLibrary/pcl/pull/5969)]
+* Optimize Eigen block operations [[#5974](https://github.com/PointCloudLibrary/pcl/pull/5974)]
+
+#### libpcl_sample_consensus:
+
+* Add missing PCL_EXPORTS (economical_download, optimizeModelCoefficients, vtkRenderWindowInteractorFixNew) [[#5926](https://github.com/PointCloudLibrary/pcl/pull/5926)]
+
+#### libpcl_surface:
+
+* Add `pcl::PointXYZLNormal` to GP3 PCL_INSTANTIATE (#5981) [[#5983](https://github.com/PointCloudLibrary/pcl/pull/5983)]
+
+#### libpcl_visualization:
+
+* Add missing PCL_EXPORTS (economical_download, optimizeModelCoefficients, vtkRenderWindowInteractorFixNew) [[#5926](https://github.com/PointCloudLibrary/pcl/pull/5926)]
+* Add missing pragma once in qvtk_compatibility.h [[#5943](https://github.com/PointCloudLibrary/pcl/pull/5943)]
+* fixed setShapeRenderingProperties(PCL_VISUALIZER_FONT_SIZE) [[#5993](https://github.com/PointCloudLibrary/pcl/pull/5993)]
+* fix addPolygon and addLine return value error [[#5996](https://github.com/PointCloudLibrary/pcl/pull/5996)]
+
+#### CI:
+
+* Switch latest Ubuntu CI to C++17 [[#5931](https://github.com/PointCloudLibrary/pcl/pull/5931)]
+* Fix ubuntu-variety CI and update compilers [[#5990](https://github.com/PointCloudLibrary/pcl/pull/5990)]
+
## = 1.14.0 (03 January 2024) =
### Notable changes
### ---[ PCL global CMake
-cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
-
-if(POLICY CMP0074)
- # 1. Remove with 3.12.4.
- # 2. Remove search paths with *_ROOT since they will be automatically checked
- cmake_policy(SET CMP0074 NEW)
-endif()
+cmake_minimum_required(VERSION 3.16.3 FATAL_ERROR)
# Set target C++ standard and required compiler features
-set(CMAKE_CXX_STANDARD 14 CACHE STRING "The target C++ standard. PCL requires C++14 or higher.")
+set(CMAKE_CXX_STANDARD 17 CACHE STRING "The target C++ standard. PCL requires C++14 or higher.")
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
-set(PCL_CXX_COMPILE_FEATURES cxx_std_14)
+if("${CMAKE_CXX_STANDARD}" GREATER_EQUAL 17)
+ set(PCL_CXX_COMPILE_FEATURES cxx_std_17)
+ set(PCL__cplusplus 201703L)
+ set(PCL_REQUIRES_MSC_VER 1912)
+elseif("${CMAKE_CXX_STANDARD}" EQUAL 14)
+ set(PCL_CXX_COMPILE_FEATURES cxx_std_14)
+ set(PCL__cplusplus 201402L)
+ set(PCL_REQUIRES_MSC_VER 1900)
+else()
+ message(FATAL_ERROR "Unknown or unsupported C++ standard specified")
+endif()
-set(CMAKE_CUDA_STANDARD 14 CACHE STRING "The target CUDA/C++ standard. PCL requires CUDA/C++ 14 or higher.")
+set(CMAKE_CUDA_STANDARD 17 CACHE STRING "The target CUDA/C++ standard. PCL requires CUDA/C++ 14 or higher.")
set(CMAKE_CUDA_STANDARD_REQUIRED ON)
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "possible configurations" FORCE)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type default to RelWithDebInfo, set to Release to improve performance" FORCE)
endif()
-project(PCL VERSION 1.14.0)
+project(PCL VERSION 1.15.0)
string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
if(MSVC AND ("${MSVC_VERSION}" LESS 1910))
# Create a variable with expected default CXX flags
# This will be used further down the road to check if the user explicitly provided CXX flags
if(CMAKE_COMPILER_IS_MSVC)
- set(CMAKE_CXX_FLAGS_DEFAULT "/DWIN32 /D_WINDOWS /W3 /GR /EHsc")
+ set(CMAKE_CXX_FLAGS_DEFAULT ${CMAKE_CXX_FLAGS_INIT})
+ string(STRIP ${CMAKE_CXX_FLAGS_DEFAULT} CMAKE_CXX_FLAGS_DEFAULT)
else()
set(CMAKE_CXX_FLAGS_DEFAULT "")
endif()
PCL_CHECK_FOR_AVX()
endif()
+# Cuda
+option(WITH_CUDA "Build NVIDIA-CUDA support" TRUE)
+if(WITH_CUDA)
+ include("${PCL_SOURCE_DIR}/cmake/pcl_find_cuda.cmake")
+endif()
+
+
# ---[ Unix/Darwin/Windows specific flags
if(CMAKE_COMPILER_IS_GNUCXX)
if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7)
- string(APPEND CMAKE_CXX_FLAGS " -Wabi=11")
+ string(APPEND CMAKE_CXX_FLAGS " -Wabi=18")
else()
string(APPEND CMAKE_CXX_FLAGS " -Wabi")
endif()
endif()
if(CMAKE_COMPILER_IS_MSVC)
- add_definitions("-DBOOST_ALL_NO_LIB -D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS -DNOMINMAX -DPCL_ONLY_CORE_POINT_TYPES ${SSE_DEFINITIONS}")
-
+ add_definitions("-DBOOST_ALL_NO_LIB -D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS -DNOMINMAX ${SSE_DEFINITIONS}")
+
if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
+
string(APPEND CMAKE_CXX_FLAGS " /fp:precise ${SSE_FLAGS} ${AVX_FLAGS}")
+ set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=/bigobj")
+
+ set(PCL_USE_GLOBAL_OPTIMIZATION TRUE)
+ if(CUDA_FOUND)
+ if(${CUDA_VERSION_STRING} VERSION_GREATER_EQUAL "10.0" AND ${CUDA_VERSION_STRING} VERSION_LESS "12.0")
+ set(PCL_USE_GLOBAL_OPTIMIZATION FALSE)
+ message("Global optimizations /GL has been turned off, as it doesn't work with nvcc/thrust in CUDA 10 and 11.")
+ endif()
+ endif()
# Add extra code generation/link optimizations
- if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION AND (NOT BUILD_CUDA) AND (NOT BUILD_GPU))
+ if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION AND PCL_USE_GLOBAL_OPTIMIZATION)
string(APPEND CMAKE_CXX_FLAGS_RELEASE " /GL")
string(APPEND CMAKE_SHARED_LINKER_FLAGS_RELEASE " /LTCG /OPT:REF")
string(APPEND CMAKE_EXE_LINKER_FLAGS_RELEASE " /LTCG")
- else()
- set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=/bigobj")
-
- message("Global optimizations /GL has been turned off, as it doesn't work with nvcc/thrust")
endif()
# /MANIFEST:NO") # please, don't disable manifest generation, otherwise crash at start for vs2008
include(ProcessorCount)
ProcessorCount(CPUCores)
set(MSVC_MP ${CPUCores} CACHE STRING "Number of simultaneously running compilers (0 = automatic detection by MSVC). See documentation of /MP flag.")
- if (CMAKE_VERSION VERSION_LESS 3.11.0)
- # Usage of COMPILE_LANGUAGE generator expression for MSVC in add_compile_options requires at least CMake 3.11, see https://gitlab.kitware.com/cmake/cmake/issues/17435
- if(MSVC_MP EQUAL 0)
- # MSVC_MP is 0 in case the information cannot be determined by ProcessorCount => fallback
- string(APPEND CMAKE_C_FLAGS " /MP")
- string(APPEND CMAKE_CXX_FLAGS " /MP")
- elseif(MSVC_MP GREATER 1)
- string(APPEND CMAKE_C_FLAGS " /MP${MSVC_MP}")
- string(APPEND CMAKE_CXX_FLAGS " /MP${MSVC_MP}")
- endif()
- else()
- if(MSVC_MP EQUAL 0)
- # MSVC_MP is 0 in case the information cannot be determined by ProcessorCount => fallback
- # Generator expression is necessary to limit /MP flag to C/CXX, so flag will be not set to e.g. CUDA (see https://gitlab.kitware.com/cmake/cmake/issues/17535)
- add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:/MP>)
- elseif(MSVC_MP GREATER 1)
- add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:/MP${MSVC_MP}>)
- endif()
+
+ if(MSVC_MP EQUAL 0)
+ # MSVC_MP is 0 in case the information cannot be determined by ProcessorCount => fallback
+ # Generator expression is necessary to limit /MP flag to C/CXX, so flag will be not set to e.g. CUDA (see https://gitlab.kitware.com/cmake/cmake/issues/17535)
+ add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:/MP>)
+ elseif(MSVC_MP GREATER 1)
+ add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:/MP${MSVC_MP}>)
endif()
endif()
string(APPEND CMAKE_CXX_FLAGS " /bigobj")
set(CLANG_LIBRARIES "stdc++")
endif()
-if(CMAKE_COMPILER_IS_MINGW)
- add_definitions(-DPCL_ONLY_CORE_POINT_TYPES)
-endif()
-
include("${PCL_SOURCE_DIR}/cmake/pcl_utils.cmake")
DISSECT_VERSION()
GET_OS_INFO()
set(PCL_OUTPUT_LIB_DIR "${PCL_BINARY_DIR}/${LIB_INSTALL_DIR}")
set(PCL_OUTPUT_BIN_DIR "${PCL_BINARY_DIR}/${BIN_INSTALL_DIR}")
-make_directory("${PCL_OUTPUT_LIB_DIR}")
-make_directory("${PCL_OUTPUT_BIN_DIR}")
+file(MAKE_DIRECTORY "${PCL_OUTPUT_LIB_DIR}")
+file(MAKE_DIRECTORY "${PCL_OUTPUT_BIN_DIR}")
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PCL_OUTPUT_LIB_DIR}")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PCL_OUTPUT_BIN_DIR}")
if(WIN32)
# OpenMP (optional)
option(WITH_OPENMP "Build with parallelization using OpenMP" TRUE)
+option(USE_HOMEBREW_FALLBACK "(macOS-only) also look in 'brew --prefix' for libraries (e.g. OpenMP)" TRUE)
if(WITH_OPENMP)
find_package(OpenMP COMPONENTS C CXX)
+ if(APPLE AND NOT OpenMP_FOUND)
+ if(USE_HOMEBREW_FALLBACK)
+ # libomp 15.0+ from brew is keg-only, so we have to search in other locations.
+ # See https://github.com/Homebrew/homebrew-core/issues/112107#issuecomment-1278042927.
+ execute_process(COMMAND brew --prefix libomp
+ OUTPUT_VARIABLE HOMEBREW_LIBOMP_PREFIX
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_LIBOMP_PREFIX}/include")
+ set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I${HOMEBREW_LIBOMP_PREFIX}/include")
+ set(OpenMP_C_LIB_NAMES omp)
+ set(OpenMP_CXX_LIB_NAMES omp)
+ set(OpenMP_omp_LIBRARY ${HOMEBREW_LIBOMP_PREFIX}/lib/libomp.dylib)
+ find_package(OpenMP COMPONENTS C CXX)
+ endif()
+ endif()
endif()
if(OpenMP_FOUND)
string(APPEND CMAKE_C_FLAGS " ${OpenMP_C_FLAGS}")
PCL_ADD_GRABBER_DEPENDENCY("RSSDK" "RealSense SDK support")
PCL_ADD_GRABBER_DEPENDENCY("RSSDK2" "RealSense SDK 2.0 (librealsense) support")
-# metslib
-if(PKG_CONFIG_FOUND)
- pkg_check_modules(METSLIB metslib)
- if(METSLIB_FOUND)
- set(HAVE_METSLIB ON)
- include_directories(SYSTEM ${METSLIB_INCLUDE_DIRS})
- else()
- include_directories(SYSTEM "${PCL_SOURCE_DIR}/recognition/include/pcl/recognition/3rdparty/")
- endif()
-else()
- include_directories(SYSTEM ${PCL_SOURCE_DIR}/recognition/include/pcl/recognition/3rdparty/)
-endif()
-
# LibPNG
option(WITH_PNG "PNG file support" TRUE)
if(WITH_PNG)
find_package(PNG)
if(PNG_FOUND)
set(HAVE_PNG ON)
- include_directories(SYSTEM "${PNG_INCLUDE_DIR}")
endif()
endif()
endif()
endif()
-# Cuda
-option(WITH_CUDA "Build NVIDIA-CUDA support" TRUE)
-if(WITH_CUDA)
- include("${PCL_SOURCE_DIR}/cmake/pcl_find_cuda.cmake")
+# find GLEW before VTK as it uses custom findGLEW that doesn't work with cmakes findGLEW.
+option(WITH_GLEW "Support for GLEW" TRUE)
+if(WITH_GLEW)
+ find_package(GLEW QUIET)
endif()
endif()
endif()
+option(WITH_SYSTEM_CJSON "Use system cJSON" TRUE)
+if(WITH_SYSTEM_CJSON)
+ find_package(cJSON)
+ if(cJSON_FOUND)
+ set(HAVE_CJSON ON)
+ else()
+ message(WARNING "It is recommended to install an up-to-date version of cJSON on your system. CMake did not find one, and will instead use a cJSON version bundled with the PCL source code. However, that is an older version which may pose a risk and may be removed in a future PCL release.")
+ endif()
+endif()
+
+set(CMAKE_REQUIRED_LIBRARIES Eigen3::Eigen) # so that Eigen/Core is found below
+CHECK_CXX_SOURCE_COMPILES("
+#include <Eigen/Core>
+#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
+#error Eigen will not use handmade_aligned_malloc (which is fine, we just throw an error here to make this compilation fail)
+#endif
+int main() { return 0; }"
+PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC)
+if (PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC) # CHECK_CXX_SOURCE_COMPILES does not necessarily set this to 0 or 1
+ set(PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC 1)
+else()
+ set(PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC 0)
+endif()
+unset(CMAKE_REQUIRED_LIBRARIES)
+
### ---[ Create the config.h file
set(pcl_config_h_in "${CMAKE_CURRENT_SOURCE_DIR}/pcl_config.h.in")
set(pcl_config_h "${CMAKE_CURRENT_BINARY_DIR}/include/pcl/pcl_config.h")
#------------------------------------------------------------------------------------
# Set default policy behavior similar to minimum requirement version
-cmake_policy(VERSION 3.10)
-
-# explicitly set policies we already support in newer cmake versions
-if(POLICY CMP0074)
- # TODO: update *_ROOT variables to be PCL_*_ROOT or equivalent.
- # CMP0074 directly affects how Find* modules work and *_ROOT variables. Since
- # this is a config file that will be consumed by parent projects with (likely)
- # NEW behavior, we need to push a policy stack.
- cmake_policy(SET CMP0074 NEW)
-endif()
+cmake_policy(VERSION 3.16.3)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/Modules")
elseif(NOT BOOST_INCLUDEDIR)
set(BOOST_INCLUDEDIR "@Boost_INCLUDE_DIR@")
endif()
-
- set(Boost_ADDITIONAL_VERSIONS
- "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@.@Boost_SUBMINOR_VERSION@" "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@"
- "1.84.0" "1.84" "1.83.0" "1.83" "1.82.0" "1.82" "1.81.0" "1.81" "1.80.0" "1.80"
- "1.79.0" "1.79" "1.78.0" "1.78" "1.77.0" "1.77" "1.76.0" "1.76" "1.75.0" "1.75"
- "1.74.0" "1.74" "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
- "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65")
-
- find_package(Boost 1.65.0 ${QUIET_} COMPONENTS @PCLCONFIG_AVAILABLE_BOOST_MODULES@)
+
+ find_package(Boost 1.71.0 ${QUIET_} COMPONENTS @PCLCONFIG_AVAILABLE_BOOST_MODULES@ CONFIG)
set(BOOST_FOUND ${Boost_FOUND})
set(BOOST_INCLUDE_DIRS "${Boost_INCLUDE_DIR}")
string(TOUPPER "${_component}" COMPONENT)
string(TOUPPER "${_lib}" LIB)
- string(REGEX REPLACE "[.-]" "_" LIB ${LIB})
+ string(REGEX REPLACE "[.-]" "_" LIB "${LIB}")
if(${LIB}_FOUND)
list(APPEND PCL_${COMPONENT}_INCLUDE_DIRS ${${LIB}_INCLUDE_DIRS})
set(PCL_${COMPONENT}_INCLUDE_DIRS ${PCL_${COMPONENT}_INCLUDE_DIRS} PARENT_SCOPE)
-
+
if(${LIB} MATCHES "VTK")
if(${${LIB}_VERSION_MAJOR} GREATER_EQUAL 9)
set(ISVTK9ORGREATER TRUE)
endif()
endif()
-
+
if(${LIB}_USE_FILE AND NOT ISVTK9ORGREATER )
include(${${LIB}_USE_FILE})
else()
#set a suffix for debug libraries
set(PCL_DEBUG_SUFFIX "@CMAKE_DEBUG_POSTFIX@")
set(PCL_RELEASE_SUFFIX "@CMAKE_RELEASE_POSTFIX@")
+set(PCL_RELWITHDEBINFO_SUFFIX "@CMAKE_RELWITHDEBINFO_POSTFIX@")
+set(PCL_MINSIZEREL_SUFFIX "@CMAKE_MINSIZEREL_POSTFIX@")
set(PCL_SHARED_LIBS "@PCL_SHARED_LIBS@")
list(APPEND PCL_COMPILE_OPTIONS @PCLCONFIG_AVX_COMPILE_OPTIONS@)
set(pcl_all_components @PCLCONFIG_AVAILABLE_COMPONENTS@)
+# insert "io_ply" before "io"
+list(FIND pcl_all_components "io" pcl_pos_io)
+list(INSERT pcl_all_components ${pcl_pos_io} "io_ply")
+unset(pcl_pos_io)
list(LENGTH pcl_all_components PCL_NB_COMPONENTS)
#list each component dependencies IN PCL
@PCLCONFIG_OPTIONAL_DEPENDENCIES@
+# io_ply subcomponent
+list(APPEND pcl_io_int_dep io_ply)
+set(pcl_io_ply_int_dep common)
+set(pcl_io_ply_ext_dep boost)
+
# VTK components required by PCL
set(PCL_VTK_COMPONENTS "@PCL_VTK_COMPONENTS@")
string(REGEX REPLACE "^cuda_(.*)$" "\\1" cuda_component "${component}")
string(REGEX REPLACE "^gpu_(.*)$" "\\1" gpu_component "${component}")
+ string(REGEX REPLACE "^io_(.*)$" "\\1" io_component "${component}")
find_path(PCL_${COMPONENT}_INCLUDE_DIR
NAMES pcl/${component}
pcl/apps/${component}
pcl/cuda/${cuda_component} pcl/cuda/${component}
pcl/gpu/${gpu_component} pcl/gpu/${component}
+ pcl/io/${io_component}
HINTS ${PCL_INCLUDE_DIRS}
PATH_SUFFIXES
${component}/include
apps/${component}/include
cuda/${cuda_component}/include
gpu/${gpu_component}/include
+ io/${io_component}/include
DOC "path to ${component} headers"
NO_DEFAULT_PATH)
mark_as_advanced(PCL_${COMPONENT}_INCLUDE_DIR)
# Skip find_library for header only modules
list(FIND pcl_header_only_components ${component} _is_header_only)
if(_is_header_only EQUAL -1)
- find_library(PCL_${COMPONENT}_LIBRARY ${pcl_component}${PCL_RELEASE_SUFFIX}
+ find_library(PCL_${COMPONENT}_LIBRARY
+ NAMES ${pcl_component}${PCL_RELEASE_SUFFIX} ${pcl_component}${PCL_RELWITHDEBINFO_SUFFIX} ${pcl_component}${PCL_MINSIZEREL_SUFFIX}
HINTS ${PCL_LIBRARY_DIRS}
DOC "path to ${pcl_component} library"
NO_DEFAULT_PATH)
[![Release][release-image]][releases]
[![License][license-image]][license]
-[release-image]: https://img.shields.io/badge/release-1.14.0-green.svg?style=flat
+[release-image]: https://img.shields.io/badge/release-1.15.0-green.svg?style=flat
[releases]: https://github.com/PointCloudLibrary/pcl/releases
[license-image]: https://img.shields.io/badge/license-BSD-green.svg?style=flat
[ci-latest-build]: https://dev.azure.com/PointCloudLibrary/pcl/_build/latest?definitionId=9&branchName=master
[ci-ubuntu-20.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2020.04%20GCC&label=Ubuntu%2020.04%20GCC
[ci-ubuntu-22.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=Ubuntu&configuration=Ubuntu%2022.04%20Clang&label=Ubuntu%2022.04%20Clang
-[ci-ubuntu-23.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2023.04%20GCC&label=Ubuntu%2023.04%20GCC
+[ci-ubuntu-24.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2024.04%20GCC&label=Ubuntu%2024.04%20GCC
[ci-windows-x86]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20Build&configuration=Windows%20Build%20x86&label=Windows%20VS2019%20x86
[ci-windows-x64]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20Build&configuration=Windows%20Build%20x64&label=Windows%20VS2019%20x64
-[ci-macos-12]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Monterey%2012&label=macOS%20Monterey%2012
[ci-macos-13]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Ventura%2013&label=macOS%20Ventura%2013
+[ci-macos-14]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Sonoma%2014&label=macOS%20Sonoma%2014
[ci-docs]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/Documentation?branchName=master
[ci-latest-docs]: https://dev.azure.com/PointCloudLibrary/pcl/_build/latest?definitionId=14&branchName=master
Build Platform | Status
------------------------ | ------------------------------------------------------------------------------------------------- |
-Ubuntu | [![Status][ci-ubuntu-20.04]][ci-latest-build] <br> [![Status][ci-ubuntu-22.04]][ci-latest-build] <br> [![Status][ci-ubuntu-23.04]][ci-latest-build] |
+Ubuntu | [![Status][ci-ubuntu-20.04]][ci-latest-build] <br> [![Status][ci-ubuntu-22.04]][ci-latest-build] <br> [![Status][ci-ubuntu-24.04]][ci-latest-build] |
Windows | [![Status][ci-windows-x86]][ci-latest-build] <br> [![Status][ci-windows-x64]][ci-latest-build] |
-macOS | [![Status][ci-macos-12]][ci-latest-build] <br> [![Status][ci-macos-13]][ci-latest-build] |
+macOS | [![Status][ci-macos-13]][ci-latest-build] <br> [![Status][ci-macos-14]][ci-latest-build] |
Documentation | [![Status][ci-docs]][ci-latest-docs] |
Read the Docs | [](https://pcl.readthedocs.io/projects/tutorials/en/master/?badge=master) |
set(SUBSUBSYS_DESC "3D recognition framework")
set(SUBSUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml)
set(SUBSUBSYS_EXT_DEPS vtk openni)
+set(REASON "")
+set(DEFAULT OFF)
-# Default to not building for now
-if(${DEFAULT} STREQUAL "TRUE")
- set(DEFAULT FALSE)
+if(NOT TARGET Boost::filesystem)
+ set(REASON "Boost filesystem is not available.")
endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
return()
endif()
-include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
set(incs_fw
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/feature_wrapper/normal_estimator.h"
)
set(LIB_NAME "pcl_${SUBSUBSYS_NAME}")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSUBSYS_NAME} SOURCES ${srcs} ${impl_incs_pipeline} ${incs_utils} ${incs_fw} ${incs_fw_global} ${incs_fw_local} ${incc_tools_framework} ${incs_pipelines} ${incs_pc_source})
-target_link_libraries("${LIB_NAME}" pcl_apps pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search pcl_registration)
+target_link_libraries("${LIB_NAME}" pcl_apps pcl_common pcl_io pcl_filters pcl_keypoints pcl_recognition pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search pcl_registration)
if(WITH_OPENNI)
target_link_libraries("${LIB_NAME}" ${OPENNI_LIBRARIES})
loadOrGenerate(std::string& dir, std::string& model_path, ModelT& model)
{
const std::string pathmodel = dir + '/' + model.class_ + '/' + model.id_;
- bf::path trained_dir = pathmodel;
+ pcl_fs::path trained_dir = pathmodel;
model.views_.reset(new std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>);
model.poses_.reset(
model.assembled_.reset(new pcl::PointCloud<pcl::PointXYZ>);
uniform_sampling(model_path, 100000, *model.assembled_, model_scale_);
- if (bf::exists(trained_dir)) {
+ if (pcl_fs::exists(trained_dir)) {
// load views, poses and self-occlusions
std::vector<std::string> view_filenames;
- for (const auto& dir_entry : bf::directory_iterator(trained_dir)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(trained_dir)) {
// check if its a directory, then get models in it
- if (!(bf::is_directory(dir_entry))) {
+ if (!(pcl_fs::is_directory(dir_entry))) {
// check that it is a ply file and then add, otherwise ignore..
std::vector<std::string> strs;
std::vector<std::string> strs_;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- for (const auto& dir_entry : bf::directory_iterator(path)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(path)) {
std::string file_name = (dir_entry.path().filename()).string();
std::vector<std::string> strs;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- bf::path desc_dir = path;
- if (!bf::exists(desc_dir))
- bf::create_directory(desc_dir);
+ pcl_fs::path desc_dir = path;
+ if (!pcl_fs::exists(desc_dir))
+ pcl_fs::create_directory(desc_dir);
const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
pcl::io::savePCDFileBinary(path_view, *processed);
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- for (const auto& dir_entry : bf::directory_iterator(path)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(path)) {
std::string file_name = (dir_entry.path().filename()).string();
std::vector<std::string> strs;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- bf::path desc_dir = path;
- if (!bf::exists(desc_dir))
- bf::create_directory(desc_dir);
+ pcl_fs::path desc_dir = path;
+ if (!pcl_fs::exists(desc_dir))
+ pcl_fs::create_directory(desc_dir);
const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
pcl::io::savePCDFileBinary(path_view, *processed);
const std::string dir = path + "/roll_trans_" + std::to_string(view_id) + '_' +
std::to_string(d_id) + ".txt";
- const bf::path file_path = dir;
- if (bf::exists(file_path)) {
+ const pcl_fs::path file_path = dir;
+ if (pcl_fs::exists(file_path)) {
PersistenceUtils::readMatrixFromFile(dir, pose_matrix);
return true;
}
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- for (const auto& dir_entry : bf::directory_iterator(path)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(path)) {
std::string file_name = (dir_entry.path().filename()).string();
std::vector<std::string> strs;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- bf::path desc_dir = path;
- if (!bf::exists(desc_dir))
- bf::create_directory(desc_dir);
+ pcl_fs::path desc_dir = path;
+ if (!pcl_fs::exists(desc_dir))
+ pcl_fs::create_directory(desc_dir);
const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
pcl::io::savePCDFileBinary(path_view, *processed);
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- for (const auto& dir_entry : bf::directory_iterator(path)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(path)) {
std::string file_name = (dir_entry.path().filename()).string();
std::vector<std::string> strs;
std::string path =
source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
- bf::path desc_dir = path;
- if (!bf::exists(desc_dir))
- bf::create_directory(desc_dir);
+ pcl_fs::path desc_dir = path;
+ if (!pcl_fs::exists(desc_dir))
+ pcl_fs::create_directory(desc_dir);
const std::string path_view = path + "/view_" + std::to_string(v) + ".pcd";
pcl::io::savePCDFileBinary(path_view, *processed);
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/io/pcd_io.h>
#include <boost/algorithm/string.hpp>
-#include <boost/filesystem.hpp>
#include <fstream>
#include <pcl/apps/3d_rec_framework/utils/metrics.h>
// Instantiation
-template class pcl::rec_3d_framework::
+// GlobalClassifier is the parent class of GlobalNNPipeline. They must be instantiated
+// in this order, otherwise visibility attributes of the former are not applied
+// correctly.
+template class PCL_EXPORTS pcl::rec_3d_framework::GlobalClassifier<pcl::PointXYZ>;
+
+template class PCL_EXPORTS pcl::rec_3d_framework::
GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNPipeline<
- Metrics::HistIntersectionUnionDistance,
- pcl::PointXYZ,
- pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::
+template class PCL_EXPORTS
+ pcl::rec_3d_framework::GlobalNNPipeline<Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>;
-
-template class pcl::rec_3d_framework::GlobalClassifier<pcl::PointXYZ>;
#include <flann/algorithms/dist.h>
void
-getScenesInDirectory(bf::path& dir,
+getScenesInDirectory(pcl_fs::path& dir,
std::string& rel_path_so_far,
std::vector<std::string>& relative_paths)
{
// list models in MODEL_FILES_DIR_ and return list
- for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir)) {
// check if its a directory, then get models in it
- if (bf::is_directory(dir_entry)) {
+ if (pcl_fs::is_directory(dir_entry)) {
std::string so_far =
rel_path_so_far + (dir_entry.path().filename()).string() + "/";
- bf::path curr_path = dir_entry.path();
+ pcl_fs::path curr_path = dir_entry.path();
getScenesInDirectory(curr_path, so_far, relative_paths);
}
else {
{
// read mians scenes
- bf::path ply_files_dir = scenes_dir;
+ pcl_fs::path ply_files_dir = scenes_dir;
std::vector<std::string> files;
std::string start;
getScenesInDirectory(ply_files_dir, start, files);
}
void
-getModelsInDirectory(bf::path& dir,
+getModelsInDirectory(pcl_fs::path& dir,
std::string& rel_path_so_far,
std::vector<std::string>& relative_paths,
std::string& ext)
{
- for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir)) {
// check if its a directory, then get models in it
- if (bf::is_directory(dir_entry)) {
+ if (pcl_fs::is_directory(dir_entry)) {
std::string so_far =
rel_path_so_far + (dir_entry.path().filename()).string() + "/";
- bf::path curr_path = dir_entry.path();
+ pcl_fs::path curr_path = dir_entry.path();
getModelsInDirectory(curr_path, so_far, relative_paths, ext);
}
else {
return -1;
}
- bf::path models_dir_path = path;
- if (!bf::exists(models_dir_path)) {
+ pcl_fs::path models_dir_path = path;
+ if (!pcl_fs::exists(models_dir_path)) {
PCL_ERROR("Models dir path %s does not exist, use -models_dir [dir] option\n",
path.c_str());
return -1;
std::vector<std::string> files;
std::string start;
std::string ext = std::string("ply");
- bf::path dir = models_dir_path;
+ pcl_fs::path dir = models_dir_path;
getModelsInDirectory(dir, start, files, ext);
assert(files.size() == 4);
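The hunks above swap the bf (boost::filesystem) alias for pcl_fs, pulled in via pcl/common/pcl_filesystem.h as shown in the include change earlier in this patch; presumably pcl_fs maps to either std::filesystem or boost::filesystem depending on how PCL was configured. A minimal usage sketch under that assumption, restricted to the calls that appear in these hunks:

// sketch only; assumes pcl_fs resolves to std::filesystem or boost::filesystem,
// depending on how PCL was configured
#include <pcl/common/pcl_filesystem.h>

#include <iostream>

int main()
{
  const pcl_fs::path dir{"models"};
  if (!pcl_fs::exists(dir))
    pcl_fs::create_directory(dir);
  for (const auto& dir_entry : pcl_fs::directory_iterator(dir))
    if (!pcl_fs::is_directory(dir_entry))
      std::cout << dir_entry.path().filename().string() << '\n';
  return 0;
}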
set(SUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml stereo 2d)
set(SUBSYS_OPT_DEPS openni vtk ${QTX})
-set(DEFAULT FALSE)
-set(REASON "Disabled by default")
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${SUBSYS_OPT_DEPS})
if(NOT build)
set(CMAKE_AUTORCC ON)
set(CMAKE_AUTOUIC ON)
+list(APPEND CMAKE_AUTOUIC_SEARCH_PATHS "src")
+
+if(VTK_FOUND)
+ set(incs "include/pcl/${SUBSYS_NAME}/render_views_tesselated_sphere.h")
+ set(srcs "src/render_views_tesselated_sphere.cpp")
+endif()
+
+if(QHULL_FOUND)
+ set(incs
+ "include/pcl/${SUBSYS_NAME}/dominant_plane_segmentation.h"
+ "include/pcl/${SUBSYS_NAME}/timer.h"
+ ${incs}
+ )
+ set(impl_incs "include/pcl/${SUBSYS_NAME}/impl/dominant_plane_segmentation.hpp")
+ set(srcs "src/dominant_plane_segmentation.cpp" ${srcs})
+endif()
+
+set(LIB_NAME "pcl_${SUBSYS_NAME}")
+PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${impl_incs} ${incs})
+target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search)
+PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC})
+# Install include files
+PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
+PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl" ${impl_incs})
+
# to be filled with all targets the apps subsystem
set(PCL_APPS_ALL_TARGETS)
-include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
PCL_ADD_EXECUTABLE(pcl_test_search_speed COMPONENT ${SUBSYS_NAME} SOURCES src/test_search.cpp)
target_link_libraries(pcl_test_search_speed pcl_common pcl_io pcl_search pcl_kdtree pcl_visualization)
PCL_ADD_EXECUTABLE(pcl_nn_classification_example COMPONENT ${SUBSYS_NAME} SOURCES src/nn_classification_example.cpp)
target_link_libraries(pcl_nn_classification_example pcl_common pcl_io pcl_features pcl_kdtree)
+target_include_directories(pcl_nn_classification_example PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_pyramid_surface_matching COMPONENT ${SUBSYS_NAME} SOURCES src/pyramid_surface_matching.cpp)
target_link_libraries(pcl_pyramid_surface_matching pcl_common pcl_io pcl_features pcl_registration pcl_filters)
endif()
if(VTK_FOUND)
- set(incs "include/pcl/${SUBSYS_NAME}/render_views_tesselated_sphere.h")
- set(srcs "src/render_views_tesselated_sphere.cpp")
-
PCL_ADD_EXECUTABLE(pcl_ppf_object_recognition COMPONENT ${SUBSYS_NAME} SOURCES src/ppf_object_recognition.cpp)
target_link_libraries(pcl_ppf_object_recognition pcl_common pcl_io pcl_filters pcl_features pcl_registration pcl_visualization pcl_sample_consensus pcl_segmentation)
PCL_ADD_EXECUTABLE(pcl_face_trainer COMPONENT ${SUBSYS_NAME} SOURCES src/face_detection/face_trainer.cpp)
target_link_libraries(pcl_face_trainer pcl_features pcl_recognition pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface pcl_keypoints pcl_ml pcl_search pcl_kdtree)
- PCL_ADD_EXECUTABLE(pcl_fs_face_detector COMPONENT ${SUBSYS_NAME} SOURCES src/face_detection//filesystem_face_detection.cpp BUNDLE)
+ PCL_ADD_EXECUTABLE(pcl_fs_face_detector COMPONENT ${SUBSYS_NAME} SOURCES src/face_detection/filesystem_face_detection.cpp BUNDLE)
target_link_libraries(pcl_fs_face_detector pcl_features pcl_recognition pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface pcl_keypoints pcl_ml pcl_search pcl_kdtree)
+ target_include_directories(pcl_fs_face_detector PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_stereo_ground_segmentation COMPONENT ${SUBSYS_NAME} SOURCES src/stereo_ground_segmentation.cpp)
target_link_libraries(pcl_stereo_ground_segmentation pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_features pcl_stereo)
BUNDLE)
target_link_libraries(pcl_manual_registration pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface pcl_registration ${QTX}::Widgets)
+ target_include_directories(pcl_manual_registration PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_pcd_video_player
COMPONENT
src/pcd_video_player/pcd_video_player.ui
BUNDLE)
- target_link_libraries(pcl_pcd_video_player pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface ${QTX}::Widgets)
+ target_link_libraries(pcl_pcd_video_player pcl_common pcl_io pcl_registration pcl_visualization pcl_segmentation pcl_features pcl_surface ${QTX}::Widgets)
+ target_include_directories(pcl_pcd_video_player PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
endif()
if(WITH_OPENNI)
target_link_libraries(pcl_openni_octree_compression pcl_common pcl_io pcl_filters pcl_visualization pcl_octree)
if(HAVE_PNG)
- PCL_ADD_EXECUTABLE(pcl_openni_organized_compression COMPONENT ${SUBSYS_NAME} SOURCES src/openni_organized_compression.cpp BUNDLE)
- target_link_libraries(pcl_openni_organized_compression pcl_common pcl_io pcl_filters pcl_visualization pcl_octree)
+ PCL_ADD_EXECUTABLE(pcl_openni_organized_compression COMPONENT ${SUBSYS_NAME} SOURCES src/openni_organized_compression.cpp BUNDLE)
+ target_link_libraries(pcl_openni_organized_compression pcl_common pcl_io pcl_filters pcl_visualization pcl_octree)
endif()
PCL_ADD_EXECUTABLE(pcl_openni_shift_to_depth_conversion COMPONENT ${SUBSYS_NAME} SOURCES src/openni_shift_to_depth_conversion.cpp BUNDLE)
PCL_ADD_EXECUTABLE(pcl_openni_face_detector COMPONENT ${SUBSYS_NAME} SOURCES src/face_detection//openni_face_detection.cpp src/face_detection//openni_frame_source.cpp BUNDLE)
target_link_libraries(pcl_openni_face_detector pcl_features pcl_recognition pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface pcl_keypoints pcl_ml pcl_search pcl_kdtree)
+ target_include_directories(pcl_openni_face_detector PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
if(QT_FOUND AND HAVE_QVTK)
# OpenNI Passthrough application demo
COMPONENT
${SUBSYS_NAME}
SOURCES
+ include/pcl/apps/openni_passthrough_qt.h
include/pcl/apps/openni_passthrough.h
src/openni_passthrough.cpp
src/openni_passthrough.ui)
target_link_libraries(pcl_openni_passthrough pcl_common pcl_io pcl_filters pcl_visualization ${QTX}::Widgets)
-
- list(APPEND CMAKE_AUTOUIC_SEARCH_PATHS "src")
+ target_include_directories(pcl_openni_passthrough PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
# OpenNI Organized Connected Component application demo
PCL_ADD_EXECUTABLE(pcl_organized_segmentation_demo
src/organized_segmentation_demo.ui
BUNDLE)
target_link_libraries(pcl_organized_segmentation_demo pcl_common pcl_io pcl_visualization pcl_segmentation pcl_features pcl_surface ${QTX}::Widgets)
+ target_include_directories(pcl_organized_segmentation_demo PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
endif()
if(QHULL_FOUND)
- PCL_ADD_EXECUTABLE(pcl_openni_3d_convex_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_3d_convex_hull.cpp BUNDLE)
- target_link_libraries(pcl_openni_3d_convex_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface)
-
- PCL_ADD_EXECUTABLE(pcl_openni_3d_concave_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_3d_concave_hull.cpp BUNDLE)
- target_link_libraries(pcl_openni_3d_concave_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface)
-
- PCL_ADD_EXECUTABLE(pcl_openni_tracking COMPONENT ${SUBSYS_NAME} SOURCES src/openni_tracking.cpp BUNDLE)
- target_link_libraries(pcl_openni_tracking pcl_common pcl_io pcl_surface pcl_visualization pcl_filters pcl_features pcl_segmentation pcl_tracking pcl_search)
-
- PCL_ADD_EXECUTABLE(pcl_openni_planar_convex_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_planar_convex_hull.cpp BUNDLE)
- target_link_libraries(pcl_openni_planar_convex_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface)
-
- PCL_ADD_EXECUTABLE(pcl_ni_linemod COMPONENT ${SUBSYS_NAME} SOURCES src/ni_linemod.cpp BUNDLE)
- target_link_libraries(pcl_ni_linemod pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface pcl_search)
+ PCL_ADD_EXECUTABLE(pcl_openni_3d_convex_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_3d_convex_hull.cpp BUNDLE)
+ target_link_libraries(pcl_openni_3d_convex_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface)
+
+ PCL_ADD_EXECUTABLE(pcl_openni_3d_concave_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_3d_concave_hull.cpp BUNDLE)
+ target_link_libraries(pcl_openni_3d_concave_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface)
+
+ PCL_ADD_EXECUTABLE(pcl_openni_tracking COMPONENT ${SUBSYS_NAME} SOURCES src/openni_tracking.cpp BUNDLE)
+ target_link_libraries(pcl_openni_tracking pcl_common pcl_io pcl_surface pcl_visualization pcl_filters pcl_features pcl_segmentation pcl_tracking pcl_search)
+
+ PCL_ADD_EXECUTABLE(pcl_openni_planar_convex_hull COMPONENT ${SUBSYS_NAME} SOURCES src/openni_planar_convex_hull.cpp BUNDLE)
+ target_link_libraries(pcl_openni_planar_convex_hull pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_surface)
+
+ PCL_ADD_EXECUTABLE(pcl_ni_linemod COMPONENT ${SUBSYS_NAME} SOURCES src/ni_linemod.cpp BUNDLE)
+ target_link_libraries(pcl_ni_linemod pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_surface pcl_search)
+ target_include_directories(pcl_ni_linemod PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
endif() # QHULL_FOUND
PCL_ADD_EXECUTABLE(pcl_ni_agast COMPONENT ${SUBSYS_NAME} SOURCES src/ni_agast.cpp BUNDLE)
target_link_libraries(pcl_ni_agast pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_keypoints pcl_surface pcl_search)
+ target_include_directories(pcl_ni_agast PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_ni_brisk COMPONENT ${SUBSYS_NAME} SOURCES src/ni_brisk.cpp BUNDLE)
target_link_libraries(pcl_ni_brisk pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_sample_consensus pcl_features pcl_keypoints pcl_surface pcl_search)
+ target_include_directories(pcl_ni_brisk PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_ni_susan COMPONENT ${SUBSYS_NAME} SOURCES src/ni_susan.cpp BUNDLE)
target_link_libraries(pcl_ni_susan pcl_common pcl_visualization pcl_features pcl_keypoints pcl_search)
+ target_include_directories(pcl_ni_susan PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_ni_trajkovic COMPONENT ${SUBSYS_NAME} SOURCES src/ni_trajkovic.cpp BUNDLE)
target_link_libraries(pcl_ni_trajkovic pcl_common pcl_visualization pcl_features pcl_keypoints pcl_search)
+ target_include_directories(pcl_ni_trajkovic PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
PCL_ADD_EXECUTABLE(pcl_openni_klt COMPONENT ${SUBSYS_NAME} SOURCES src/openni_klt.cpp BUNDLE)
- target_link_libraries(pcl_openni_klt pcl_common pcl_io pcl_visualization pcl_tracking)
+ target_link_libraries(pcl_openni_klt pcl_common pcl_io pcl_keypoints pcl_visualization pcl_tracking)
endif() # WITH_OPENNI
endif() # VTK_FOUND
# OpenGL and GLUT
if(OPENGL_FOUND AND GLUT_FOUND)
- if(NOT WIN32)
- include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
- endif()
PCL_ADD_EXECUTABLE(pcl_grabcut_2d COMPONENT ${SUBSYS_NAME} SOURCES src/grabcut_2d.cpp BUNDLE)
if(APPLE)
set(_glut_target ${GLUT_glut_LIBRARY})
topological_sort(PCL_APPS_MODULES_NAMES PCL_APPS_ _DEPENDS)
sort_relative(PCL_APPS_MODULES_NAMES_UNSORTED PCL_APPS_MODULES_NAMES PCL_APPS_MODULES_DIRS)
foreach(subdir ${PCL_APPS_MODULES_DIRS})
-add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/${subdir}")
+ add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/${subdir}")
endforeach()
-if(QHULL_FOUND)
- set(incs
- "include/pcl/${SUBSYS_NAME}/dominant_plane_segmentation.h"
- "include/pcl/${SUBSYS_NAME}/timer.h"
- ${incs}
- )
- set(impl_incs "include/pcl/${SUBSYS_NAME}/impl/dominant_plane_segmentation.hpp")
- set(srcs "src/dominant_plane_segmentation.cpp" ${srcs})
-endif()
-
-set(LIB_NAME "pcl_${SUBSYS_NAME}")
-PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${impl_incs} ${incs})
-target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search)
-PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC})
-# Install include files
-PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
-PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl" ${impl_incs})
-
if(CMAKE_GENERATOR_IS_IDE)
set(SUBSYS_TARGET_NAME APPS_BUILD)
else()
set(SUBSUBSYS_NAME cloud_composer)
set(SUBSUBSYS_DESC "Cloud Composer - Application for Manipulating Point Clouds")
-set(SUBSUBSYS_DEPS common io visualization filters apps)
+set(SUBSUBSYS_DEPS common io visualization features filters apps)
set(SUBSUBSYS_EXT_DEPS vtk ${QTX})
+set(REASON "")
+set(DEFAULT OFF)
-# QVTK?
-if(NOT HAVE_QVTK)
- set(DEFAULT AUTO_OFF)
- set(REASON "Cloud composer requires QVTK")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
+# Have Qt?
+if("${QTX}" STREQUAL "")
+ set(REASON "Cloud composer requires Qt.")
endif()
-#Default to not building for now
-if("${DEFAULT}" STREQUAL "TRUE")
- set(DEFAULT FALSE)
+if(NOT HAVE_QVTK)
+ set(REASON "VTK was not built with Qt support.")
endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
return()
endif()
-include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
#Create subdirectory for plugin libs
set(CLOUD_COMPOSER_PLUGIN_DIR "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/cloud_composer_plugins")
make_directory("${CLOUD_COMPOSER_PLUGIN_DIR}")
PCL_ADD_LIBRARY(pcl_cc_tool_interface COMPONENT ${SUBSUBSYS_NAME} SOURCES ${INTERFACE_HEADERS} ${INTERFACE_SOURCES})
-target_link_libraries(pcl_cc_tool_interface pcl_common pcl_filters pcl_search pcl_visualization ${QTX}::Widgets)
+target_link_libraries(pcl_cc_tool_interface pcl_common pcl_features pcl_filters pcl_search pcl_visualization ${QTX}::Widgets)
set(PCL_LIB_TYPE ${PCL_LIB_TYPE_ORIGIN})
set(SUBSUBSYS_NAME in_hand_scanner)
set(SUBSUBSYS_DESC "In-hand scanner for small objects")
-set(SUBSUBSYS_DEPS common features io kdtree apps)
-set(SUBSUBSYS_LIBS pcl_common pcl_features pcl_io pcl_kdtree)
+set(SUBSUBSYS_DEPS common geometry features io kdtree)
+set(SUBSUBSYS_LIBS pcl_common pcl_geometry pcl_features pcl_io pcl_kdtree)
set(SUBSUBSYS_EXT_DEPS ${QTX} OpenGL OpenGL_GLU openni)
+set(REASON "")
+set(DEFAULT OFF)
-################################################################################
-
-# Default to not building for now
-if(${DEFAULT} STREQUAL "TRUE")
- set(DEFAULT FALSE)
+# Have Qt?
+if("${QTX}" STREQUAL "")
+ set(REASON "Cloud composer requires Qt.")
endif()
pcl_subsubsys_option(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
################################################################################
set(INCS
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/common_types.h"
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/icp.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/input_data_processing.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/integration.h"
return()
endif()
-include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
PCL_ADD_EXECUTABLE(
BUNDLE)
target_link_libraries("${EXE_NAME}" ${SUBSUBSYS_LIBS} ${OPENGL_LIBRARIES} ${QTX}::Concurrent ${QTX}::Widgets ${QTX}::OpenGL)
+target_include_directories(${EXE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
if (${QTX} MATCHES "Qt6")
target_link_libraries("${EXE_NAME}" ${QTX}::OpenGLWidgets)
endif()
BUNDLE)
target_link_libraries(pcl_offline_integration ${SUBSUBSYS_LIBS} ${OPENGL_LIBRARIES} ${QTX}::Concurrent ${QTX}::Widgets ${QTX}::OpenGL)
+target_include_directories(pcl_offline_integration PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
if (${QTX} MATCHES "Qt6")
target_link_libraries(pcl_offline_integration ${QTX}::OpenGLWidgets)
endif()
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-#include <boost/math/special_functions/fpclassify.hpp>
-#include <boost/signals2/connection.hpp>
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-#include <Eigen/Cholesky>
-#include <Eigen/Core>
-#include <Eigen/Geometry>
void
drawText();
- /** \brief Actual implementeation of startGrabber (needed so it can be run in a
+ /** \brief Actual implementation of startGrabber (needed so it can be run in a
* different thread and doesn't block the application when starting up). */
void
startGrabberImpl();
/** \brief How to draw the mesh. */
enum MeshRepresentation {
MR_POINTS, /**< Draw the points. */
- MR_EDGES, /**< Wireframe represen of the mesh. */
+ MR_EDGES, /**< Wireframe representation of the mesh. */
MR_FACES /**< Draw the faces of the mesh without edges. */
};
#include <pcl/apps/in_hand_scanner/integration.h>
#include <pcl/apps/in_hand_scanner/offline_integration.h>
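+// pcl_fs is PCL's filesystem namespace alias (std::filesystem or boost::filesystem, depending on the build configuration)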
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/transforms.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/io/pcd_io.h>
#include <boost/algorithm/string/case_conv.hpp>
-#include <boost/filesystem.hpp>
#include <QApplication>
#include <QFileDialog>
const std::string& extension,
std::vector<std::string>& files) const
{
- if (path_dir.empty() || !boost::filesystem::exists(path_dir)) {
+ if (path_dir.empty() || !pcl_fs::exists(path_dir)) {
std::cerr << "ERROR in offline_integration.cpp: Invalid path\n '" << path_dir
<< "'\n";
return (false);
}
- boost::filesystem::directory_iterator it_end;
- for (boost::filesystem::directory_iterator it(path_dir); it != it_end; ++it) {
+ pcl_fs::directory_iterator it_end;
+ for (pcl_fs::directory_iterator it(path_dir); it != it_end; ++it) {
if (!is_directory(it->status()) &&
boost::algorithm::to_upper_copy(it->path().extension().string()) ==
boost::algorithm::to_upper_copy(extension)) {
#pragma once
+#include <pcl/common/pcl_filesystem.h>
+
namespace face_detection_apps_utils {
inline bool
}
inline void
-getFilesInDirectory(bf::path& dir,
+getFilesInDirectory(pcl_fs::path& dir,
std::string& rel_path_so_far,
std::vector<std::string>& relative_paths,
std::string& ext)
{
- for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir)) {
    // check if it's a directory, then get models in it
- if (bf::is_directory(dir_entry)) {
+ if (pcl_fs::is_directory(dir_entry)) {
std::string so_far =
rel_path_so_far + (dir_entry.path().filename()).string() + "/";
- bf::path curr_path = dir_entry.path();
+ pcl_fs::path curr_path = dir_entry.path();
getFilesInDirectory(curr_path, so_far, relative_paths, ext);
}
else {
// Compute the plane coefficients
Eigen::Vector4f model_coefficients;
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
model_coefficients[0] = table_coefficients_->values[0];
model_coefficients[1] = table_coefficients_->values[1];
// Compute the plane coefficients
Eigen::Vector4f model_coefficients;
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
model_coefficients[0] = table_coefficients_->values[0];
model_coefficients[1] = table_coefficients_->values[1];
// Compute the plane coefficients
Eigen::Vector4f model_coefficients;
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
model_coefficients[0] = table_coefficients_->values[0];
model_coefficients[1] = table_coefficients_->values[1];
// Compute the plane coefficients
Eigen::Vector4f model_coefficients;
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
model_coefficients[0] = table_coefficients_->values[0];
model_coefficients[1] = table_coefficients_->values[1];
*/
#include <pcl/common/common.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/time.h>
#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <boost/filesystem.hpp>
-
#include <QMainWindow>
#include <QMutex>
#include <QTimer>
QString dir_;
std::vector<std::string> pcd_files_;
- std::vector<boost::filesystem::path> pcd_paths_;
+ std::vector<pcl_fs::path> pcd_paths_;
/** \brief The current displayed frame */
unsigned int current_frame_;
set(SUBSUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search apps)
set(SUBSUBSYS_EXT_DEPS vtk ${QTX})
set(REASON "")
+set(DEFAULT OFF)
# QVTK?
if(NOT HAVE_QVTK)
- set(DEFAULT AUTO_OFF)
set(REASON "VTK was not built with Qt support.")
-elseif(NOT ${DEFAULT} STREQUAL "AUTO_OFF")
- set(DEFAULT TRUE)
- set(REASON)
endif()
-# Default to not building for now
-if(${DEFAULT} STREQUAL "TRUE")
-  set(DEFAULT FALSE)
-endif()
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
return()
endif()
-include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
# Set Qt files and resources here
set(uis
main_window.ui
)
set(incs
- "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h"
+ "include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/main_window.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/scene_tree.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/parameter_dialog.h"
"include/pcl/${SUBSYS_NAME}/${SUBSUBSYS_NAME}/thread_controller.h"
${incs}
${impl_incs})
-target_link_libraries("${EXE_NAME}" pcl_common pcl_io pcl_kdtree pcl_filters pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search ${QTX}::Widgets)
+target_link_libraries("${EXE_NAME}" pcl_common pcl_io pcl_kdtree pcl_filters pcl_registration pcl_visualization pcl_segmentation pcl_surface pcl_features pcl_sample_consensus pcl_search ${QTX}::Widgets)
+target_include_directories(${EXE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
# Install include files
PCL_ADD_INCLUDES("${SUBSUBSYS_NAME}" "${SUBSUBSYS_NAME}" ${incs})
set(SUBSUBSYS_NAME point_cloud_editor)
set(SUBSUBSYS_DESC "Point Cloud Editor - Simple editor for 3D point clouds")
set(SUBSUBSYS_DEPS common filters io apps)
-set(SUBSUBSYS_EXT_DEPS vtk ${QTX} OpenGL)
-
-# Default to not building for now
-if(${DEFAULT} STREQUAL "TRUE")
- set(DEFAULT FALSE)
-endif()
+set(SUBSUBSYS_EXT_DEPS vtk ${QTX} OpenGL OpenGL_GLU)
+set(REASON "")
+set(DEFAULT OFF)
PCL_SUBSUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSUBSYS_NAME}" "${SUBSUBSYS_DESC}" ${DEFAULT} "${REASON}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSUBSYS_NAME} PARENT_NAME ${SUBSYS_NAME} DEPS ${SUBSUBSYS_DEPS} EXT_DEPS ${SUBSUBSYS_EXT_DEPS})
src/denoiseParameterForm.cpp
)
-include_directories(
- "${CMAKE_CURRENT_BINARY_DIR}"
- "${CMAKE_CURRENT_SOURCE_DIR}/include"
-)
-
set(EXE_NAME "pcl_${SUBSUBSYS_NAME}")
PCL_ADD_EXECUTABLE(
${EXE_NAME}
${INCS})
target_link_libraries("${EXE_NAME}" ${QTX}::Widgets ${QTX}::OpenGL ${OPENGL_LIBRARIES} ${BOOST_LIBRARIES} pcl_common pcl_io pcl_filters)
+target_include_directories(${EXE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
if (${QTX} MATCHES "Qt6")
target_link_libraries("${EXE_NAME}" ${QTX}::OpenGLWidgets)
endif()
/// @brief highlight all the points in the rubber band.
  /// @details draw the cloud using a stencil buffer. During this time, the
- /// points that are highlighted will not be recorded by the selecion object.
+ /// points that are highlighted will not be recorded by the selection object.
/// @param viewport the viewport obtained from GL
void
highlightPoints (GLint* viewport) const;
/// @file selection.h
/// @details A Selection object maintains the set of indices of points from a
-/// point cloud that have been identifed by the selection tools.
+/// point cloud that have been identified by the selection tools.
/// @author Yue Li and Matthew Hielsberg
#pragma once
/// mouse screen coordinates. Then depending on the passed modifiers, the
/// transformation matrix is computed correspondingly. If CONTROL is pressed
/// the selection will be translated (panned) parallel to the view plane. If
- /// ALT is pressed the selection witll be translated along the z-axis
+ /// ALT is pressed the selection will be translated along the z-axis
  /// perpendicular to the view plane. If no key modifier is pressed the
/// selection will be rotated.
/// @param x the x value of the mouse screen coordinates.
* Author: Aitor Aldoma
*/
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/recognition/face_detection/rf_face_detector_trainer.h>
#include <pcl/apps/face_detection/face_detection_apps_utils.h>
// clang-format on
-#include <boost/filesystem.hpp>
-
-namespace bf = boost::filesystem;
-
bool SHOW_GT = false;
bool VIDEO = false;
bool result = face_detection_apps_utils::readMatrixFromFile(pose_file, pose_mat);
if (result) {
- Eigen::Vector3f ea = pose_mat.block<3, 3>(0, 0).eulerAngles(0, 1, 2);
+ Eigen::Vector3f ea = pose_mat.topLeftCorner<3, 3>().eulerAngles(0, 1, 2);
Eigen::Vector3f trans_vector =
Eigen::Vector3f(pose_mat(0, 3), pose_mat(1, 3), pose_mat(2, 3));
std::cout << ea << std::endl;
Eigen::AngleAxisf(ea[1], Eigen::Vector3f::UnitY()) *
Eigen::AngleAxisf(ea[2], Eigen::Vector3f::UnitZ());
- // matrixxx = pose_mat.block<3,3>(0,0);
+ // matrixxx = pose_mat.topLeftCorner<3,3>();
vec = matrixxx * vec;
cylinder_coeff.values[3] = vec[0];
// recognize all files in directory...
std::string start;
std::string ext = std::string("pcd");
- bf::path dir = test_directory;
+ pcl_fs::path dir = test_directory;
std::vector<std::string> files;
face_detection_apps_utils::getFilesInDirectory(dir, start, files, ext);
OpenNIFrameSource::OpenNIFrameSource camera;
OpenNIFrameSource::PointCloudPtr scene_vis;
- pcl::visualization::PCLVisualizer vis("Face dection");
+ pcl::visualization::PCLVisualizer vis("Face detection");
vis.addCoordinateSystem(0.1, "global");
// keyboard callback to stop getting frames and finalize application
* Christian Potthast (potthast@usc.edu)
*/
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <boost/filesystem.hpp>
-
#include <mutex>
using namespace std::chrono_literals;
#endif
using namespace pcl::console;
-using namespace boost::filesystem;
template <typename PointType>
class OpenNIGrabFrame {
bool paused,
bool visualizer)
{
- boost::filesystem::path path(filename);
+ pcl_fs::path path(filename);
if (filename.empty()) {
dir_name_ = ".";
else {
dir_name_ = path.parent_path().string();
- if (!dir_name_.empty() && !boost::filesystem::exists(path.parent_path())) {
+ if (!dir_name_.empty() && !pcl_fs::exists(path.parent_path())) {
std::cerr << "directory \"" << path.parent_path() << "\" does not exist!\n";
exit(1);
}
#include <pcl/visualization/image_viewer.h>
#include <pcl/point_types.h>
-#include <boost/filesystem.hpp>
-
#include <mutex>
using namespace pcl::console;
-using namespace boost::filesystem;
class OpenNIGrabFrame {
public:
depth_image->getWidth(),
depth_image->getHeight(),
std::numeric_limits<unsigned short>::min(),
- // Scale so that the colors look brigher on screen
+ // Scale so that the colors look brighter on screen
std::numeric_limits<unsigned short>::max() / 10,
true);
viewer_.showCloud(getLatestPointCloud());
- boost::asio::io_service io_service;
+ boost::asio::io_context io_service;
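+    // io_context replaces io_service, which is deprecated in newer Boost.Asio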
tcp::endpoint endpoint(tcp::v4(), static_cast<unsigned short>(port_));
tcp::acceptor acceptor(io_service, endpoint);
tcp::socket socket(io_service);
if (bEnDecode) {
// ENCODING
try {
- boost::asio::io_service io_service;
+ boost::asio::io_context io_service;
tcp::endpoint endpoint(tcp::v4(), 6666);
tcp::acceptor acceptor(io_service, endpoint);
std::cout << "Waiting for connection.." << std::endl;
- acceptor.accept(*socketStream.rdbuf());
+ acceptor.accept(socketStream.rdbuf()->socket());
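+    // accept into the streambuf's underlying socket; older Asio accepted the streambuf directly, newer versions expose it via socket()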
std::cout << "Connected!" << std::endl;
if (bEnDecode) {
// ENCODING
try {
- boost::asio::io_service io_service;
+ boost::asio::io_context io_service;
tcp::endpoint endpoint(tcp::v4(), 6666);
tcp::acceptor acceptor(io_service, endpoint);
std::cout << "Waiting for connection.." << std::endl;
- acceptor.accept(*socketStream.rdbuf());
+ acceptor.accept(socketStream.rdbuf()->socket());
std::cout << "Connected!" << std::endl;
#include <vtkRendererCollection.h>
#include <vtkRenderWindow.h>
-// #include <boost/filesystem.hpp> // for boost::filesystem::directory_iterator
-#include <boost/signals2/connection.hpp> // for boost::signals2::connection
-
void
displayPlanarRegions(
std::vector<pcl::PlanarRegion<PointT>,
QFileDialog::ShowDirsOnly |
QFileDialog::DontResolveSymlinks);
- boost::filesystem::directory_iterator end_itr;
+ pcl_fs::directory_iterator end_itr;
- if (boost::filesystem::is_directory(dir_.toStdString())) {
- for (boost::filesystem::directory_iterator itr(dir_.toStdString()); itr != end_itr;
- ++itr) {
+ if (pcl_fs::is_directory(dir_.toStdString())) {
+ for (pcl_fs::directory_iterator itr(dir_.toStdString()); itr != end_itr; ++itr) {
std::string ext = itr->path().extension().string();
if (ext == ".pcd") {
pcd_files_.push_back(itr->path().string());
PCDVideoPlayer::selectFilesButtonPressed()
{
pcd_files_.clear(); // Clear the std::vector
- pcd_paths_.clear(); // Clear the boost filesystem paths
+ pcd_paths_.clear(); // Clear the filesystem paths
QStringList qt_pcd_files = QFileDialog::getOpenFileNames(
this, "Select one or more PCD files to open", "/home", "PointClouds (*.pcd)");
trans_view(x, y) = float(view_transform->GetElement(x, y));
// NOTE: vtk view coordinate system is different than the standard camera
- // coordinates (z forward, y down, x right) thus, the fliping in y and z
+ // coordinates (z forward, y down, x right) thus, the flipping in y and z
for (auto& point : cloud->points) {
point.getVector4fMap() = trans_view * point.getVector4fMap();
point.y *= -1.0f;
transOCtoCC->Concatenate(cam_tmp->GetViewTransformMatrix());
// NOTE: vtk view coordinate system is different than the standard camera
- // coordinates (z forward, y down, x right) thus, the fliping in y and z
+ // coordinates (z forward, y down, x right) thus, the flipping in y and z
vtkSmartPointer<vtkMatrix4x4> cameraSTD = vtkSmartPointer<vtkMatrix4x4>::New();
cameraSTD->Identity();
cameraSTD->SetElement(0, 0, 1);
#include <pcl/common/centroid.h> // for computeMeanAndCovarianceMatrix
#include <pcl/common/distances.h>
#include <pcl/common/intersections.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/io/pcd_io.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/ModelCoefficients.h>
-#include <boost/filesystem.hpp> // for directory_iterator
-
#include <mutex>
using PointT = pcl::PointXYZRGB;
// Get list of stereo files
std::vector<std::string> left_images;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr(argv[1]); itr != end_itr; ++itr) {
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr(argv[1]); itr != end_itr; ++itr) {
left_images.push_back(itr->path().string());
}
sort(left_images.begin(), left_images.end());
std::vector<std::string> right_images;
- for (boost::filesystem::directory_iterator itr(argv[2]); itr != end_itr; ++itr) {
+ for (pcl_fs::directory_iterator itr(argv[2]); itr != end_itr; ++itr) {
right_images.push_back(itr->path().string());
}
sort(right_images.begin(), right_images.end());
set(SUBSYS_NAME benchmarks)
set(SUBSYS_DESC "Point cloud library benchmarks")
set(SUBSYS_DEPS common filters features search kdtree io)
-set(DEFAULT OFF)
-set(build TRUE)
-set(REASON "Disabled by default")
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
if(NOT build)
return()
ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd"
"${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+PCL_ADD_BENCHMARK(filters_radius_outlier_removal FILES filters/radius_outlier_removal.cpp
+ LINK_WITH pcl_io pcl_filters
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd"
+ "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+
PCL_ADD_BENCHMARK(search_radius_search FILES search/radius_search.cpp
LINK_WITH pcl_io pcl_search
ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd"
--- /dev/null
+#include <pcl/filters/radius_outlier_removal.h>
+#include <pcl/io/pcd_io.h> // for PCDReader
+
+#include <benchmark/benchmark.h>
+
+static void
+BM_RadiusOutlierRemoval(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+
+ pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror;
+ ror.setInputCloud(cloud);
+ ror.setRadiusSearch(0.02);
+ ror.setMinNeighborsInRadius(14);
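+  // points with fewer than 14 neighbors within the 2 cm search radius are removed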
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_voxelized(
+ new pcl::PointCloud<pcl::PointXYZ>);
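+  // output cloud is allocated once and reused, so allocation is not part of the timed loop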
+ for (auto _ : state) {
+ // This code gets timed
+ ror.filter(*cloud_voxelized);
+ }
+}
+
+static void
+BM_RadiusOutlierRemovalOpenMP(benchmark::State& state, const std::string& file)
+{
+ // Perform setup here
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PCDReader reader;
+ reader.read(file, *cloud);
+
+ pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror;
+ ror.setInputCloud(cloud);
+ ror.setRadiusSearch(0.02);
+ ror.setMinNeighborsInRadius(14);
+ ror.setNumberOfThreads(0);
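+  // 0 = choose the number of threads automatically (all available threads when OpenMP is enabled)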
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_voxelized(
+ new pcl::PointCloud<pcl::PointXYZ>);
+ for (auto _ : state) {
+ // This code gets timed
+ ror.filter(*cloud_voxelized);
+ }
+}
+
+int
+main(int argc, char** argv)
+{
+ constexpr int runs = 100;
+
+ if (argc < 3) {
+ std::cerr
+ << "No test files given. Please download `table_scene_mug_stereo_textured.pcd` "
+ "and `milk_cartoon_all_small_clorox.pcd`, and pass their paths to the test."
+ << std::endl;
+ return (-1);
+ }
+
+ benchmark::RegisterBenchmark(
+ "BM_RadiusOutlierRemoval_milk", &BM_RadiusOutlierRemoval, argv[2])
+ ->Unit(benchmark::kMillisecond)
+ ->Iterations(runs);
+
+ benchmark::RegisterBenchmark(
+ "BM_RadiusOutlierRemoval_mug", &BM_RadiusOutlierRemoval, argv[1])
+ ->Unit(benchmark::kMillisecond)
+ ->Iterations(runs);
+
+ benchmark::RegisterBenchmark(
+ "BM_RadiusOutlierRemovalOpenMP_milk", &BM_RadiusOutlierRemovalOpenMP, argv[2])
+ ->Unit(benchmark::kMillisecond)
+ ->Iterations(runs);
+
+ benchmark::RegisterBenchmark(
+ "BM_RadiusOutlierRemovalOpenMP_mug", &BM_RadiusOutlierRemovalOpenMP, argv[1])
+ ->Unit(benchmark::kMillisecond)
+ ->Iterations(runs);
+
+ benchmark::Initialize(&argc, argv);
+ benchmark::RunSpecifiedBenchmarks();
+}
int searchIdx = indices[radiusSearchIdx++ % indices.size()];
double searchRadius = 0.1; // or any fixed radius like 0.05
- std::vector<int> k_indices;
+ pcl::Indices k_indices;
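+    // pcl::Indices (std::vector<pcl::index_t>) is the index container expected by PCL's search API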
std::vector<float> k_sqr_distances;
auto start_time = std::chrono::high_resolution_clock::now();
flann/flann.hpp
HINTS
${PC_FLANN_INCLUDE_DIRS}
- ${FLANN_ROOT}
- $ENV{FLANN_ROOT}
PATHS
$ENV{PROGRAMFILES}/Flann
$ENV{PROGRAMW6432}/Flann
flann_cpp
HINTS
${PC_FLANN_LIBRARY_DIRS}
- ${FLANN_ROOT}
- $ENV{FLANN_ROOT}
PATHS
$ENV{PROGRAMFILES}/Flann
$ENV{PROGRAMW6432}/Flann
flann_cpp-gd flann_cppd
HINTS
${PC_FLANN_LIBRARY_DIRS}
- ${FLANN_ROOT}
- $ENV{FLANN_ROOT}
PATHS
$ENV{PROGRAMFILES}/Flann
$ENV{PROGRAMW6432}/Flann
flann_cpp_s
HINTS
${PC_FLANN_LIBRARY_DIRS}
- ${FLANN_ROOT}
- $ENV{FLANN_ROOT}
PATHS
$ENV{PROGRAMFILES}/Flann
$ENV{PROGRAMW6432}/Flann
flann_cpp_s-gd flann_cpp_sd
HINTS
${PC_FLANN_LIBRARY_DIRS}
- ${FLANN_ROOT}
- $ENV{FLANN_ROOT}
PATHS
$ENV{PROGRAMFILES}/Flann
$ENV{PROGRAMW6432}/Flann
+++ /dev/null
-# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
-# file Copyright.txt or https://cmake.org/licensing for details.
-
-#.rst:
-# FindGLEW
-# --------
-#
-# Find the OpenGL Extension Wrangler Library (GLEW)
-#
-# IMPORTED Targets
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the :prop_tgt:`IMPORTED` target ``GLEW::GLEW``,
-# if GLEW has been found.
-#
-# Result Variables
-# ^^^^^^^^^^^^^^^^
-#
-# This module defines the following variables:
-#
-# ::
-#
-# GLEW_INCLUDE_DIRS - include directories for GLEW
-# GLEW_LIBRARIES - libraries to link against GLEW
-# GLEW_FOUND - true if GLEW has been found and can be used
-
-find_path(GLEW_INCLUDE_DIR GL/glew.h)
-
-if(NOT GLEW_LIBRARY)
- find_library(GLEW_LIBRARY_RELEASE NAMES GLEW glew32 glew glew32s PATH_SUFFIXES lib64 libx32)
- find_library(GLEW_LIBRARY_DEBUG NAMES GLEWd glew32d glewd PATH_SUFFIXES lib64)
-
- include(SelectLibraryConfigurations)
- select_library_configurations(GLEW)
-endif ()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(GLEW
- REQUIRED_VARS GLEW_INCLUDE_DIR GLEW_LIBRARY)
-
-if(GLEW_FOUND)
- set(GLEW_INCLUDE_DIRS ${GLEW_INCLUDE_DIR})
-
- if(NOT GLEW_LIBRARIES)
- set(GLEW_LIBRARIES ${GLEW_LIBRARY})
- endif()
-
- if (NOT TARGET GLEW::GLEW)
- add_library(GLEW::GLEW UNKNOWN IMPORTED)
- set_target_properties(GLEW::GLEW PROPERTIES
- INTERFACE_INCLUDE_DIRECTORIES "${GLEW_INCLUDE_DIRS}")
-
- if(GLEW_LIBRARY_RELEASE)
- set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
- set_target_properties(GLEW::GLEW PROPERTIES IMPORTED_LOCATION_RELEASE "${GLEW_LIBRARY_RELEASE}")
- endif()
-
- if(GLEW_LIBRARY_DEBUG)
- set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
- set_target_properties(GLEW::GLEW PROPERTIES IMPORTED_LOCATION_DEBUG "${GLEW_LIBRARY_DEBUG}")
- endif()
-
- if(NOT GLEW_LIBRARY_RELEASE AND NOT GLEW_LIBRARY_DEBUG)
- set_property(TARGET GLEW::GLEW APPEND PROPERTY IMPORTED_LOCATION "${GLEW_LIBRARY}")
- endif()
- endif()
-endif()
-
-mark_as_advanced(GLEW_INCLUDE_DIR)
\ No newline at end of file
endif()
find_path(GTEST_INCLUDE_DIR gtest/gtest.h
- HINTS "${GTEST_ROOT}" "$ENV{GTEST_ROOT}"
PATHS "$ENV{PROGRAMFILES}/gtest" "$ENV{PROGRAMW6432}/gtest"
PATHS "$ENV{PROGRAMFILES}/gtest-1.7.0" "$ENV{PROGRAMW6432}/gtest-1.7.0"
PATH_SUFFIXES gtest include/gtest include)
find_path(GTEST_SRC_DIR src/gtest-all.cc
- HINTS "${GTEST_ROOT}" "$ENV{GTEST_ROOT}"
PATHS "$ENV{PROGRAMFILES}/gtest" "$ENV{PROGRAMW6432}/gtest"
PATHS "$ENV{PROGRAMFILES}/gtest-1.7.0" "$ENV{PROGRAMW6432}/gtest-1.7.0"
+ PATH /usr/src/googletest
PATH /usr/src/googletest/googletest
PATH /usr/src/gtest
PATH_SUFFIXES gtest src/gtest googletest/googletest)
+++ /dev/null
-# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
-# file Copyright.txt or https://cmake.org/licensing for details.
-
-#[=======================================================================[.rst:
-
-FindOpenMP is added to PCL local Cmake modules due to bug in earlier version.
-TODO: Can be removed when Cmake 3.11 is required.
-See https://gitlab.kitware.com/cmake/cmake/-/issues/20364
-
-FindOpenMP
-----------
-
-Finds Open Multi-Processing (OpenMP) support.
-
-This module can be used to detect OpenMP support in a compiler. If
-the compiler supports OpenMP, the flags required to compile with
-OpenMP support are returned in variables for the different languages.
-The variables may be empty if the compiler does not need a special
-flag to support OpenMP.
-
-Variables
-^^^^^^^^^
-
-The module exposes the components ``C``, ``CXX``, and ``Fortran``.
-Each of these controls the various languages to search OpenMP support for.
-
-Depending on the enabled components the following variables will be set:
-
-``OpenMP_FOUND``
- Variable indicating that OpenMP flags for all requested languages have been found.
- If no components are specified, this is true if OpenMP settings for all enabled languages
- were detected.
-``OpenMP_VERSION``
- Minimal version of the OpenMP standard detected among the requested languages,
- or all enabled languages if no components were specified.
-
-This module will set the following variables per language in your
-project, where ``<lang>`` is one of C, CXX, or Fortran:
-
-``OpenMP_<lang>_FOUND``
- Variable indicating if OpenMP support for ``<lang>`` was detected.
-``OpenMP_<lang>_FLAGS``
- OpenMP compiler flags for ``<lang>``, separated by spaces.
-``OpenMP_<lang>_INCLUDE_DIRS``
- Directories that must be added to the header search path for ``<lang>``
- when using OpenMP.
-
-For linking with OpenMP code written in ``<lang>``, the following
-variables are provided:
-
-``OpenMP_<lang>_LIB_NAMES``
- :ref:`;-list <CMake Language Lists>` of libraries for OpenMP programs for ``<lang>``.
-``OpenMP_<libname>_LIBRARY``
- Location of the individual libraries needed for OpenMP support in ``<lang>``.
-``OpenMP_<lang>_LIBRARIES``
- A list of libraries needed to link with OpenMP code written in ``<lang>``.
-
-Additionally, the module provides :prop_tgt:`IMPORTED` targets:
-
-``OpenMP::OpenMP_<lang>``
- Target for using OpenMP from ``<lang>``.
-
-Specifically for Fortran, the module sets the following variables:
-
-``OpenMP_Fortran_HAVE_OMPLIB_HEADER``
- Boolean indicating if OpenMP is accessible through ``omp_lib.h``.
-``OpenMP_Fortran_HAVE_OMPLIB_MODULE``
- Boolean indicating if OpenMP is accessible through the ``omp_lib`` Fortran module.
-
-The module will also try to provide the OpenMP version variables:
-
-``OpenMP_<lang>_SPEC_DATE``
- Date of the OpenMP specification implemented by the ``<lang>`` compiler.
-``OpenMP_<lang>_VERSION_MAJOR``
- Major version of OpenMP implemented by the ``<lang>`` compiler.
-``OpenMP_<lang>_VERSION_MINOR``
- Minor version of OpenMP implemented by the ``<lang>`` compiler.
-``OpenMP_<lang>_VERSION``
- OpenMP version implemented by the ``<lang>`` compiler.
-
-The specification date is formatted as given in the OpenMP standard:
-``yyyymm`` where ``yyyy`` and ``mm`` represents the year and month of
-the OpenMP specification implemented by the ``<lang>`` compiler.
-
-For some compilers, it may be necessary to add a header search path to find
-the relevant OpenMP headers. This location may be language-specific. Where
-this is needed, the module may attempt to find the location, but it can be
-provided directly by setting the ``OpenMP_<lang>_INCLUDE_DIR`` cache variable.
-Note that this variable is an _input_ control to the module. Project code
-should use the ``OpenMP_<lang>_INCLUDE_DIRS`` _output_ variable if it needs
-to know what include directories are needed.
-#]=======================================================================]
-
-cmake_policy(PUSH)
-cmake_policy(SET CMP0012 NEW) # if() recognizes numbers and booleans
-cmake_policy(SET CMP0054 NEW) # if() quoted variables not dereferenced
-cmake_policy(SET CMP0057 NEW) # if IN_LIST
-
-function(_OPENMP_FLAG_CANDIDATES LANG)
- if(NOT OpenMP_${LANG}_FLAG)
- unset(OpenMP_FLAG_CANDIDATES)
-
- set(OMP_FLAG_GNU "-fopenmp")
- set(OMP_FLAG_Clang "-fopenmp=libomp" "-fopenmp=libiomp5" "-fopenmp" "-Xclang -fopenmp")
- set(OMP_FLAG_AppleClang "-Xclang -fopenmp")
- set(OMP_FLAG_HP "+Oopenmp")
- if(WIN32)
- set(OMP_FLAG_Intel "-Qopenmp")
- elseif(CMAKE_${LANG}_COMPILER_ID STREQUAL "Intel" AND
- "${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS "15.0.0.20140528")
- set(OMP_FLAG_Intel "-openmp")
- else()
- set(OMP_FLAG_Intel "-qopenmp")
- endif()
- set(OMP_FLAG_MSVC "-openmp")
- set(OMP_FLAG_PathScale "-openmp")
- set(OMP_FLAG_NAG "-openmp")
- set(OMP_FLAG_Absoft "-openmp")
- set(OMP_FLAG_PGI "-mp")
- set(OMP_FLAG_Flang "-fopenmp")
- set(OMP_FLAG_SunPro "-xopenmp")
- set(OMP_FLAG_XL "-qsmp=omp")
- # Cray compiler activate OpenMP with -h omp, which is enabled by default.
- set(OMP_FLAG_Cray " " "-h omp")
-
- # If we know the correct flags, use those
- if(DEFINED OMP_FLAG_${CMAKE_${LANG}_COMPILER_ID})
- set(OpenMP_FLAG_CANDIDATES "${OMP_FLAG_${CMAKE_${LANG}_COMPILER_ID}}")
- # Fall back to reasonable default tries otherwise
- else()
- set(OpenMP_FLAG_CANDIDATES "-openmp" "-fopenmp" "-mp" " ")
- endif()
- set(OpenMP_${LANG}_FLAG_CANDIDATES "${OpenMP_FLAG_CANDIDATES}" PARENT_SCOPE)
- else()
- set(OpenMP_${LANG}_FLAG_CANDIDATES "${OpenMP_${LANG}_FLAG}" PARENT_SCOPE)
- endif()
-endfunction()
-
-# sample openmp source code to test
-set(OpenMP_C_CXX_TEST_SOURCE
-"
-#include <omp.h>
-int main(void) {
-#ifdef _OPENMP
- omp_get_max_threads();
- return 0;
-#elif defined(__HIP_DEVICE_COMPILE__)
- return 0;
-#else
- breaks_on_purpose
-#endif
-}
-")
-
-# in Fortran, an implementation may provide an omp_lib.h header
-# or omp_lib module, or both (OpenMP standard, section 3.1)
-# Furthermore !$ is the Fortran equivalent of #ifdef _OPENMP (OpenMP standard, 2.2.2)
-# Without the conditional compilation, some compilers (e.g. PGI) might compile OpenMP code
-# while not actually enabling OpenMP, building code sequentially
-set(OpenMP_Fortran_TEST_SOURCE
- "
- program test
- @OpenMP_Fortran_INCLUDE_LINE@
- !$ integer :: n
- n = omp_get_num_threads()
- end program test
- "
-)
-
-function(_OPENMP_WRITE_SOURCE_FILE LANG SRC_FILE_CONTENT_VAR SRC_FILE_NAME SRC_FILE_FULLPATH)
- set(WORK_DIR ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/FindOpenMP)
- if("${LANG}" STREQUAL "C")
- set(SRC_FILE "${WORK_DIR}/${SRC_FILE_NAME}.c")
- file(WRITE "${SRC_FILE}" "${OpenMP_C_CXX_${SRC_FILE_CONTENT_VAR}}")
- elseif("${LANG}" STREQUAL "CXX")
- set(SRC_FILE "${WORK_DIR}/${SRC_FILE_NAME}.cpp")
- file(WRITE "${SRC_FILE}" "${OpenMP_C_CXX_${SRC_FILE_CONTENT_VAR}}")
- elseif("${LANG}" STREQUAL "Fortran")
- set(SRC_FILE "${WORK_DIR}/${SRC_FILE_NAME}.f90")
- file(WRITE "${SRC_FILE}_in" "${OpenMP_Fortran_${SRC_FILE_CONTENT_VAR}}")
- configure_file("${SRC_FILE}_in" "${SRC_FILE}" @ONLY)
- endif()
- set(${SRC_FILE_FULLPATH} "${SRC_FILE}" PARENT_SCOPE)
-endfunction()
-
-include(CMakeParseImplicitLinkInfo)
-
-function(_OPENMP_GET_FLAGS LANG FLAG_MODE OPENMP_FLAG_VAR OPENMP_LIB_NAMES_VAR)
- _OPENMP_FLAG_CANDIDATES("${LANG}")
- _OPENMP_WRITE_SOURCE_FILE("${LANG}" "TEST_SOURCE" OpenMPTryFlag _OPENMP_TEST_SRC)
-
- unset(OpenMP_VERBOSE_COMPILE_OPTIONS)
- separate_arguments(OpenMP_VERBOSE_OPTIONS NATIVE_COMMAND "${CMAKE_${LANG}_VERBOSE_FLAG}")
- foreach(_VERBOSE_OPTION IN LISTS OpenMP_VERBOSE_OPTIONS)
- if(NOT _VERBOSE_OPTION MATCHES "^-Wl,")
- list(APPEND OpenMP_VERBOSE_COMPILE_OPTIONS ${_VERBOSE_OPTION})
- endif()
- endforeach()
-
- foreach(OPENMP_FLAG IN LISTS OpenMP_${LANG}_FLAG_CANDIDATES)
- set(OPENMP_FLAGS_TEST "${OPENMP_FLAG}")
- if(OpenMP_VERBOSE_COMPILE_OPTIONS)
- string(APPEND OPENMP_FLAGS_TEST " ${OpenMP_VERBOSE_COMPILE_OPTIONS}")
- endif()
- string(REGEX REPLACE "[-/=+]" "" OPENMP_PLAIN_FLAG "${OPENMP_FLAG}")
- try_compile( OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG} ${CMAKE_BINARY_DIR} ${_OPENMP_TEST_SRC}
- CMAKE_FLAGS "-DCOMPILE_DEFINITIONS:STRING=${OPENMP_FLAGS_TEST}"
- LINK_LIBRARIES ${CMAKE_${LANG}_VERBOSE_FLAG}
- OUTPUT_VARIABLE OpenMP_TRY_COMPILE_OUTPUT
- )
-
- if(OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG})
- set("${OPENMP_FLAG_VAR}" "${OPENMP_FLAG}" PARENT_SCOPE)
-
- if(CMAKE_${LANG}_VERBOSE_FLAG)
- unset(OpenMP_${LANG}_IMPLICIT_LIBRARIES)
- unset(OpenMP_${LANG}_IMPLICIT_LINK_DIRS)
- unset(OpenMP_${LANG}_IMPLICIT_FWK_DIRS)
- unset(OpenMP_${LANG}_LOG_VAR)
-
- file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
- "Detecting ${LANG} OpenMP compiler ABI info compiled with the following output:\n${OpenMP_TRY_COMPILE_OUTPUT}\n\n")
-
- cmake_parse_implicit_link_info("${OpenMP_TRY_COMPILE_OUTPUT}"
- OpenMP_${LANG}_IMPLICIT_LIBRARIES
- OpenMP_${LANG}_IMPLICIT_LINK_DIRS
- OpenMP_${LANG}_IMPLICIT_FWK_DIRS
- OpenMP_${LANG}_LOG_VAR
- "${CMAKE_${LANG}_IMPLICIT_OBJECT_REGEX}"
- )
-
- file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
- "Parsed ${LANG} OpenMP implicit link information from above output:\n${OpenMP_${LANG}_LOG_VAR}\n\n")
-
- unset(_OPENMP_LIB_NAMES)
- foreach(_OPENMP_IMPLICIT_LIB IN LISTS OpenMP_${LANG}_IMPLICIT_LIBRARIES)
- get_filename_component(_OPENMP_IMPLICIT_LIB_DIR "${_OPENMP_IMPLICIT_LIB}" DIRECTORY)
- get_filename_component(_OPENMP_IMPLICIT_LIB_NAME "${_OPENMP_IMPLICIT_LIB}" NAME)
- get_filename_component(_OPENMP_IMPLICIT_LIB_PLAIN "${_OPENMP_IMPLICIT_LIB}" NAME_WE)
- string(REGEX REPLACE "([][+.*?()^$])" "\\\\\\1" _OPENMP_IMPLICIT_LIB_PLAIN_ESC "${_OPENMP_IMPLICIT_LIB_PLAIN}")
- string(REGEX REPLACE "([][+.*?()^$])" "\\\\\\1" _OPENMP_IMPLICIT_LIB_PATH_ESC "${_OPENMP_IMPLICIT_LIB}")
- if(NOT ( "${_OPENMP_IMPLICIT_LIB}" IN_LIST CMAKE_${LANG}_IMPLICIT_LINK_LIBRARIES
- OR "${CMAKE_${LANG}_STANDARD_LIBRARIES}" MATCHES "(^| )(-Wl,)?(-l)?(${_OPENMP_IMPLICIT_LIB_PLAIN_ESC}|${_OPENMP_IMPLICIT_LIB_PATH_ESC})( |$)"
- OR "${CMAKE_${LANG}_LINK_EXECUTABLE}" MATCHES "(^| )(-Wl,)?(-l)?(${_OPENMP_IMPLICIT_LIB_PLAIN_ESC}|${_OPENMP_IMPLICIT_LIB_PATH_ESC})( |$)" ) )
- if(_OPENMP_IMPLICIT_LIB_DIR)
- set(OpenMP_${_OPENMP_IMPLICIT_LIB_PLAIN}_LIBRARY "${_OPENMP_IMPLICIT_LIB}" CACHE FILEPATH
- "Path to the ${_OPENMP_IMPLICIT_LIB_PLAIN} library for OpenMP")
- else()
- find_library(OpenMP_${_OPENMP_IMPLICIT_LIB_PLAIN}_LIBRARY
- NAMES "${_OPENMP_IMPLICIT_LIB_NAME}"
- DOC "Path to the ${_OPENMP_IMPLICIT_LIB_PLAIN} library for OpenMP"
- HINTS ${OpenMP_${LANG}_IMPLICIT_LINK_DIRS}
- CMAKE_FIND_ROOT_PATH_BOTH
- NO_DEFAULT_PATH
- )
- endif()
- mark_as_advanced(OpenMP_${_OPENMP_IMPLICIT_LIB_PLAIN}_LIBRARY)
- list(APPEND _OPENMP_LIB_NAMES ${_OPENMP_IMPLICIT_LIB_PLAIN})
- endif()
- endforeach()
- set("${OPENMP_LIB_NAMES_VAR}" "${_OPENMP_LIB_NAMES}" PARENT_SCOPE)
- else()
- # We do not know how to extract implicit OpenMP libraries for this compiler.
- # Assume that it handles them automatically, e.g. the Intel Compiler on
- # Windows should put the dependency in its object files.
- set("${OPENMP_LIB_NAMES_VAR}" "" PARENT_SCOPE)
- endif()
- break()
- elseif(CMAKE_${LANG}_COMPILER_ID STREQUAL "AppleClang"
- AND CMAKE_${LANG}_COMPILER_VERSION VERSION_GREATER_EQUAL "7.0")
-
- # Check for separate OpenMP library on AppleClang 7+
- find_library(OpenMP_libomp_LIBRARY
- NAMES omp gomp iomp5
- HINTS ${CMAKE_${LANG}_IMPLICIT_LINK_DIRECTORIES}
- )
- mark_as_advanced(OpenMP_libomp_LIBRARY)
-
- if(OpenMP_libomp_LIBRARY)
- # Try without specifying include directory first. We only want to
- # explicitly add a search path if the header can't be found on the
- # default header search path already.
- try_compile( OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG} ${CMAKE_BINARY_DIR} ${_OPENMP_TEST_SRC}
- CMAKE_FLAGS "-DCOMPILE_DEFINITIONS:STRING=${OPENMP_FLAGS_TEST}"
- LINK_LIBRARIES ${CMAKE_${LANG}_VERBOSE_FLAG} ${OpenMP_libomp_LIBRARY}
- OUTPUT_VARIABLE OpenMP_TRY_COMPILE_OUTPUT
- )
- if(NOT OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG})
- find_path(OpenMP_${LANG}_INCLUDE_DIR omp.h)
- mark_as_advanced(OpenMP_${LANG}_INCLUDE_DIR)
- set(OpenMP_${LANG}_INCLUDE_DIR "${OpenMP_${LANG}_INCLUDE_DIR}" PARENT_SCOPE)
- if(OpenMP_${LANG}_INCLUDE_DIR)
- try_compile( OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG} ${CMAKE_BINARY_DIR} ${_OPENMP_TEST_SRC}
- CMAKE_FLAGS "-DCOMPILE_DEFINITIONS:STRING=${OPENMP_FLAGS_TEST}"
- "-DINCLUDE_DIRECTORIES:STRING=${OpenMP_${LANG}_INCLUDE_DIR}"
- LINK_LIBRARIES ${CMAKE_${LANG}_VERBOSE_FLAG} ${OpenMP_libomp_LIBRARY}
- OUTPUT_VARIABLE OpenMP_TRY_COMPILE_OUTPUT
- )
- endif()
- endif()
- if(OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG})
- set("${OPENMP_FLAG_VAR}" "${OPENMP_FLAG}" PARENT_SCOPE)
- set("${OPENMP_LIB_NAMES_VAR}" "libomp" PARENT_SCOPE)
- break()
- endif()
- endif()
- elseif(CMAKE_${LANG}_COMPILER_ID STREQUAL "Clang" AND WIN32)
- # Check for separate OpenMP library for Clang on Windows
- find_library(OpenMP_libomp_LIBRARY
- NAMES libomp libgomp libiomp5
- HINTS ${CMAKE_${LANG}_IMPLICIT_LINK_DIRECTORIES}
- )
- mark_as_advanced(OpenMP_libomp_LIBRARY)
- if(OpenMP_libomp_LIBRARY)
- try_compile( OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG} ${CMAKE_BINARY_DIR} ${_OPENMP_TEST_SRC}
- CMAKE_FLAGS "-DCOMPILE_DEFINITIONS:STRING=${OPENMP_FLAGS_TEST}"
- LINK_LIBRARIES ${CMAKE_${LANG}_VERBOSE_FLAG} ${OpenMP_libomp_LIBRARY}
- OUTPUT_VARIABLE OpenMP_TRY_COMPILE_OUTPUT
- )
- if(OpenMP_COMPILE_RESULT_${FLAG_MODE}_${OPENMP_PLAIN_FLAG})
- set("${OPENMP_FLAG_VAR}" "${OPENMP_FLAG}" PARENT_SCOPE)
- set("${OPENMP_LIB_NAMES_VAR}" "libomp" PARENT_SCOPE)
- break()
- endif()
- endif()
- else()
- file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
- "Detecting ${LANG} OpenMP failed with the following output:\n${OpenMP_TRY_COMPILE_OUTPUT}\n\n")
- endif()
- set("${OPENMP_LIB_NAMES_VAR}" "NOTFOUND" PARENT_SCOPE)
- set("${OPENMP_FLAG_VAR}" "NOTFOUND" PARENT_SCOPE)
- endforeach()
-
- unset(OpenMP_VERBOSE_COMPILE_OPTIONS)
-endfunction()
-
-set(OpenMP_C_CXX_CHECK_VERSION_SOURCE
-"
-#include <stdio.h>
-#include <omp.h>
-const char ompver_str[] = { 'I', 'N', 'F', 'O', ':', 'O', 'p', 'e', 'n', 'M',
- 'P', '-', 'd', 'a', 't', 'e', '[',
- ('0' + ((_OPENMP/100000)%10)),
- ('0' + ((_OPENMP/10000)%10)),
- ('0' + ((_OPENMP/1000)%10)),
- ('0' + ((_OPENMP/100)%10)),
- ('0' + ((_OPENMP/10)%10)),
- ('0' + ((_OPENMP/1)%10)),
- ']', '\\0' };
-int main(void)
-{
- puts(ompver_str);
- return 0;
-}
-")
-
-set(OpenMP_Fortran_CHECK_VERSION_SOURCE
-"
- program omp_ver
- @OpenMP_Fortran_INCLUDE_LINE@
- integer, parameter :: zero = ichar('0')
- integer, parameter :: ompv = openmp_version
- character, dimension(24), parameter :: ompver_str =&
- (/ 'I', 'N', 'F', 'O', ':', 'O', 'p', 'e', 'n', 'M', 'P', '-',&
- 'd', 'a', 't', 'e', '[',&
- char(zero + mod(ompv/100000, 10)),&
- char(zero + mod(ompv/10000, 10)),&
- char(zero + mod(ompv/1000, 10)),&
- char(zero + mod(ompv/100, 10)),&
- char(zero + mod(ompv/10, 10)),&
- char(zero + mod(ompv/1, 10)), ']' /)
- print *, ompver_str
- end program omp_ver
-")
-
-function(_OPENMP_GET_SPEC_DATE LANG SPEC_DATE)
- _OPENMP_WRITE_SOURCE_FILE("${LANG}" "CHECK_VERSION_SOURCE" OpenMPCheckVersion _OPENMP_TEST_SRC)
-
- unset(_includeDirFlags)
- if(OpenMP_${LANG}_INCLUDE_DIR)
- set(_includeDirFlags "-DINCLUDE_DIRECTORIES:STRING=${OpenMP_${LANG}_INCLUDE_DIR}")
- endif()
-
- set(BIN_FILE "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/FindOpenMP/ompver_${LANG}.bin")
- string(REGEX REPLACE "[-/=+]" "" OPENMP_PLAIN_FLAG "${OPENMP_FLAG}")
- try_compile(OpenMP_SPECTEST_${LANG}_${OPENMP_PLAIN_FLAG} "${CMAKE_BINARY_DIR}" "${_OPENMP_TEST_SRC}"
- CMAKE_FLAGS "-DCOMPILE_DEFINITIONS:STRING=${OpenMP_${LANG}_FLAGS}" ${_includeDirFlags}
- COPY_FILE ${BIN_FILE}
- OUTPUT_VARIABLE OpenMP_TRY_COMPILE_OUTPUT)
-
- if(${OpenMP_SPECTEST_${LANG}_${OPENMP_PLAIN_FLAG}})
- file(STRINGS ${BIN_FILE} specstr LIMIT_COUNT 1 REGEX "INFO:OpenMP-date")
- set(regex_spec_date ".*INFO:OpenMP-date\\[0*([^]]*)\\].*")
- if("${specstr}" MATCHES "${regex_spec_date}")
- set(${SPEC_DATE} "${CMAKE_MATCH_1}" PARENT_SCOPE)
- endif()
- else()
- file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
- "Detecting ${LANG} OpenMP version failed with the following output:\n${OpenMP_TRY_COMPILE_OUTPUT}\n\n")
- endif()
-endfunction()
-
-macro(_OPENMP_SET_VERSION_BY_SPEC_DATE LANG)
- set(OpenMP_SPEC_DATE_MAP
- # Preview versions
- "201611=5.0" # OpenMP 5.0 preview 1
- # Combined versions, 2.5 onwards
- "201811=5.0"
- "201511=4.5"
- "201307=4.0"
- "201107=3.1"
- "200805=3.0"
- "200505=2.5"
- # C/C++ version 2.0
- "200203=2.0"
- # Fortran version 2.0
- "200011=2.0"
- # Fortran version 1.1
- "199911=1.1"
- # C/C++ version 1.0 (there's no 1.1 for C/C++)
- "199810=1.0"
- # Fortran version 1.0
- "199710=1.0"
- )
- if(MSVC)
- list(APPEND OpenMP_SPEC_DATE_MAP "2019=2.0")
- endif()
-
- if(OpenMP_${LANG}_SPEC_DATE)
- string(REGEX MATCHALL "${OpenMP_${LANG}_SPEC_DATE}=([0-9]+)\\.([0-9]+)" _version_match "${OpenMP_SPEC_DATE_MAP}")
- else()
- set(_version_match "")
- endif()
- if(NOT _version_match STREQUAL "")
- set(OpenMP_${LANG}_VERSION_MAJOR ${CMAKE_MATCH_1})
- set(OpenMP_${LANG}_VERSION_MINOR ${CMAKE_MATCH_2})
- set(OpenMP_${LANG}_VERSION "${OpenMP_${LANG}_VERSION_MAJOR}.${OpenMP_${LANG}_VERSION_MINOR}")
- else()
- unset(OpenMP_${LANG}_VERSION_MAJOR)
- unset(OpenMP_${LANG}_VERSION_MINOR)
- unset(OpenMP_${LANG}_VERSION)
- endif()
- unset(_version_match)
- unset(OpenMP_SPEC_DATE_MAP)
-endmacro()
-
-foreach(LANG IN ITEMS C CXX)
- if(CMAKE_${LANG}_COMPILER_LOADED)
- if(NOT DEFINED OpenMP_${LANG}_FLAGS OR "${OpenMP_${LANG}_FLAGS}" STREQUAL "NOTFOUND"
- OR NOT DEFINED OpenMP_${LANG}_LIB_NAMES OR "${OpenMP_${LANG}_LIB_NAMES}" STREQUAL "NOTFOUND")
- _OPENMP_GET_FLAGS("${LANG}" "${LANG}" OpenMP_${LANG}_FLAGS_WORK OpenMP_${LANG}_LIB_NAMES_WORK)
- set(OpenMP_${LANG}_FLAGS "${OpenMP_${LANG}_FLAGS_WORK}"
- CACHE STRING "${LANG} compiler flags for OpenMP parallelization" FORCE)
- set(OpenMP_${LANG}_LIB_NAMES "${OpenMP_${LANG}_LIB_NAMES_WORK}"
- CACHE STRING "${LANG} compiler libraries for OpenMP parallelization" FORCE)
- mark_as_advanced(OpenMP_${LANG}_FLAGS OpenMP_${LANG}_LIB_NAMES)
- endif()
- endif()
-endforeach()
-
-if(CMAKE_Fortran_COMPILER_LOADED)
- if(NOT DEFINED OpenMP_Fortran_FLAGS OR "${OpenMP_Fortran_FLAGS}" STREQUAL "NOTFOUND"
- OR NOT DEFINED OpenMP_Fortran_LIB_NAMES OR "${OpenMP_Fortran_LIB_NAMES}" STREQUAL "NOTFOUND"
- OR NOT DEFINED OpenMP_Fortran_HAVE_OMPLIB_MODULE)
- set(OpenMP_Fortran_INCLUDE_LINE "use omp_lib\n implicit none")
- _OPENMP_GET_FLAGS("Fortran" "FortranHeader" OpenMP_Fortran_FLAGS_WORK OpenMP_Fortran_LIB_NAMES_WORK)
- if(OpenMP_Fortran_FLAGS_WORK)
- set(OpenMP_Fortran_HAVE_OMPLIB_MODULE TRUE CACHE BOOL INTERNAL "")
- endif()
-
- set(OpenMP_Fortran_FLAGS "${OpenMP_Fortran_FLAGS_WORK}"
- CACHE STRING "Fortran compiler flags for OpenMP parallelization")
- set(OpenMP_Fortran_LIB_NAMES "${OpenMP_Fortran_LIB_NAMES_WORK}"
- CACHE STRING "Fortran compiler libraries for OpenMP parallelization")
- mark_as_advanced(OpenMP_Fortran_FLAGS OpenMP_Fortran_LIB_NAMES)
- endif()
-
- if(NOT DEFINED OpenMP_Fortran_FLAGS OR "${OpenMP_Fortran_FLAGS}" STREQUAL "NOTFOUND"
- OR NOT DEFINED OpenMP_Fortran_LIB_NAMES OR "${OpenMP_Fortran_LIB_NAMES}" STREQUAL "NOTFOUND"
- OR NOT DEFINED OpenMP_Fortran_HAVE_OMPLIB_HEADER)
- set(OpenMP_Fortran_INCLUDE_LINE "implicit none\n include 'omp_lib.h'")
- _OPENMP_GET_FLAGS("Fortran" "FortranModule" OpenMP_Fortran_FLAGS_WORK OpenMP_Fortran_LIB_NAMES_WORK)
- if(OpenMP_Fortran_FLAGS_WORK)
- set(OpenMP_Fortran_HAVE_OMPLIB_HEADER TRUE CACHE BOOL INTERNAL "")
- endif()
-
- set(OpenMP_Fortran_FLAGS "${OpenMP_Fortran_FLAGS_WORK}"
- CACHE STRING "Fortran compiler flags for OpenMP parallelization")
-
- set(OpenMP_Fortran_LIB_NAMES "${OpenMP_Fortran_LIB_NAMES}"
- CACHE STRING "Fortran compiler libraries for OpenMP parallelization")
- endif()
-
- if(OpenMP_Fortran_HAVE_OMPLIB_MODULE)
- set(OpenMP_Fortran_INCLUDE_LINE "use omp_lib\n implicit none")
- else()
- set(OpenMP_Fortran_INCLUDE_LINE "implicit none\n include 'omp_lib.h'")
- endif()
-endif()
-
-if(NOT OpenMP_FIND_COMPONENTS)
- set(OpenMP_FINDLIST C CXX Fortran)
-else()
- set(OpenMP_FINDLIST ${OpenMP_FIND_COMPONENTS})
-endif()
-
-unset(_OpenMP_MIN_VERSION)
-
-include(FindPackageHandleStandardArgs)
-
-foreach(LANG IN LISTS OpenMP_FINDLIST)
- if(CMAKE_${LANG}_COMPILER_LOADED)
- if (NOT OpenMP_${LANG}_SPEC_DATE AND OpenMP_${LANG}_FLAGS)
- _OPENMP_GET_SPEC_DATE("${LANG}" OpenMP_${LANG}_SPEC_DATE_INTERNAL)
- set(OpenMP_${LANG}_SPEC_DATE "${OpenMP_${LANG}_SPEC_DATE_INTERNAL}" CACHE
- INTERNAL "${LANG} compiler's OpenMP specification date")
- endif()
- _OPENMP_SET_VERSION_BY_SPEC_DATE("${LANG}")
-
- set(OpenMP_${LANG}_FIND_QUIETLY ${OpenMP_FIND_QUIETLY})
- set(OpenMP_${LANG}_FIND_REQUIRED ${OpenMP_FIND_REQUIRED})
- set(OpenMP_${LANG}_FIND_VERSION ${OpenMP_FIND_VERSION})
- set(OpenMP_${LANG}_FIND_VERSION_EXACT ${OpenMP_FIND_VERSION_EXACT})
-
- set(_OPENMP_${LANG}_REQUIRED_VARS OpenMP_${LANG}_FLAGS)
- if("${OpenMP_${LANG}_LIB_NAMES}" STREQUAL "NOTFOUND")
- set(_OPENMP_${LANG}_REQUIRED_LIB_VARS OpenMP_${LANG}_LIB_NAMES)
- else()
- foreach(_OPENMP_IMPLICIT_LIB IN LISTS OpenMP_${LANG}_LIB_NAMES)
- list(APPEND _OPENMP_${LANG}_REQUIRED_LIB_VARS OpenMP_${_OPENMP_IMPLICIT_LIB}_LIBRARY)
- endforeach()
- endif()
-
- if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.17")
- find_package_handle_standard_args(OpenMP_${LANG}
- NAME_MISMATCHED
- REQUIRED_VARS OpenMP_${LANG}_FLAGS ${_OPENMP_${LANG}_REQUIRED_LIB_VARS}
- VERSION_VAR OpenMP_${LANG}_VERSION
- )
- else()
- find_package_handle_standard_args(OpenMP_${LANG}
- REQUIRED_VARS OpenMP_${LANG}_FLAGS ${_OPENMP_${LANG}_REQUIRED_LIB_VARS}
- VERSION_VAR OpenMP_${LANG}_VERSION
- )
- endif()
-
- if(OpenMP_${LANG}_FOUND)
- if(DEFINED OpenMP_${LANG}_VERSION)
- if(NOT _OpenMP_MIN_VERSION OR _OpenMP_MIN_VERSION VERSION_GREATER OpenMP_${LANG}_VERSION)
- set(_OpenMP_MIN_VERSION OpenMP_${LANG}_VERSION)
- endif()
- endif()
- set(OpenMP_${LANG}_LIBRARIES "")
- foreach(_OPENMP_IMPLICIT_LIB IN LISTS OpenMP_${LANG}_LIB_NAMES)
- list(APPEND OpenMP_${LANG}_LIBRARIES "${OpenMP_${_OPENMP_IMPLICIT_LIB}_LIBRARY}")
- endforeach()
- if(OpenMP_${LANG}_INCLUDE_DIR)
- set(OpenMP_${LANG}_INCLUDE_DIRS ${OpenMP_${LANG}_INCLUDE_DIR})
- else()
- set(OpenMP_${LANG}_INCLUDE_DIRS "")
- endif()
-
- if(NOT TARGET OpenMP::OpenMP_${LANG})
- add_library(OpenMP::OpenMP_${LANG} INTERFACE IMPORTED)
- endif()
- if(OpenMP_${LANG}_FLAGS)
- separate_arguments(_OpenMP_${LANG}_OPTIONS NATIVE_COMMAND "${OpenMP_${LANG}_FLAGS}")
- set_property(TARGET OpenMP::OpenMP_${LANG} PROPERTY
- INTERFACE_COMPILE_OPTIONS "$<$<COMPILE_LANGUAGE:${LANG}>:${_OpenMP_${LANG}_OPTIONS}>")
- unset(_OpenMP_${LANG}_OPTIONS)
- endif()
- if(OpenMP_${LANG}_INCLUDE_DIRS)
- set_property(TARGET OpenMP::OpenMP_${LANG} PROPERTY
- INTERFACE_INCLUDE_DIRECTORIES "$<BUILD_INTERFACE:${OpenMP_${LANG}_INCLUDE_DIRS}>")
- endif()
- if(OpenMP_${LANG}_LIBRARIES)
- set_property(TARGET OpenMP::OpenMP_${LANG} PROPERTY
- INTERFACE_LINK_LIBRARIES "${OpenMP_${LANG}_LIBRARIES}")
- endif()
- endif()
- endif()
-endforeach()
-
-unset(_OpenMP_REQ_VARS)
-foreach(LANG IN ITEMS C CXX Fortran)
- if((NOT OpenMP_FIND_COMPONENTS AND CMAKE_${LANG}_COMPILER_LOADED) OR LANG IN_LIST OpenMP_FIND_COMPONENTS)
- list(APPEND _OpenMP_REQ_VARS "OpenMP_${LANG}_FOUND")
- endif()
-endforeach()
-
-find_package_handle_standard_args(OpenMP
- REQUIRED_VARS ${_OpenMP_REQ_VARS}
- VERSION_VAR ${_OpenMP_MIN_VERSION}
- HANDLE_COMPONENTS)
-
-set(OPENMP_FOUND ${OpenMP_FOUND})
-
-if(CMAKE_Fortran_COMPILER_LOADED AND OpenMP_Fortran_FOUND)
- if(NOT DEFINED OpenMP_Fortran_HAVE_OMPLIB_MODULE)
- set(OpenMP_Fortran_HAVE_OMPLIB_MODULE FALSE CACHE BOOL INTERNAL "")
- endif()
- if(NOT DEFINED OpenMP_Fortran_HAVE_OMPLIB_HEADER)
- set(OpenMP_Fortran_HAVE_OMPLIB_HEADER FALSE CACHE BOOL INTERNAL "")
- endif()
-endif()
-
-if(NOT ( CMAKE_C_COMPILER_LOADED OR CMAKE_CXX_COMPILER_LOADED OR CMAKE_Fortran_COMPILER_LOADED ))
- message(SEND_ERROR "FindOpenMP requires the C, CXX or Fortran languages to be enabled")
-endif()
-
-unset(OpenMP_C_CXX_TEST_SOURCE)
-unset(OpenMP_Fortran_TEST_SOURCE)
-unset(OpenMP_C_CXX_CHECK_VERSION_SOURCE)
-unset(OpenMP_Fortran_CHECK_VERSION_SOURCE)
-unset(OpenMP_Fortran_INCLUDE_LINE)
-
-cmake_policy(POP)
/usr/include/openni
/usr/include/ni
/opt/local/include/ni
- "${OPENNI_ROOT}"
- "$ENV{OPENNI_ROOT}"
PATHS "$ENV{OPEN_NI_INSTALL_PATH${OPENNI_SUFFIX}}/Include"
PATH_SUFFIXES openni include Include)
HINTS ${PC_OPENNI_LIBDIR}
${PC_OPENNI_LIBRARY_DIRS}
/usr/lib
- "${OPENNI_ROOT}"
- "$ENV{OPENNI_ROOT}"
PATHS "$ENV{OPEN_NI_LIB${OPENNI_SUFFIX}}"
PATH_SUFFIXES lib Lib Lib64)
find_file(QHULL_HEADER
NAMES libqhull_r.h
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}" "${QHULL_INCLUDE_DIR}"
+ HINTS "${QHULL_INCLUDE_DIR}"
PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES qhull_r src/libqhull_r libqhull_r include)
find_library(QHULL_LIBRARY_SHARED
NAMES qhull_r qhull
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES project build bin lib)
find_library(QHULL_LIBRARY_DEBUG
NAMES qhull_rd qhull_d
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES project build bin lib debug/lib)
find_library(QHULL_LIBRARY_STATIC
NAMES qhullstatic_r
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES project build bin lib)
find_library(QHULL_LIBRARY_DEBUG_STATIC
NAMES qhullstatic_rd
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES project build bin lib debug/lib)
find_package(PkgConfig QUIET)
pkg_check_modules(PC_SPHINX sphinx-build)
-find_package(PythonInterp)
-
-if(PYTHONINTERP_FOUND)
- get_filename_component(PYTHON_DIR "${PYTHON_EXECUTABLE}" PATH)
+find_package(Python)
+if(Python_Interpreter_FOUND)
+ get_filename_component(PYTHON_DIR "${Python_EXECUTABLE}" PATH)
endif()
find_program(SPHINX_EXECUTABLE NAMES sphinx-build
FunctionEnd
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Uninstall sutff
+; Uninstall stuff
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
###########################################
endif()
endif()
-set(Boost_ADDITIONAL_VERSIONS
- "1.84.0" "1.84" "1.83.0" "1.83" "1.82.0" "1.82" "1.81.0" "1.81" "1.80.0" "1.80"
- "1.79.0" "1.79" "1.78.0" "1.78" "1.77.0" "1.77" "1.76.0" "1.76" "1.75.0" "1.75"
- "1.74.0" "1.74" "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
- "1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65")
-
-# Optional boost modules
-find_package(Boost 1.65.0 QUIET COMPONENTS serialization mpi)
-if(Boost_SERIALIZATION_FOUND)
- set(BOOST_SERIALIZATION_FOUND TRUE)
+if(CMAKE_CXX_STANDARD MATCHES "14")
+ # Optional boost modules
+ set(BOOST_OPTIONAL_MODULES serialization mpi)
+ # Required boost modules
+ set(BOOST_REQUIRED_MODULES filesystem iostreams system)
+else()
+ # Optional boost modules
+ set(BOOST_OPTIONAL_MODULES filesystem serialization mpi)
+ # Required boost modules
+ set(BOOST_REQUIRED_MODULES iostreams system)
endif()
-# Required boost modules
-set(BOOST_REQUIRED_MODULES filesystem iostreams system)
-find_package(Boost 1.65.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES})
+find_package(Boost 1.71.0 QUIET COMPONENTS ${BOOST_OPTIONAL_MODULES} CONFIG)
+find_package(Boost 1.71.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES} CONFIG)
if(Boost_FOUND)
set(BOOST_FOUND TRUE)
set(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE OFF CACHE BOOL "CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE")
endif()
-set(CUDA_FIND_QUIETLY TRUE)
-find_package(CUDA 9.0)
+include(CheckLanguage)
+check_language(CUDA)
+if(CMAKE_CUDA_COMPILER)
+ enable_language(CUDA)
+
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.17)
+ find_package(CUDAToolkit QUIET)
+ set(CUDA_TOOLKIT_INCLUDE ${CUDAToolkit_INCLUDE_DIRS})
+ else()
+ set(CUDA_FIND_QUIETLY TRUE)
+ find_package(CUDA 9.0)
+ endif()
+
+ set(CUDA_FOUND TRUE)
+ set(CUDA_VERSION_STRING ${CMAKE_CUDA_COMPILER_VERSION})
+else()
+ message(STATUS "No CUDA compiler found")
+endif()
if(CUDA_FOUND)
message(STATUS "Found CUDA Toolkit v${CUDA_VERSION_STRING}")
- enable_language(CUDA)
set(HAVE_CUDA TRUE)
if (CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA")
cmake_policy(SET CMP0104 NEW)
set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH_BIN})
message(STATUS "CMAKE_CUDA_ARCHITECTURES: ${CMAKE_CUDA_ARCHITECTURES}")
-
- #Add empty project as its not required with newer CMake
- add_library(pcl_cuda INTERFACE)
else()
# Generate SASS
set(CMAKE_CUDA_ARCHITECTURES ${CUDA_ARCH_BIN})
# Generate PTX for last architecture
list(GET CUDA_ARCH_BIN -1 ver)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode arch=compute_${ver},code=compute_${ver}")
- message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
-
- add_library(pcl_cuda INTERFACE)
- target_include_directories(pcl_cuda INTERFACE ${CUDA_TOOLKIT_INCLUDE})
-
+ message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
endif ()
+else()
+ message(STATUS "CUDA was not found.")
endif()
set_property(CACHE PCL_QHULL_REQUIRED_TYPE PROPERTY STRINGS DONTCARE SHARED STATIC)
mark_as_advanced(PCL_QHULL_REQUIRED_TYPE)
+option(PCL_PREFER_BOOST_FILESYSTEM "Prefer boost::filesystem over std::filesystem (if compiled as C++17 or higher, std::filesystem is chosen by default)" OFF)
+mark_as_advanced(PCL_PREFER_BOOST_FILESYSTEM)
+
+set(PCL_XYZ_POINT_TYPES "(pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZL)(pcl::PointXYZRGBA)(pcl::PointXYZRGB)(pcl::PointXYZRGBL)(pcl::PointXYZLAB)(pcl::PointXYZHSV)(pcl::InterestPoint)(pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal)(pcl::PointXYZLNormal)(pcl::PointWithRange)(pcl::PointWithViewpoint)(pcl::PointWithScale)(pcl::PointSurfel)(pcl::PointDEM)" CACHE STRING "Point types with xyz information for which PCL classes will be instantiated. Alternative to PCL_ONLY_CORE_POINT_TYPES. You can remove unneeded types to reduce compile time and library size.")
+mark_as_advanced(PCL_XYZ_POINT_TYPES)
+
+set(PCL_NORMAL_POINT_TYPES "(pcl::Normal)(pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal)(pcl::PointXYZLNormal)(pcl::PointSurfel)" CACHE STRING "Point types with normal information for which PCL classes will be instantiated. Alternative to PCL_ONLY_CORE_POINT_TYPES. You can remove unneeded types to reduce compile time and library size.")
+mark_as_advanced(PCL_NORMAL_POINT_TYPES)
+
# Precompile for a minimal set of point types instead of all.
+if(CMAKE_COMPILER_IS_MSVC OR CMAKE_COMPILER_IS_MINGW)
+option(PCL_ONLY_CORE_POINT_TYPES "Compile explicitly only for a small subset of point types (e.g., pcl::PointXYZ instead of PCL_XYZ_POINT_TYPES)." ON)
+else()
option(PCL_ONLY_CORE_POINT_TYPES "Compile explicitly only for a small subset of point types (e.g., pcl::PointXYZ instead of PCL_XYZ_POINT_TYPES)." OFF)
+endif()
mark_as_advanced(PCL_ONLY_CORE_POINT_TYPES)
# Precompile for a minimal set of point types instead of all.
# Project folders
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
-option(BUILD_tools "Useful PCL-based command line tools" ON)
-
option(WITH_DOCS "Build doxygen documentation" OFF)
# set index size
# Set whether visualizations tests should be run
# (Used to prevent visualizations tests from executing in CI where visualization is unavailable)
option(PCL_DISABLE_VISUALIZATION_TESTS "Disable running visualizations tests. If disabled, tests will still be built." OFF)
+
+# This leads to smaller libraries, possibly faster code, and fixes some bugs. See https://gcc.gnu.org/wiki/Visibility
+option(PCL_SYMBOL_VISIBILITY_HIDDEN "Hide all binary symbols by default, export only those explicitly marked (gcc and clang only). Experimental!" OFF)
+mark_as_advanced(PCL_SYMBOL_VISIBILITY_HIDDEN)
+if(PCL_SYMBOL_VISIBILITY_HIDDEN)
+ set(CMAKE_CXX_VISIBILITY_PRESET hidden)
+ set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
+endif()
endforeach()
#Boost modules
-set(PCLCONFIG_AVAILABLE_BOOST_MODULES "system filesystem iostreams")
+set(PCLCONFIG_AVAILABLE_BOOST_MODULES "system iostreams")
+if(Boost_FILESYSTEM_FOUND)
+ string(APPEND PCLCONFIG_AVAILABLE_BOOST_MODULES " filesystem")
+endif()
if(Boost_SERIALIZATION_FOUND)
string(APPEND PCLCONFIG_AVAILABLE_BOOST_MODULES " serialization")
endif()
-if(Boost_CHRONO_FOUND)
- string(APPEND PCLCONFIG_AVAILABLE_BOOST_MODULES " chrono")
-endif()
configure_file("${PCL_SOURCE_DIR}/PCLConfig.cmake.in"
"${PCL_BINARY_DIR}/PCLConfig.cmake" @ONLY)
"${PCL_BINARY_DIR}/PCLConfig.cmake"
"${PCL_BINARY_DIR}/PCLConfigVersion.cmake"
COMPONENT pclconfig
- DESTINATION ${PCLCONFIG_INSTALL_DIR})
\ No newline at end of file
+ DESTINATION ${PCLCONFIG_INSTALL_DIR})
if(NOT _status)
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_name} FALSE "Requires ${_dep}.")
- else()
- PCL_GET_SUBSYS_INCLUDE_DIR(_include_dir ${_dep})
- include_directories(${PROJECT_SOURCE_DIR}/${_include_dir}/include)
endif()
endforeach()
endif()
endif()
endforeach()
endif()
- if(ARGS_OPT_DEPS)
- foreach(_dep ${ARGS_OPT_DEPS})
- PCL_GET_SUBSYS_INCLUDE_DIR(_include_dir ${_dep})
- include_directories(${PROJECT_SOURCE_DIR}/${_include_dir}/include)
- endforeach()
- endif()
endif()
endmacro()
function(PCL_ADD_LIBRARY _name)
set(options)
set(oneValueArgs COMPONENT)
- set(multiValueArgs SOURCES)
+ set(multiValueArgs SOURCES INCLUDES)
cmake_parse_arguments(ARGS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(ARGS_UNPARSED_ARGUMENTS)
message(FATAL_ERROR "PCL_ADD_LIBRARY requires parameter COMPONENT.")
endif()
- add_library(${_name} ${PCL_LIB_TYPE} ${ARGS_SOURCES})
- PCL_ADD_VERSION_INFO(${_name})
- target_compile_features(${_name} PUBLIC ${PCL_CXX_COMPILE_FEATURES})
+ if(NOT ARGS_SOURCES)
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.19)
+ add_library(${_name} INTERFACE ${ARGS_INCLUDES})
- target_link_libraries(${_name} Threads::Threads)
- if(TARGET OpenMP::OpenMP_CXX)
- target_link_libraries(${_name} OpenMP::OpenMP_CXX)
- endif()
+ set_target_properties(${_name} PROPERTIES FOLDER "Libraries")
+ else()
+ add_library(${_name} INTERFACE)
+ endif()
+
+ target_include_directories(${_name} INTERFACE
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_ROOT}>
+ )
+ else()
+ add_library(${_name} ${PCL_LIB_TYPE} ${ARGS_SOURCES})
+ PCL_ADD_VERSION_INFO(${_name})
+ target_compile_features(${_name} PUBLIC ${PCL_CXX_COMPILE_FEATURES})
+
+ target_include_directories(${_name} PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_ROOT}>
+ )
+
+ target_link_libraries(${_name} Threads::Threads)
+ if(TARGET OpenMP::OpenMP_CXX)
+ target_link_libraries(${_name} OpenMP::OpenMP_CXX)
+ endif()
- if((UNIX AND NOT ANDROID) OR MINGW)
- target_link_libraries(${_name} m ${ATOMIC_LIBRARY})
- endif()
+ if((UNIX AND NOT ANDROID) OR MINGW)
+ target_link_libraries(${_name} m ${ATOMIC_LIBRARY})
+ endif()
- if(MINGW)
- target_link_libraries(${_name} gomp)
- endif()
+ if(MINGW)
+ target_link_libraries(${_name} gomp)
+ endif()
- if(MSVC)
- target_link_libraries(${_name} delayimp.lib) # because delay load is enabled for openmp.dll
- endif()
+ if(MSVC)
+ target_link_libraries(${_name} delayimp.lib) # because delay load is enabled for openmp.dll
+ endif()
+
+ set_target_properties(${_name} PROPERTIES
+ VERSION ${PCL_VERSION}
+ SOVERSION ${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}
+ DEFINE_SYMBOL "PCLAPI_EXPORTS")
- set_target_properties(${_name} PROPERTIES
- VERSION ${PCL_VERSION}
- SOVERSION ${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}
- DEFINE_SYMBOL "PCLAPI_EXPORTS")
- set_target_properties(${_name} PROPERTIES FOLDER "Libraries")
+ set_target_properties(${_name} PROPERTIES FOLDER "Libraries")
+ endif()
install(TARGETS ${_name}
RUNTIME DESTINATION ${BIN_INSTALL_DIR} COMPONENT pcl_${ARGS_COMPONENT}
endif()
REMOVE_VTK_DEFINITIONS()
+ if(NOT ARGS_SOURCES)
+ add_library(${_name} INTERFACE)
+
+ target_include_directories(${_name} INTERFACE
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_ROOT}>
+ )
- add_library(${_name} ${PCL_LIB_TYPE} ${ARGS_SOURCES})
-
- PCL_ADD_VERSION_INFO(${_name})
-
- target_compile_options(${_name} PRIVATE $<$<COMPILE_LANGUAGE:CUDA>: ${GEN_CODE} --expt-relaxed-constexpr>)
-
- target_include_directories(${_name} PRIVATE ${CUDA_TOOLKIT_INCLUDE})
-
- if(MSVC)
- target_link_libraries(${_name} delayimp.lib) # because delay load is enabled for openmp.dll
+ else()
+ add_library(${_name} ${PCL_LIB_TYPE} ${ARGS_SOURCES})
+
+ PCL_ADD_VERSION_INFO(${_name})
+
+ target_compile_options(${_name} PRIVATE $<$<COMPILE_LANGUAGE:CUDA>: ${GEN_CODE} --expt-relaxed-constexpr>)
+
+ target_include_directories(${_name} PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:${INCLUDE_INSTALL_ROOT}>
+ )
+
+ target_include_directories(${_name} PRIVATE ${CUDA_TOOLKIT_INCLUDE})
+
+ if(MSVC)
+ target_link_libraries(${_name} delayimp.lib) # because delay load is enabled for openmp.dll
+ endif()
+
+ set_target_properties(${_name} PROPERTIES
+ VERSION ${PCL_VERSION}
+ SOVERSION ${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}
+ DEFINE_SYMBOL "PCLAPI_EXPORTS")
+ set_target_properties(${_name} PROPERTIES FOLDER "Libraries")
endif()
- set_target_properties(${_name} PROPERTIES
- VERSION ${PCL_VERSION}
- SOVERSION ${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}
- DEFINE_SYMBOL "PCLAPI_EXPORTS")
- set_target_properties(${_name} PROPERTIES FOLDER "Libraries")
-
install(TARGETS ${_name}
RUNTIME DESTINATION ${BIN_INSTALL_DIR} COMPONENT pcl_${ARGS_COMPONENT}
LIBRARY DESTINATION ${LIB_INSTALL_DIR} COMPONENT pcl_${ARGS_COMPONENT}
#Only applies to MSVC
if(MSVC)
- #Requires CMAKE version 3.13.0
- if(CMAKE_VERSION VERSION_LESS "3.13.0" AND (NOT ArgumentWarningShown))
- message(WARNING "Arguments for unit test projects are not added - this requires at least CMake 3.13. Can be added manually in \"Project settings -> Debugging -> Command arguments\"")
- SET (ArgumentWarningShown TRUE PARENT_SCOPE)
- else()
- #Only add if there are arguments to test
- if(ARGS_ARGUMENTS)
- string (REPLACE ";" " " ARGS_ARGUMENTS_STR "${ARGS_ARGUMENTS}")
- set_target_properties(${_exename} PROPERTIES VS_DEBUGGER_COMMAND_ARGUMENTS ${ARGS_ARGUMENTS_STR})
- endif()
+ #Only add if there are arguments to test
+ if(ARGS_ARGUMENTS)
+ string (REPLACE ";" " " ARGS_ARGUMENTS_STR "${ARGS_ARGUMENTS}")
+ set_target_properties(${_exename} PROPERTIES VS_DEBUGGER_COMMAND_ARGUMENTS ${ARGS_ARGUMENTS_STR})
endif()
endif()
if(MSVC)
#Requires CMAKE version 3.13.0
get_target_property(BenchmarkArgumentWarningShown run_benchmarks PCL_BENCHMARK_ARGUMENTS_WARNING_SHOWN)
- if(CMAKE_VERSION VERSION_LESS "3.13.0" AND (NOT BenchmarkArgumentWarningShown))
- message(WARNING "Arguments for benchmark projects are not added - this requires at least CMake 3.13. Can be added manually in \"Project settings -> Debugging -> Command arguments\"")
- set_target_properties(run_benchmarks PROPERTIES PCL_BENCHMARK_ARGUMENTS_WARNING_SHOWN TRUE)
- else()
- #Only add if there are arguments to test
- if(ARGS_ARGUMENTS)
- string (REPLACE ";" " " ARGS_ARGUMENTS_STR "${ARGS_ARGUMENTS}")
- set_target_properties(benchmark_${_name} PROPERTIES VS_DEBUGGER_COMMAND_ARGUMENTS ${ARGS_ARGUMENTS_STR})
- endif()
+ #Only add if there are arguments to test
+ if(ARGS_ARGUMENTS)
+ string (REPLACE ";" " " ARGS_ARGUMENTS_STR "${ARGS_ARGUMENTS}")
+ set_target_properties(benchmark_${_name} PROPERTIES VS_DEBUGGER_COMMAND_ARGUMENTS ${ARGS_ARGUMENTS_STR})
endif()
endif()
set(SUBSYS_DESC "Point cloud common library")
set(SUBSYS_DEPS)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS eigen3 boost)
include/pcl/types.h
include/pcl/point_cloud.h
include/pcl/point_struct_traits.h
- include/pcl/point_traits.h
include/pcl/type_traits.h
include/pcl/point_types_conversion.h
include/pcl/point_representation.h
include/pcl/PointIndices.h
include/pcl/register_point_struct.h
include/pcl/conversions.h
- include/pcl/make_shared.h
)
set(common_incs
- include/pcl/common/boost.h
include/pcl/common/angles.h
include/pcl/common/bivariate_polynomial.h
include/pcl/common/centroid.h
include/pcl/common/file_io.h
include/pcl/common/intersections.h
include/pcl/common/norms.h
+ include/pcl/common/pcl_filesystem.h
include/pcl/common/piecewise_linear_function.h
include/pcl/common/polynomial_calculations.h
include/pcl/common/poses_from_matches.h
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${kissfft_srcs} ${incs} ${common_incs} ${impl_incs} ${tools_incs} ${kissfft_incs} ${common_incs_impl} ${range_image_incs} ${range_image_incs_impl})
-target_include_directories(${LIB_NAME} PUBLIC
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
- $<INSTALL_INTERFACE:include>
-)
-
target_link_libraries(${LIB_NAME} Boost::boost Eigen3::Eigen)
if(MSVC AND NOT (MSVC_VERSION LESS 1915))
s << " " << v.offset << std::endl;
s << "datatype: ";
switch(v.datatype) {
+ case ::pcl::PCLPointField::PointFieldTypes::BOOL: s << " BOOL" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::INT8: s << " INT8" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::UINT8: s << " UINT8" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::INT16: s << " INT16" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::UINT16: s << " UINT16" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::INT32: s << " INT32" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::UINT32: s << " UINT32" << std::endl; break;
+ case ::pcl::PCLPointField::PointFieldTypes::INT64: s << " INT64" << std::endl; break;
+ case ::pcl::PCLPointField::PointFieldTypes::UINT64: s << " UINT64" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::FLOAT32: s << " FLOAT32" << std::endl; break;
case ::pcl::PCLPointField::PointFieldTypes::FLOAT64: s << " FLOAT64" << std::endl; break;
default: s << " " << static_cast<int>(v.datatype) << std::endl;
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-#ifndef Q_MOC_RUN
-// Marking all Boost headers as system headers to remove warnings
-#include <boost/fusion/sequence/intrinsic/at_key.hpp>
-#include <boost/mpl/size.hpp>
-#include <boost/signals2.hpp>
-#include <boost/signals2/slot.hpp>
-#include <boost/algorithm/string.hpp>
-#endif
* www.srgb.com, www.color.org/srgb.html
*/
template <typename T, std::uint8_t bits = 8>
- PCL_EXPORTS inline std::array<T, 1 << bits>
+ inline std::array<T, 1 << bits>
RGB2sRGB_LUT() noexcept
{
static_assert(std::is_floating_point<T>::value, "LUT value must be a floating point");
* Reference: Billmeyer and Saltzman’s Principles of Color Technology
*/
template <typename T, std::size_t discretizations = 4000>
- PCL_EXPORTS inline const std::array<T, discretizations>&
+ inline const std::array<T, discretizations>&
XYZ2LAB_LUT() noexcept
{
static_assert(std::is_floating_point<T>::value, "LUT value must be a floating point");
getMaxDistance (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
const Eigen::Vector4f &pivot_pt, Eigen::Vector4f &max_pt);
- /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
+ /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud. This is the axis aligned bounding box (AABB).
* \param[in] cloud the point cloud data message
* \param[out] min_pt the resultant minimum bounds
* \param[out] max_pt the resultant maximum bounds
template <typename PointT> inline void
getMinMax3D (const pcl::PointCloud<PointT> &cloud, PointT &min_pt, PointT &max_pt);
- /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
+ /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud. This is the axis aligned bounding box (AABB).
* \param[in] cloud the point cloud data message
* \param[out] min_pt the resultant minimum bounds
* \param[out] max_pt the resultant maximum bounds
getMinMax3D (const pcl::PointCloud<PointT> &cloud,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
- /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
+ /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud. This is the axis aligned bounding box (AABB).
* \param[in] cloud the point cloud data message
* \param[in] indices the vector of point indices to use from \a cloud
* \param[out] min_pt the resultant minimum bounds
getMinMax3D (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
- /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
+ /** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud. This is the axis aligned bounding box (AABB).
* \param[in] cloud the point cloud data message
* \param[in] indices the vector of point indices to use from \a cloud
* \param[out] min_pt the resultant minimum bounds
//obb_rotational_matrix.col(1)==middle_axis
//obb_rotational_matrix.col(2)==minor_axis
- //Trasforming the point cloud in the (Centroid, ma-mi-mi_axis) reference
+ //Transforming the point cloud in the (Centroid, ma-mi-mi_axis) reference
//with homogeneous matrix
//[R^t , -R^t*Centroid ]
//[0 , 1 ]
Eigen::Matrix<Scalar, 4, 4> transform = Eigen::Matrix<Scalar, 4, 4>::Identity();
- transform.topLeftCorner(3, 3) = obb_rotational_matrix.transpose();
- transform.topRightCorner(3, 1) =-transform.topLeftCorner(3, 3)*centroid;
+ transform.template topLeftCorner<3, 3>() = obb_rotational_matrix.transpose();
+ transform.template topRightCorner<3, 1>() =-transform.template topLeftCorner<3, 3>()*centroid;
//when Scalar==double on a Windows 10 machine and MSVS:
//if you substitute the following Scalars with floats you get a 20% worse processing time, if with 2 PointT 55% worse
//obb_rotational_matrix.col(1)==middle_axis
//obb_rotational_matrix.col(2)==minor_axis
- //Trasforming the point cloud in the (Centroid, ma-mi-mi_axis) reference
+ //Transforming the point cloud in the (Centroid, ma-mi-mi_axis) reference
//with homogeneous matrix
//[R^t , -R^t*Centroid ]
//[0 , 1 ]
Eigen::Matrix<Scalar, 4, 4> transform = Eigen::Matrix<Scalar, 4, 4>::Identity();
- transform.topLeftCorner(3, 3) = obb_rotational_matrix.transpose();
- transform.topRightCorner(3, 1) =-transform.topLeftCorner(3, 3)*centroid;
+ transform.template topLeftCorner<3, 3>() = obb_rotational_matrix.transpose();
+ transform.template topRightCorner<3, 1>() =-transform.template topLeftCorner<3, 3>()*centroid;
//when Scalar==double on a Windows 10 machine and MSVS:
//if you substitute the following Scalars with floats you get a 20% worse processing time, if with 2 PointT 55% worse
computeRoots (scaledMat, eigenvalues);
eigenvalue = eigenvalues (0) * scale;
-
- scaledMat.diagonal ().array () -= eigenvalues (0);
-
- eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;
+ if ( (eigenvalues (1) - eigenvalues (0)) > Eigen::NumTraits < Scalar > ::epsilon ()) {
+ // usual case: first and second are not equal (so first and third are also not equal).
+ // second and third could be equal, but that does not matter here
+ scaledMat.diagonal ().array () -= eigenvalues (0);
+ eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;
+ }
+ else if ( (eigenvalues (2) - eigenvalues (0)) > Eigen::NumTraits < Scalar > ::epsilon ()) {
+ // first and second equal: choose any unit vector that is orthogonal to third eigenvector
+ scaledMat.diagonal ().array () -= eigenvalues (2);
+ eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector.unitOrthogonal ();
+ }
+ else {
+ // all three equal: just use an arbitrary unit vector
+ eigenvector << Scalar (1.0), Scalar (0.0), Scalar (0.0);
+ }
}
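
This branch belongs to the pcl::eigen33 overload that returns the smallest eigenvalue/eigenvector pair. A quick sketch of the newly handled degenerate case (values chosen for illustration): for a covariance matrix whose eigenvalues are all equal, the returned eigenvector is now a well-defined unit vector instead of an arbitrary result.

#include <pcl/common/eigen.h>

#include <Eigen/Core>

inline void
eigen33_degenerate_sketch ()
{
  const Eigen::Matrix3f covariance = Eigen::Matrix3f::Identity ();
  float eigenvalue;
  Eigen::Vector3f eigenvector;
  pcl::eigen33 (covariance, eigenvalue, eigenvector);
  // eigenvalue == 1 and eigenvector is (1, 0, 0), per the "all three equal" branch above.
}
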
#ifndef PCL_COMMON_FILE_IO_IMPL_HPP_
#define PCL_COMMON_FILE_IO_IMPL_HPP_
-#include <boost/filesystem.hpp>
+#include <pcl/common/pcl_filesystem.h>
+
#include <boost/range/iterator_range.hpp>
#include <algorithm>
void getAllPcdFilesInDirectory(const std::string& directory, std::vector<std::string>& file_names)
{
- boost::filesystem::path p(directory);
- if(boost::filesystem::is_directory(p))
+ pcl_fs::path p(directory);
+ if(pcl_fs::is_directory(p))
{
- for(const auto& entry : boost::make_iterator_range(boost::filesystem::directory_iterator(p), {}))
+ for(const auto& entry : boost::make_iterator_range(pcl_fs::directory_iterator(p), {}))
{
- if (boost::filesystem::is_regular_file(entry))
+ if (pcl_fs::is_regular_file(entry))
{
if (entry.path().extension() == ".pcd")
file_names.emplace_back(entry.path().filename().string());
if (!cloud.empty ())
PCL_WARN ("[pcl::common::CloudGenerator] Cloud data will be erased with new data\n!");
- cloud.width = width;
- cloud.height = height;
- cloud.resize (cloud.width * cloud.height);
+ cloud.resize (width, height);
cloud.is_dense = true;
for (auto &point : cloud)
while (pointIt)
{
- unsigned yIdx = pointIt.getCurrentPointIndex () / cloud->width;
- unsigned xIdx = pointIt.getCurrentPointIndex () % cloud->width;
+ Scalar yIdx = pointIt.getCurrentPointIndex () / cloud->width;
+ Scalar xIdx = pointIt.getCurrentPointIndex () % cloud->width;
const PointT& point = *pointIt;
if (std::isfinite (point.x))
{
- Scalar xx = point.x * point.x;
- Scalar xy = point.x * point.y;
- Scalar xz = point.x * point.z;
- Scalar yy = point.y * point.y;
- Scalar yz = point.y * point.z;
- Scalar zz = point.z * point.z;
+ Scalar xx = static_cast<Scalar>(point.x) * static_cast<Scalar>(point.x);
+ Scalar xy = static_cast<Scalar>(point.x) * static_cast<Scalar>(point.y);
+ Scalar xz = static_cast<Scalar>(point.x) * static_cast<Scalar>(point.z);
+ Scalar yy = static_cast<Scalar>(point.y) * static_cast<Scalar>(point.y);
+ Scalar yz = static_cast<Scalar>(point.y) * static_cast<Scalar>(point.z);
+ Scalar zz = static_cast<Scalar>(point.z) * static_cast<Scalar>(point.z);
Scalar xx_yy = xIdx * xIdx + yIdx * yIdx;
A.coeffRef (0) += xx;
double rel1 = eigen_vals.coeff (0) / eigen_vals.coeff (1);
double rel2 = eigen_vals.coeff (1) / eigen_vals.coeff (2);
- transform.translation () = centroid.head (3);
+ transform.translation () = centroid.template head<3> ();
transform.linear () = eigen_vects;
return (std::min (rel1, rel2));
* \note Described in: "Intersection of Two Planes, John Krumm, Microsoft Research, Redmond, WA, USA"
* \param[in] plane_a coefficients of plane A and plane B in the form ax + by + cz + d = 0
* \param[in] plane_b coefficients of line where line.tail<3>() = direction vector and
- * line.head<3>() the point on the line clossest to (0, 0, 0)
+ * line.head<3>() the point on the line closest to (0, 0, 0)
* \param[out] line the intersected line to be filled
* \param[in] angular_tolerance tolerance in radians
* \return true if succeeded/planes aren't parallel
*/
- PCL_EXPORTS template <typename Scalar> bool
+ template <typename Scalar> bool
planeWithPlaneIntersection (const Eigen::Matrix<Scalar, 4, 1> &plane_a,
const Eigen::Matrix<Scalar, 4, 1> &plane_b,
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> &line,
double angular_tolerance = 0.1);
- PCL_EXPORTS inline bool
+ inline bool
planeWithPlaneIntersection (const Eigen::Vector4f &plane_a,
const Eigen::Vector4f &plane_b,
Eigen::VectorXf &line,
return (planeWithPlaneIntersection<float> (plane_a, plane_b, line, angular_tolerance));
}
- PCL_EXPORTS inline bool
+ inline bool
planeWithPlaneIntersection (const Eigen::Vector4d &plane_a,
const Eigen::Vector4d &plane_b,
Eigen::VectorXd &line,
* \param[out] intersection_point the three coordinates x, y, z of the intersection point
* \return true if succeeded/planes aren't parallel
*/
- PCL_EXPORTS template <typename Scalar> bool
+ template <typename Scalar> bool
threePlanesIntersection (const Eigen::Matrix<Scalar, 4, 1> &plane_a,
const Eigen::Matrix<Scalar, 4, 1> &plane_b,
const Eigen::Matrix<Scalar, 4, 1> &plane_c,
double determinant_tolerance = 1e-6);
- PCL_EXPORTS inline bool
+ inline bool
threePlanesIntersection (const Eigen::Vector4f &plane_a,
const Eigen::Vector4f &plane_b,
const Eigen::Vector4f &plane_c,
intersection_point, determinant_tolerance));
}
- PCL_EXPORTS inline bool
+ inline bool
threePlanesIntersection (const Eigen::Vector4d &plane_a,
const Eigen::Vector4d &plane_b,
const Eigen::Vector4d &plane_c,
* \ingroup common
*/
template <typename PointT>
- PCL_EXPORTS bool
+ bool
concatenate (const pcl::PointCloud<PointT> &cloud1,
const pcl::PointCloud<PointT> &cloud2,
pcl::PointCloud<PointT> &cloud_out)
* \return true if successful, false otherwise
* \ingroup common
*/
- PCL_EXPORTS inline bool
+ inline bool
concatenate (const pcl::PCLPointCloud2 &cloud1,
const pcl::PCLPointCloud2 &cloud2,
pcl::PCLPointCloud2 &cloud_out)
* \return true if successful, false otherwise
* \ingroup common
*/
- PCL_EXPORTS inline bool
+ inline bool
concatenate (const pcl::PolygonMesh &mesh1,
const pcl::PolygonMesh &mesh2,
pcl::PolygonMesh &mesh_out)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (C) 2024 Kino <cybao292261@163.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#pragma once
+
+#include <pcl/pcl_config.h> // for PCL_PREFER_BOOST_FILESYSTEM
+
+#if (__cplusplus >= 201703L) && !defined(PCL_PREFER_BOOST_FILESYSTEM)
+#define PCL_USING_STD_FILESYSTEM
+#include <filesystem>
+namespace pcl_fs = std::filesystem;
+#else
+#define PCL_USING_BOOST_FILESYSTEM
+#include <boost/filesystem.hpp>
+namespace pcl_fs = boost::filesystem;
+#endif
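
A minimal usage sketch of the alias defined above (the helper name print_pcd_files is made up for illustration); the same code compiles against either backend because only the pcl_fs namespace is referenced:

#include <pcl/common/pcl_filesystem.h>

#include <iostream>
#include <string>

// Print the names of all .pcd files in a directory. pcl_fs resolves to
// std::filesystem or boost::filesystem depending on the build configuration.
inline void
print_pcd_files (const std::string& directory)
{
  const pcl_fs::path dir (directory);
  if (!pcl_fs::is_directory (dir))
    return;
  for (const auto& entry : pcl_fs::directory_iterator (dir))
    if (pcl_fs::is_regular_file (entry) && entry.path ().extension () == ".pcd")
      std::cout << entry.path ().filename ().string () << '\n';
}
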
// Use e.g. like this:
// PCL_INFO_STREAM("Info: this is a point: " << pcl::PointXYZ(1.0, 2.0, 3.0) << std::endl);
// PCL_ERROR_STREAM("Error: an Eigen vector: " << std::endl << Eigen::Vector3f(1.0, 2.0, 3.0) << std::endl);
+// NOLINTBEGIN(bugprone-macro-parentheses)
#define PCL_LOG_STREAM(LEVEL, STREAM, CSTR, ATTR, FG, ARGS) if(pcl::console::isVerbosityLevelEnabled(pcl::console::LEVEL)) { fflush(stdout); pcl::console::change_text_color(CSTR, pcl::console::ATTR, pcl::console::FG); STREAM << ARGS; pcl::console::reset_text_color(CSTR); }
+// NOLINTEND(bugprone-macro-parentheses)
#define PCL_ALWAYS_STREAM(ARGS) PCL_LOG_STREAM(L_ALWAYS, std::cout, stdout, TT_RESET, TT_WHITE, ARGS)
#define PCL_ERROR_STREAM(ARGS) PCL_LOG_STREAM(L_ERROR, std::cerr, stderr, TT_BRIGHT, TT_RED, ARGS)
#define PCL_WARN_STREAM(ARGS) PCL_LOG_STREAM(L_WARN, std::cerr, stderr, TT_BRIGHT, TT_YELLOW, ARGS)
#define PCL_INFO(...) pcl::console::print (pcl::console::L_INFO, __VA_ARGS__)
#define PCL_DEBUG(...) pcl::console::print (pcl::console::L_DEBUG, __VA_ARGS__)
#define PCL_VERBOSE(...) pcl::console::print (pcl::console::L_VERBOSE, __VA_ARGS__)
+#define PCL_HIGH(...) pcl::console::print_highlight (__VA_ARGS__)
#define PCL_ASSERT_ERROR_PRINT_CHECK(pred, msg) \
do \
#include <algorithm>
#include <iterator>
+#include <numeric> // for accumulate
namespace pcl
{
}
}
// Disable thrown exception per #595: http://dev.pointclouds.org/issues/595
- PCL_WARN ("Failed to find match for field '%s'.\n", pcl::traits::name<PointT, Tag>::value);
+ PCL_WARN ("Failed to find exact match for field '%s'.\n", pcl::traits::name<PointT, Tag>::value);
//throw pcl::InvalidConversionException (ss.str ());
}
return (a.serialized_offset < b.serialized_offset);
}
+    // Helps to convert a PCLPointCloud2 into a templated point cloud. Casts fields whose datatype differs.
+ template<typename PointT>
+ struct FieldCaster
+ {
+ FieldCaster (const std::vector<pcl::PCLPointField>& fields, const pcl::PCLPointCloud2& msg, const std::uint8_t* msg_data, std::uint8_t* cloud_data)
+ : fields_ (fields), msg_(msg), msg_data_(msg_data), cloud_data_(cloud_data)
+ {}
+
+ template<typename Tag> void
+ operator () ()
+ {
+ // first check whether any field matches exactly. Then there is nothing to do because the contents are memcpy-ed elsewhere
+ for (const auto& field : fields_) {
+ if (FieldMatches<PointT, Tag>()(field)) {
+ return;
+ }
+ }
+ for (const auto& field : fields_)
+ {
+ // The following check is similar to FieldMatches, but it tests for different datatypes
+ if ((field.name == pcl::traits::name<PointT, Tag>::value) &&
+ (field.datatype != pcl::traits::datatype<PointT, Tag>::value) &&
+ ((field.count == pcl::traits::datatype<PointT, Tag>::size) ||
+ (field.count == 0 && pcl::traits::datatype<PointT, Tag>::size == 1))) {
+#define PCL_CAST_POINT_FIELD(TYPE) case ::pcl::traits::asEnum_v<TYPE>: \
+          PCL_WARN("Will try to cast field '%s' (original type is " #TYPE "). You may lose precision during casting. Make sure that this is acceptable or choose a different point type.\n", pcl::traits::name<PointT, Tag>::value); \
+ for (std::size_t row = 0; row < msg_.height; ++row) { \
+ const std::uint8_t* row_data = msg_data_ + row * msg_.row_step; \
+ for (std::size_t col = 0; col < msg_.width; ++col) { \
+ const std::uint8_t* msg_data = row_data + col * msg_.point_step; \
+ for(std::uint32_t i=0; i<pcl::traits::datatype<PointT, Tag>::size; ++i) { \
+ *(reinterpret_cast<typename pcl::traits::datatype<PointT, Tag>::decomposed::type*>(cloud_data + pcl::traits::offset<PointT, Tag>::value) + i) = *(reinterpret_cast<const TYPE*>(msg_data + field.offset) + i); \
+ } \
+ cloud_data += sizeof (PointT); \
+ } \
+ } \
+ break;
+ // end of PCL_CAST_POINT_FIELD definition
+
+ std::uint8_t* cloud_data = cloud_data_;
+ switch(field.datatype) {
+ PCL_CAST_POINT_FIELD(bool)
+ PCL_CAST_POINT_FIELD(std::int8_t)
+ PCL_CAST_POINT_FIELD(std::uint8_t)
+ PCL_CAST_POINT_FIELD(std::int16_t)
+ PCL_CAST_POINT_FIELD(std::uint16_t)
+ PCL_CAST_POINT_FIELD(std::int32_t)
+ PCL_CAST_POINT_FIELD(std::uint32_t)
+ PCL_CAST_POINT_FIELD(std::int64_t)
+ PCL_CAST_POINT_FIELD(std::uint64_t)
+ PCL_CAST_POINT_FIELD(float)
+ PCL_CAST_POINT_FIELD(double)
+            default: std::cout << "Unknown datatype: " << static_cast<int>(field.datatype) << std::endl;
+ }
+ return;
+ }
+#undef PCL_CAST_POINT_FIELD
+ }
+ }
+
+ const std::vector<pcl::PCLPointField>& fields_;
+ const pcl::PCLPointCloud2& msg_;
+ const std::uint8_t* msg_data_;
+ std::uint8_t* cloud_data_;
+ };
} //namespace detail
template<typename PointT> void
// check if there is data to copy
if (msg.width * msg.height == 0)
{
- PCL_WARN("[pcl::fromPCLPointCloud2] No data to copy.\n");
return;
}
}
}
}
+ // if any fields in msg and cloud have different datatypes but the same name, we cast them:
+ detail::FieldCaster<PointT> caster (msg.fields, msg, msg_data, reinterpret_cast<std::uint8_t*>(cloud.data()));
+ for_each_type< typename traits::fieldList<PointT>::type > (caster);
}
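
A small sketch of the casting path in action (field layout and values are made up for illustration): a blob that stores x/y/z as FLOAT64 now converts into a pcl::PointCloud<pcl::PointXYZ>, with each double narrowed to float and a warning emitted per cast field:

#include <pcl/PCLPointCloud2.h>
#include <pcl/conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

#include <cstdint>
#include <cstring>

inline pcl::PointCloud<pcl::PointXYZ>
convert_double_xyz_blob ()
{
  // Build a blob whose x/y/z fields are stored as FLOAT64 (offsets 0, 8, 16).
  pcl::PCLPointCloud2 msg;
  msg.height = 1;
  msg.width = 2;
  msg.is_dense = true;
  const char* names[] = {"x", "y", "z"};
  for (std::uint32_t i = 0; i < 3; ++i)
  {
    pcl::PCLPointField field;
    field.name = names[i];
    field.offset = static_cast<std::uint32_t> (i * sizeof (double));
    field.datatype = pcl::PCLPointField::FLOAT64;
    field.count = 1;
    msg.fields.push_back (field);
  }
  msg.point_step = static_cast<std::uint32_t> (3 * sizeof (double));
  msg.row_step = msg.point_step * msg.width;
  const double values[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
  msg.data.resize (msg.row_step * msg.height);
  std::memcpy (msg.data.data (), values, sizeof (values));

  // The field names match pcl::PointXYZ but the datatypes do not, so the
  // FieldCaster above converts the doubles to floats instead of memcpy-ing them.
  pcl::PointCloud<pcl::PointXYZ> cloud;
  pcl::fromPCLPointCloud2 (msg, cloud); // cloud[0] == (1, 2, 3), cloud[1] == (4, 5, 6)
  return cloud;
}
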
/** \brief Convert a PCLPointCloud2 binary data blob into a pcl::PointCloud<T> object using a field_map.
fromPCLPointCloud2 (msg, cloud, field_map);
}
+ namespace detail {
+ /** \brief Used together with `pcl::for_each_type`, copies all point fields from `cloud_data` (respecting each field offset) to `msg_data` (tightly packed).
+ */
+ template<typename PointT>
+ struct FieldCopier {
+ FieldCopier(std::uint8_t*& msg_data, const std::uint8_t*& cloud_data) : msg_data_ (msg_data), cloud_data_ (cloud_data) {};
+
+ template<typename U> void operator() () {
+ memcpy(msg_data_, cloud_data_ + pcl::traits::offset<PointT, U>::value, sizeof(typename pcl::traits::datatype<PointT, U>::type));
+ msg_data_ += sizeof(typename pcl::traits::datatype<PointT, U>::type);
+ }
+
+ std::uint8_t*& msg_data_;
+ const std::uint8_t*& cloud_data_;
+ };
+
+    /** \brief Used together with `pcl::for_each_type`, creates a list of all fields and a list of the size of each field.
+ */
+ template<typename PointT>
+ struct FieldAdderAdvanced
+ {
+ FieldAdderAdvanced (std::vector<pcl::PCLPointField>& fields, std::vector<std::size_t>& field_sizes) : fields_ (fields), field_sizes_ (field_sizes) {};
+
+ template<typename U> void operator() ()
+ {
+ pcl::PCLPointField f;
+ f.name = pcl::traits::name<PointT, U>::value;
+ f.offset = pcl::traits::offset<PointT, U>::value;
+ f.datatype = pcl::traits::datatype<PointT, U>::value;
+ f.count = pcl::traits::datatype<PointT, U>::size;
+ fields_.push_back (f);
+ field_sizes_.push_back (sizeof(typename pcl::traits::datatype<PointT, U>::type)); // If field is an array, then this is the size of all array elements
+ }
+
+ std::vector<pcl::PCLPointField>& fields_;
+ std::vector<std::size_t>& field_sizes_;
+ };
+ } // namespace detail
+
/** \brief Convert a pcl::PointCloud<T> object to a PCLPointCloud2 binary data blob.
* \param[in] cloud the input pcl::PointCloud<T>
* \param[out] msg the resultant PCLPointCloud2 binary blob
+ * \param[in] padding Many point types have padding to ensure alignment and SIMD compatibility. Setting this to true will copy the padding to the `PCLPointCloud2` (the default in older PCL versions). Setting this to false will make the data blob in `PCLPointCloud2` smaller, while still keeping all information (useful e.g. when sending msg over network or storing it). The amount of padding depends on the point type, and can in some cases be up to 50 percent.
*/
template<typename PointT> void
- toPCLPointCloud2 (const pcl::PointCloud<PointT>& cloud, pcl::PCLPointCloud2& msg)
+ toPCLPointCloud2 (const pcl::PointCloud<PointT>& cloud, pcl::PCLPointCloud2& msg, bool padding)
{
// Ease the user's burden on specifying width/height for unorganized datasets
if (cloud.width == 0 && cloud.height == 0)
msg.height = cloud.height;
msg.width = cloud.width;
}
-
- // Fill point cloud binary data (padding and all)
- std::size_t data_size = sizeof (PointT) * cloud.size ();
- msg.data.resize (data_size);
- if (data_size)
- {
- memcpy(msg.data.data(), cloud.data(), data_size);
- }
-
// Fill fields metadata
msg.fields.clear ();
- for_each_type<typename traits::fieldList<PointT>::type> (detail::FieldAdder<PointT>(msg.fields));
+ std::vector<std::size_t> field_sizes;
+ for_each_type<typename pcl::traits::fieldList<PointT>::type>(pcl::detail::FieldAdderAdvanced<PointT>(msg.fields, field_sizes));
+ // Check if padding should be kept, or if the point type does not contain padding (then the single memcpy is faster)
+ if (padding || std::accumulate(field_sizes.begin(), field_sizes.end(), static_cast<std::size_t>(0)) == sizeof (PointT)) {
+ // Fill point cloud binary data (padding and all)
+ std::size_t data_size = sizeof (PointT) * cloud.size ();
+ msg.data.resize (data_size);
+ if (data_size)
+ {
+ memcpy(msg.data.data(), cloud.data(), data_size);
+ }
+
+ msg.point_step = sizeof (PointT);
+ msg.row_step = (sizeof (PointT) * msg.width);
+ } else {
+ std::size_t point_size = 0;
+ for(std::size_t i=0; i<msg.fields.size(); ++i) {
+ msg.fields[i].offset = point_size; // Adjust offset when padding is removed
+ point_size += field_sizes[i];
+ }
+ msg.data.resize (point_size * cloud.size());
+ std::uint8_t* msg_data = &msg.data[0];
+ const std::uint8_t* cloud_data=reinterpret_cast<const std::uint8_t*>(&cloud[0]);
+ const std::uint8_t* end = cloud_data + sizeof (PointT) * cloud.size ();
+ pcl::detail::FieldCopier<PointT> copier(msg_data, cloud_data); // copier takes msg_data and cloud_data as references, so the two are shared
+ for (; cloud_data<end; cloud_data+=sizeof(PointT)) {
+ for_each_type< typename traits::fieldList<PointT>::type > (copier);
+ }
+ msg.point_step = point_size;
+ msg.row_step = point_size * msg.width;
+ }
msg.header = cloud.header;
- msg.point_step = sizeof (PointT);
- msg.row_step = (sizeof (PointT) * msg.width);
msg.is_dense = cloud.is_dense;
/// @todo msg.is_bigendian = ?;
}
+ /** \brief Convert a pcl::PointCloud<T> object to a PCLPointCloud2 binary data blob.
+ * \param[in] cloud the input pcl::PointCloud<T>
+ * \param[out] msg the resultant PCLPointCloud2 binary blob
+ */
+ template<typename PointT> void
+ toPCLPointCloud2 (const pcl::PointCloud<PointT>& cloud, pcl::PCLPointCloud2& msg)
+ {
+    toPCLPointCloud2 (cloud, msg, true); // true was the default in older PCL versions
+ }
+
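
A short sketch of the new padding parameter in use (point values chosen arbitrarily): the same cloud serialized with and without padding restores to identical points, only the blob layout and size differ:

#include <pcl/PCLPointCloud2.h>
#include <pcl/conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

inline void
padding_roundtrip_sketch ()
{
  pcl::PointCloud<pcl::PointXYZ> cloud;
  cloud.emplace_back (1.0f, 2.0f, 3.0f);
  cloud.emplace_back (4.0f, 5.0f, 6.0f);

  pcl::PCLPointCloud2 padded, compact;
  pcl::toPCLPointCloud2 (cloud, padded);         // keeps padding: point_step == sizeof (pcl::PointXYZ) == 16
  pcl::toPCLPointCloud2 (cloud, compact, false); // drops padding: point_step == 3 * sizeof (float) == 12

  // Both blobs carry the same information; the compact one is simply smaller.
  pcl::PointCloud<pcl::PointXYZ> restored;
  pcl::fromPCLPointCloud2 (compact, restored);   // restored holds the same two points as cloud
}
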
/** \brief Copy the RGB fields of a PointCloud into pcl::PCLImage format
* \param[in] cloud the point cloud message
* \param[out] msg the resultant pcl::PCLImage
#include <stdexcept>
#include <sstream>
#include <boost/current_function.hpp>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
/** PCL_THROW_EXCEPTION a helper macro to be used for throwing exceptions.
* This is an example on how to use:
* PCL_THROW_EXCEPTION(IOException,
* "encountered an error while opening " << filename << " PCD file");
*/
+// NOLINTBEGIN(bugprone-macro-parentheses)
#define PCL_THROW_EXCEPTION(ExceptionName, message) \
{ \
std::ostringstream s; \
s << message; \
throw ExceptionName(s.str(), __FILE__, BOOST_CURRENT_FUNCTION, __LINE__); \
}
+// NOLINTEND(bugprone-macro-parentheses)
namespace pcl
{
* \brief A base class for all pcl exceptions which inherits from std::runtime_error
* \author Eitan Marder-Eppstein, Suat Gedikli, Nizar Sallem
*/
- class PCLException : public std::runtime_error
+ class PCL_EXPORTS PCLException : public std::runtime_error
{
public:
/** \class InvalidConversionException
* \brief An exception that is thrown when a PCLPointCloud2 message cannot be converted into a PCL type
*/
- class InvalidConversionException : public PCLException
+ class PCL_EXPORTS InvalidConversionException : public PCLException
{
public:
/** \class IsNotDenseException
* \brief An exception that is thrown when a PointCloud is not dense but is attempted to be used as dense
*/
- class IsNotDenseException : public PCLException
+ class PCL_EXPORTS IsNotDenseException : public PCLException
{
public:
* \brief An exception that is thrown when a sample consensus model doesn't
* have the correct number of samples defined in model_types.h
*/
- class InvalidSACModelTypeException : public PCLException
+ class PCL_EXPORTS InvalidSACModelTypeException : public PCLException
{
public:
/** \class IOException
* \brief An exception that is thrown during an IO error (typical read/write errors)
*/
- class IOException : public PCLException
+ class PCL_EXPORTS IOException : public PCLException
{
public:
* \brief An exception thrown when init can not be performed should be used in all the
* PCLBase class inheritants.
*/
- class InitFailedException : public PCLException
+ class PCL_EXPORTS InitFailedException : public PCLException
{
public:
InitFailedException (const std::string& error_description = "",
* \brief An exception that is thrown when an organized point cloud is needed
* but not provided.
*/
- class UnorganizedPointCloudException : public PCLException
+ class PCL_EXPORTS UnorganizedPointCloudException : public PCLException
{
public:
/** \class KernelWidthTooSmallException
* \brief An exception that is thrown when the kernel size is too small
*/
- class KernelWidthTooSmallException : public PCLException
+ class PCL_EXPORTS KernelWidthTooSmallException : public PCLException
{
public:
: pcl::PCLException (error_description, file_name, function_name, line_number) { }
} ;
- class UnhandledPointTypeException : public PCLException
+ class PCL_EXPORTS UnhandledPointTypeException : public PCLException
{
public:
UnhandledPointTypeException (const std::string& error_description,
: pcl::PCLException (error_description, file_name, function_name, line_number) { }
};
- class ComputeFailedException : public PCLException
+ class PCL_EXPORTS ComputeFailedException : public PCLException
{
public:
ComputeFailedException (const std::string& error_description,
/** \class BadArgumentException
* \brief An exception that is thrown when the arguments number or type is wrong/unhandled.
*/
- class BadArgumentException : public PCLException
+ class PCL_EXPORTS BadArgumentException : public PCLException
{
public:
BadArgumentException (const std::string& error_description,
//
// ((x)(y)(z))((1)(2)(3))((dracula)(radu))
//
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__clang__)
#define PCL_INSTANTIATE_PRODUCT_IMPL(r, product) \
BOOST_PP_CAT(PCL_INSTANTIATE_, BOOST_PP_SEQ_HEAD(product)) \
BOOST_PP_EXPAND(BOOST_PP_SEQ_TO_TUPLE(BOOST_PP_SEQ_TAIL(product)))
#pragma once
#include <pcl/memory.h> // for PCL_MAKE_ALIGNED_OPERATOR_NEW
+#include <pcl/pcl_config.h> // for PCL_XYZ_POINT_TYPES, PCL_NORMAL_POINT_TYPES
#include <pcl/pcl_macros.h> // for PCL_EXPORTS
#include <pcl/PCLPointField.h> // for PCLPointField
#include <pcl/point_types.h> // implementee
(pcl::PointXYZRGBNormal) \
(pcl::PointSurfel) \
-// Define all point types that include XYZ data
-#define PCL_XYZ_POINT_TYPES \
- (pcl::PointXYZ) \
- (pcl::PointXYZI) \
- (pcl::PointXYZL) \
- (pcl::PointXYZRGBA) \
- (pcl::PointXYZRGB) \
- (pcl::PointXYZRGBL) \
- (pcl::PointXYZLAB) \
- (pcl::PointXYZHSV) \
- (pcl::InterestPoint) \
- (pcl::PointNormal) \
- (pcl::PointXYZRGBNormal) \
- (pcl::PointXYZINormal) \
- (pcl::PointXYZLNormal) \
- (pcl::PointWithRange) \
- (pcl::PointWithViewpoint) \
- (pcl::PointWithScale) \
- (pcl::PointSurfel) \
- (pcl::PointDEM)
-
// Define all point types with XYZ and label
#define PCL_XYZL_POINT_TYPES \
(pcl::PointXYZL) \
(pcl::PointXYZRGBL) \
(pcl::PointXYZLNormal)
-// Define all point types that include normal[3] data
-#define PCL_NORMAL_POINT_TYPES \
- (pcl::Normal) \
- (pcl::PointNormal) \
- (pcl::PointXYZRGBNormal) \
- (pcl::PointXYZINormal) \
- (pcl::PointXYZLNormal) \
- (pcl::PointSurfel)
-
// Define all point types that represent features
#define PCL_FEATURE_POINT_TYPES \
(pcl::PFHSignature125) \
inline constexpr PointXYZLAB() : PointXYZLAB{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f} {}
inline constexpr PointXYZLAB (float _x, float _y, float _z,
- float _L, float _a, float _b) :
- _PointXYZLAB{ {{_x, _y, _z, 1.0f}}, {{_L, _a, _b}} } {}
+ float _l, float _a, float _b) :
+ _PointXYZLAB{ {{_x, _y, _z, 1.0f}}, {{_l, _a, _b}} } {}
friend std::ostream& operator << (std::ostream& os, const PointXYZLAB& p);
PCL_MAKE_ALIGNED_OPERATOR_NEW
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2019-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-PCL_DEPRECATED_HEADER(1, 15, "Use <pcl/memory.h> instead.")
-
-#include <pcl/memory.h>
-
*/
#include <pcl/type_traits.h> // for has_custom_allocator
+#include <pcl/pcl_config.h> // for PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC
#include <Eigen/Core> // for EIGEN_MAKE_ALIGNED_OPERATOR_NEW
#include <type_traits> // for std::enable_if_t, std::false_type, std::true_type
#include <utility> // for std::forward
+#if !defined(PCL_SILENCE_MALLOC_WARNING) && !defined(__NVCC__)
+#if PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC
+// EIGEN_DEFAULT_ALIGN_BYTES and EIGEN_MALLOC_ALREADY_ALIGNED will be set after including Eigen/Core
+// this condition is the same as in the function aligned_malloc in Memory.h in the Eigen code
+#if (defined(EIGEN_DEFAULT_ALIGN_BYTES) && EIGEN_DEFAULT_ALIGN_BYTES==0) || (defined(EIGEN_MALLOC_ALREADY_ALIGNED) && EIGEN_MALLOC_ALREADY_ALIGNED)
+#if defined(_MSC_VER)
+#error "Potential runtime error due to aligned malloc mismatch! You likely have to compile your code with AVX enabled or define EIGEN_MAX_ALIGN_BYTES=32 (to silence this message at your own risk, define PCL_SILENCE_MALLOC_WARNING=1)"
+#else // defined(_MSC_VER)
+#warning "Potential runtime error due to aligned malloc mismatch! You likely have to compile your code with AVX enabled or define EIGEN_MAX_ALIGN_BYTES=32 (to silence this message at your own risk, define PCL_SILENCE_MALLOC_WARNING=1)"
+#endif // defined(_MSC_VER)
+#endif
+#else // PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC
+#if (defined(EIGEN_DEFAULT_ALIGN_BYTES) && EIGEN_DEFAULT_ALIGN_BYTES!=0) && (defined(EIGEN_MALLOC_ALREADY_ALIGNED) && !EIGEN_MALLOC_ALREADY_ALIGNED)
+#if defined(_MSC_VER)
+#error "Potential runtime error due to aligned malloc mismatch! PCL was likely compiled without AVX support but you enabled AVX for your code (to silence this message at your own risk, define PCL_SILENCE_MALLOC_WARNING=1)"
+#else // defined(_MSC_VER)
+#warning "Potential runtime error due to aligned malloc mismatch! PCL was likely compiled without AVX support but you enabled AVX for your code (to silence this message at your own risk, define PCL_SILENCE_MALLOC_WARNING=1)"
+#endif // defined(_MSC_VER)
+#endif
+#endif // PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC
+#endif // !defined(PCL_SILENCE_MALLOC_WARNING)
+
/**
* \brief Macro to signal a class requires a custom allocator
*
#pragma once
+#include <pcl/pcl_config.h> // for PCL_SYMBOL_VISIBILITY_HIDDEN
+
// This header is created to include to NVCC compiled sources.
// Header 'pcl_macros' is not suitable since it includes <Eigen/Core>,
// which can't be eaten by nvcc (it's too weak)
#define PCL_EXPORTS
#endif
#else
- #define PCL_EXPORTS
+ #ifdef PCL_SYMBOL_VISIBILITY_HIDDEN
+ #define PCL_EXPORTS __attribute__ ((visibility ("default")))
+ #else
+ #define PCL_EXPORTS
+ #endif
#endif
// MSVC < 2019 have issues:
// * can't short-circuit logic in macros
// * don't define standard macros
-// => this leads to annyoing C4067 warnings (see https://developercommunity.visualstudio.com/content/problem/327796/-has-cpp-attribute-emits-warning-is-wrong-highligh.html)
+// => this leads to annoying C4067 warnings (see https://developercommunity.visualstudio.com/content/problem/327796/-has-cpp-attribute-emits-warning-is-wrong-highligh.html)
#if defined(_MSC_VER)
// nvcc on msvc can't work with [[deprecated]]
#if !defined(__CUDACC__)
#define _PCL_DEPRECATED_HEADER_IMPL(Message)
#endif
+// NOLINTBEGIN(bugprone-macro-parentheses)
/**
* \brief A handy way to inform the user of the removal deadline
*/
#define _PCL_PREPARE_REMOVAL_MESSAGE(Major, Minor, Msg) \
Msg " (It will be removed in PCL " BOOST_PP_STRINGIZE(Major.Minor) ")"
+// NOLINTEND(bugprone-macro-parentheses)
/**
* \brief Tests for Minor < PCL_MINOR_VERSION
#define NOMINMAX
#endif
- #define __PRETTY_FUNCTION__ __FUNCTION__
- #define __func__ __FUNCTION__
+ #define __PRETTY_FUNCTION__ __FUNCSIG__
#endif
#endif // defined _WIN32
#endif
#define FIXED(s) \
- std::fixed << s << std::resetiosflags(std::ios_base::fixed)
+ std::fixed << (s) << std::resetiosflags(std::ios_base::fixed)
#ifndef ERASE_STRUCT
-#define ERASE_STRUCT(var) memset(&var, 0, sizeof(var))
+#define ERASE_STRUCT(var) memset(&(var), 0, sizeof(var))
#endif
#ifndef ERASE_ARRAY
-#define ERASE_ARRAY(var, size) memset(var, 0, size*sizeof(*var))
+#define ERASE_ARRAY(var, size) memset(var, 0, (size)*sizeof(*(var)))
#endif
#ifndef SET_ARRAY
-#define SET_ARRAY(var, value, size) { for (decltype(size) i = 0; i < size; ++i) var[i]=value; }
+#define SET_ARRAY(var, value, size) { for (decltype(size) i = 0; i < (size); ++i) (var)[i]=value; }
#endif
#ifndef PCL_EXTERN_C
#define PCL_EXPORTS
#endif
#else
- #define PCL_EXPORTS
+ #ifdef PCL_SYMBOL_VISIBILITY_HIDDEN
+ #define PCL_EXPORTS __attribute__ ((visibility ("default")))
+ #else
+ #define PCL_EXPORTS
+ #endif
#endif
#if defined WIN32 || defined _WIN32
* \author Patrick Mihelich, Radu B. Rusu
*/
template <typename PointT>
- class PCL_EXPORTS PointCloud
+ class PointCloud
{
public:
/** \brief Default constructor. Sets \ref is_dense to true, \ref width
return (s);
}
}
-
-#define PCL_INSTANTIATE_PointCloud(T) template class PCL_EXPORTS pcl::PointCloud<T>;
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#pragma once
-
-PCL_DEPRECATED_HEADER(1, 15, "Use <pcl/type_traits.h> instead.")
-
-#include <pcl/type_traits.h>
image_offset_y_ = (std::max) (0, center_pixel_y-pixel_radius_y);
points.clear ();
- points.resize (width*height, unobserved_point);
+ points.resize (static_cast<std::size_t>(width)*static_cast<std::size_t>(height), unobserved_point);
int top=height, right=-1, bottom=-1, left=width;
doZBuffer (point_cloud, noise_level, min_range, top, right, bottom, left);
bool
RangeImage::isObserved (int x, int y) const
{
- return !(!isInImage (x,y) || (std::isinf (getPoint (x,y).range) && getPoint (x,y).range < 0.0f));
+ return (isInImage (x,y) && (!std::isinf (getPoint (x,y).range) || getPoint (x,y).range >= 0.0f));
}
/////////////////////////////////////////////////////////////////////////
/** Constructor */
PCL_EXPORTS RangeImage ();
/** Destructor */
- PCL_EXPORTS virtual ~RangeImage () = default;
+ PCL_EXPORTS virtual ~RangeImage ();
// =====STATIC VARIABLES=====
/** The maximum number of openmp threads that can be used in this class */
// =====STATIC PROTECTED=====
- static const int lookup_table_size;
- static std::vector<float> asin_lookup_table;
- static std::vector<float> atan_lookup_table;
- static std::vector<float> cos_lookup_table;
+ PCL_EXPORTS static const int lookup_table_size;
+ PCL_EXPORTS static std::vector<float> asin_lookup_table;
+ PCL_EXPORTS static std::vector<float> atan_lookup_table;
+ PCL_EXPORTS static std::vector<float> cos_lookup_table;
/** Create lookup tables for trigonometric functions */
static void
createLookupTables ();
* \param sensor_pose the pose of the virtual depth camera
* \param coordinate_frame the used coordinate frame of the point cloud
* \param noise_level what is the typical noise of the sensor - is used for averaging in the z-buffer
- * \param min_range minimum range to consifder points
+ * \param min_range minimum range to consider points
*/
template <typename PointCloudType> void
createFromPointCloudWithFixedSize (const PointCloudType& point_cloud,
cloud1.is_dense = cloud1.is_dense && cloud2.is_dense;
cloud1.height = 1;
cloud1.width = size1 + size2;
+ cloud1.row_step = cloud1.width * cloud1.point_step; // changed width
if (simple_layout)
{
}
const auto data1_size = cloud1.data.size ();
cloud1.data.resize(data1_size + cloud2.data.size ());
- for (uindex_t cp = 0; cp < size2; ++cp)
+ for (std::size_t cp = 0; cp < size2; ++cp)
{
for (const auto& field_data: valid_fields)
{
const auto& i = field_data.idx1;
const auto& j = field_data.idx2;
- const auto& size = field_data.size;
+ const std::size_t size = field_data.size;
// Leave the data for the skip fields untouched in cloud1
// Copy only the required data from cloud2 to the correct location for cloud1
memcpy (reinterpret_cast<char*> (&cloud1.data[data1_size + cp * cloud1.point_step + cloud1.fields[i].offset]),
{
if (output.height < input.height || output.width < input.width)
{
- output.width = input.width;
- output.height = input.height;
- output.resize (input.height * input.width);
+ output.resize (static_cast<uindex_t>(input.width), static_cast<uindex_t>(input.height)); // Casting is necessary to resolve ambiguous call to resize
}
unaliased_input = &input;
}
{
if (output.height < input.height || output.width < input.width)
{
- output.width = input.width;
- output.height = input.height;
- output.resize (input.height * input.width);
+ output.resize (static_cast<uindex_t>(input.width), static_cast<uindex_t>(input.height)); // Casting is necessary to resolve ambiguous call to resize
}
unaliased_input = &input;
}
//by offset so that we can compute sizes correctly. There is no
//guarantee that the fields are in the correct order when they come in
std::vector<const pcl::PCLPointField*> cloud1_fields_sorted;
+ cloud1_fields_sorted.reserve(cloud1.fields.size());
+
for (const auto &field : cloud1.fields)
cloud1_fields_sorted.push_back (&field);
//the total size of extra data should be the size of data per point
//multiplied by the total number of points in the cloud
- std::uint32_t cloud1_unique_data_size = cloud1_unique_point_step * cloud1.width * cloud1.height;
+ const std::size_t cloud1_unique_data_size = static_cast<std::size_t>(cloud1_unique_point_step) * cloud1.width * cloud1.height;
// Point step must increase with the length of each matching field
cloud_out.point_step = cloud2.point_step + cloud1_unique_point_step;
}
// Iterate over each point and perform the appropriate memcpys
- int point_offset = 0;
- for (uindex_t cp = 0; cp < cloud_out.width * cloud_out.height; ++cp)
+ std::size_t point_offset = 0;
+ const std::size_t npts = static_cast<std::size_t>(cloud_out.width) * static_cast<std::size_t>(cloud_out.height);
+ for (std::size_t cp = 0; cp < npts; ++cp)
{
memcpy (&cloud_out.data[point_offset], &cloud2.data[cp * cloud2.point_step], cloud2.point_step);
int field_offset = cloud2.point_step;
return (false);
}
- std::size_t npts = in.width * in.height;
+ std::size_t npts = static_cast<std::size_t>(in.width) * static_cast<std::size_t>(in.height);
out = Eigen::MatrixXf::Ones (4, npts);
Eigen::Array4i xyz_offset (in.fields[x_idx].offset, in.fields[y_idx].offset, in.fields[z_idx].offset, 0);
cloud_out.row_step = cloud_in.point_step * static_cast<std::uint32_t> (indices.size ());
cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.data.resize (cloud_out.width * cloud_out.height * cloud_out.point_step);
+ cloud_out.data.resize (static_cast<std::size_t>(cloud_out.width) * static_cast<std::size_t>(cloud_out.height) * static_cast<std::size_t>(cloud_out.point_step));
// Iterate over each point
for (std::size_t i = 0; i < indices.size (); ++i)
- memcpy (&cloud_out.data[i * cloud_out.point_step], &cloud_in.data[indices[i] * cloud_in.point_step], cloud_in.point_step);
+ memcpy (&cloud_out.data[i * cloud_out.point_step], &cloud_in.data[static_cast<std::size_t>(indices[i]) * cloud_in.point_step], cloud_in.point_step);
}
//////////////////////////////////////////////////////////////////////////
cloud_out.row_step = cloud_in.point_step * static_cast<std::uint32_t> (indices.size ());
cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.data.resize (cloud_out.width * cloud_out.height * cloud_out.point_step);
+ cloud_out.data.resize (static_cast<std::size_t>(cloud_out.width) * cloud_out.height * cloud_out.point_step);
// Iterate over each point
for (std::size_t i = 0; i < indices.size (); ++i)
- memcpy (&cloud_out.data[i * cloud_out.point_step], &cloud_in.data[indices[i] * cloud_in.point_step], cloud_in.point_step);
+ memcpy (&cloud_out.data[i * cloud_out.point_step], &cloud_in.data[static_cast<std::size_t>(indices[i]) * cloud_in.point_step], cloud_in.point_step);
}
////////////////////////////////////////////////////////////////////////////////
}
// If we have a set of fake indices, but they do not match the number of points in the cloud, update them
- if (fake_indices_ && indices_->size () != (input_->width * input_->height))
+ if (fake_indices_ && indices_->size () != (static_cast<std::size_t>(input_->width) * static_cast<std::size_t>(input_->height)))
{
const auto indices_size = indices_->size ();
try
{
- indices_->resize (input_->width * input_->height);
+ indices_->resize (static_cast<std::size_t>(input_->width) * static_cast<std::size_t>(input_->height));
}
catch (const std::bad_alloc&)
{
# define COMMON_LVB_REVERSE_VIDEO 0
#endif
-WORD
+WORD
convertAttributesColor (int attribute, int fg, int bg=0)
{
static WORD wAttributes[7] = { 0, // TT_RESET
#else
char command[40];
// Command is the control command to the terminal
- sprintf (command, "%c[%d;%d;%dm", 0x1B, attribute, fg + 30, bg + 40);
+ snprintf (command, sizeof(command), "%c[%d;%d;%dm", 0x1B, attribute, fg + 30, bg + 40);
fprintf (stream, "%s", command);
#endif
}
#else
char command[17];
// Command is the control command to the terminal
- sprintf (command, "%c[%d;%dm", 0x1B, attribute, fg + 30);
+ snprintf (command, sizeof(command), "%c[%d;%dm", 0x1B, attribute, fg + 30);
fprintf (stream, "%s", command);
#endif
}
#else
char command[13];
// Command is the control command to the terminal
- sprintf (command, "%c[0;m", 0x1B);
+ snprintf (command, sizeof(command), "%c[0;m", 0x1B);
fprintf (stream, "%s", command);
#endif
}
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
void
pcl::console::print_info (const char *format, ...)
{
- if (!isVerbosityLevelEnabled (L_INFO)) return;
+ if (!isVerbosityLevelEnabled (L_INFO)) return;
reset_text_color (stdout);
va_start (ap, format);
vfprintf (stderr, format, ap);
va_end (ap);
-
+
reset_text_color (stderr);
}
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
va_start (ap, format);
vfprintf (stderr, format, ap);
va_end (ap);
-
+
reset_text_color (stderr);
}
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
va_start (ap, format);
vfprintf (stdout, format, ap);
va_end (ap);
-
+
reset_text_color (stdout);
}
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
va_start (ap, format);
vfprintf (stdout, format, ap);
va_end (ap);
-
+
reset_text_color (stdout);
}
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
{
static bool s_NeedVerbosityInit = true;
#ifdef VERBOSITY_LEVEL_ALWAYS
- static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_ALWAYS;
+ static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_ALWAYS;
#elif defined VERBOSITY_LEVEL_ERROR
static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_ERROR;
#elif defined VERBOSITY_LEVEL_WARN
#elif defined VERBOSITY_LEVEL_DEBUG
static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_DEBUG;
#elif defined VERBOSITY_LEVEL_VERBOSE
- static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_VERBOSE;
-#else
- static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_INFO;
+ static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_VERBOSE;
+#else
+ static VERBOSITY_LEVEL s_VerbosityLevel = pcl::console::L_INFO;
#endif
}
}
pcl::console::isVerbosityLevelEnabled (pcl::console::VERBOSITY_LEVEL level)
{
if (s_NeedVerbosityInit) pcl::console::initVerbosityLevel ();
- return level <= s_VerbosityLevel;
+ return level <= s_VerbosityLevel;
}
////////////////////////////////////////////////////////////////////////////////
-bool
+bool
pcl::console::initVerbosityLevel ()
{
#ifdef VERBOSITY_LEVEL_ALWAYS
- s_VerbosityLevel = pcl::console::L_ALWAYS;
+ s_VerbosityLevel = pcl::console::L_ALWAYS;
#elif defined VERBOSITY_LEVEL_ERROR
s_VerbosityLevel = pcl::console::L_ERROR;
#elif defined VERBOSITY_LEVEL_WARN
#elif defined VERBOSITY_LEVEL_DEBUG
s_VerbosityLevel = pcl::console::L_DEBUG;
#elif defined VERBOSITY_LEVEL_VERBOSE
- s_VerbosityLevel = pcl::console::L_VERBOSE;
-#else
+ s_VerbosityLevel = pcl::console::L_VERBOSE;
+#else
s_VerbosityLevel = pcl::console::L_INFO; // Default value
#endif
}
////////////////////////////////////////////////////////////////////////////////
-void
+void
pcl::console::print (pcl::console::VERBOSITY_LEVEL level, FILE *stream, const char *format, ...)
{
if (!isVerbosityLevelEnabled (level)) return;
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
////////////////////////////////////////////////////////////////////////////////
-void
+void
pcl::console::print (pcl::console::VERBOSITY_LEVEL level, const char *format, ...)
{
if (!isVerbosityLevelEnabled (level)) return;
default:
break;
}
-
+
va_list ap;
va_start (ap, format);
vfprintf (stream, format, ap);
va_end (ap);
-
+
reset_text_color (stream);
}
unobserved_point.range = -std::numeric_limits<float>::infinity ();
}
+RangeImage::~RangeImage () = default;
+
/////////////////////////////////////////////////////////////////////////
void
RangeImage::reset ()
width = right-left+1; height = bottom-top+1;
image_offset_x_ = left+oldRangeImage.image_offset_x_;
image_offset_y_ = top+oldRangeImage.image_offset_y_;
- points.resize (width*height);
+ points.resize (static_cast<std::size_t>(width)*static_cast<std::size_t>(height));
//std::cout << oldRangeImage.width<<"x"<<oldRangeImage.height<<" -> "<<width<<"x"<<height<<"\n";
void
RangeImage::getIntegralImage (float*& integral_image, int*& valid_points_num_image) const
{
- integral_image = new float[width*height];
+ integral_image = new float[static_cast<std::size_t>(width)*static_cast<std::size_t>(height)];
float* integral_image_ptr = integral_image;
- valid_points_num_image = new int[width*height];
+ valid_points_num_image = new int[static_cast<std::size_t>(width)*static_cast<std::size_t>(height)];
int* valid_points_num_image_ptr = valid_points_num_image;
for (int y = 0; y < static_cast<int> (height); ++y)
{
half_image.height = height/2;
half_image.is_dense = is_dense;
half_image.clear ();
- half_image.resize (half_image.width*half_image.height);
+ half_image.resize (half_image.width, half_image.height);
int src_start_x = 2*half_image.image_offset_x_ - image_offset_x_,
src_start_y = 2*half_image.image_offset_y_ - image_offset_y_;
sub_image.height = sub_image_height;
sub_image.is_dense = is_dense;
sub_image.clear ();
- sub_image.resize (sub_image.width*sub_image.height);
+ sub_image.resize (sub_image.width, sub_image.height);
int src_start_x = combine_pixels*sub_image.image_offset_x_ - image_offset_x_,
src_start_y = combine_pixels*sub_image.image_offset_y_ - image_offset_y_;
focal_length_x_reciprocal_ = focal_length_y_reciprocal_ = 1.0f / focal_length_x_;
center_x_ = static_cast<float> (di_width) / static_cast<float> (2 * skip);
center_y_ = static_cast<float> (di_height) / static_cast<float> (2 * skip);
- points.resize (width*height);
+ points.resize (static_cast<std::size_t>(width)*static_cast<std::size_t>(height));
//std::cout << PVARN (*this);
focal_length_y_reciprocal_ = 1.0f / focal_length_y_;
center_x_ = static_cast<float> (di_center_x) / static_cast<float> (skip);
center_y_ = static_cast<float> (di_center_y) / static_cast<float> (skip);
- points.resize (width * height);
+ points.resize (static_cast<std::size_t>(width) * static_cast<std::size_t>(height));
for (int y=0; y < static_cast<int> (height); ++y)
{
focal_length_y_reciprocal_ = 1.0f / focal_length_y_;
center_x_ = static_cast<float> (di_center_x) / static_cast<float> (skip);
center_y_ = static_cast<float> (di_center_y) / static_cast<float> (skip);
- points.resize (width * height);
+ points.resize (static_cast<std::size_t>(width) * static_cast<std::size_t>(height));
for (int y = 0; y < static_cast<int> (height); ++y)
{
set(SUBSYS_DESC "Point cloud CUDA libraries")
set(SUBSYS_DEPS)
-option(BUILD_CUDA "Build the CUDA-related subsystems" ${DEFAULT})
+option(BUILD_CUDA "Build the CUDA-related subsystems" OFF)
if(NOT (BUILD_CUDA AND CUDA_FOUND))
return()
else()
set(DEFAULT TRUE)
set(REASON)
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/pcl_macros.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/io/pcd_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
#include <Eigen/Core>
#include <Eigen/Geometry>
-#include <boost/filesystem.hpp>
-
#include <functional>
#include <iostream>
#include <mutex>
//bool repeat = false;
//std::string path = "./pcl_logo.pcd";
- //if (path.empty() || !boost::filesystem::exists (path))
+ //if (path.empty() || !pcl_fs::exists (path))
//{
// std::cerr << "did not find file" << std::endl;
//}
set(SUBSYS_DESC "Point cloud CUDA common library")
set(SUBSYS_DEPS)
-set(build TRUE)
PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} ON)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
include/pcl/cuda/common/point_type_rgb.h
)
-include_directories(./include)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
+PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME})
set(EXT_DEPS CUDA)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC "${SUBSYS_DESC}"
PCL_DEPS "${SUBSYS_DEPS}" EXT_DEPS "" HEADER_ONLY)
# ---[ Point Cloud Library - pcl/cuda/io
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries(${LIB_NAME} pcl_common)
+target_link_libraries(${LIB_NAME} pcl_common pcl_io pcl_cuda_common)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
# ---[ Point Cloud Library - pcl/cuda/io
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS ${SUBSYS_EXT_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries(${LIB_NAME} pcl_common)
+target_link_libraries(${LIB_NAME} pcl_common pcl_io pcl_cuda_common)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${SUBSYS_EXT_DEPS})
# ---[ Point Cloud Library - pcl/cuda/io
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
target_link_libraries("${LIB_NAME}" pcl_cuda_features)
# ---[ Point Cloud Library - pcl/cuda/io
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries(${LIB_NAME} pcl_common)
+target_link_libraries(${LIB_NAME} pcl_common pcl_io pcl_cuda_common)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
Committing changes to the git master
------------------------------------
-In order to oversee the commit messages more easier and that the changelist looks homogenous please keep the following format:
+To make the commit messages easier to review and to keep the changelist homogeneous, please use the following format:
"* <fixed|bugfix|changed|new> X in @<classname>@ (#<bug number>)"
variable value, we can either browse the CMake variables to look for it, or we can use the `Search:` field to type the variable name.
.. image:: images/windows/cmake_grouped_advanced.png
- :alt: CMake groupped and advanced variables
+ :alt: CMake grouped and advanced variables
:align: center
Let's check whether CMake actually found the needed third-party dependencies:
We finally managed to compile the Point Cloud Library (PCL) as binaries for
Windows. You can start using them in your project by following the
:ref:`using_pcl_pcl_config` tutorial.
+
+.. note::
+ You may get errors when your program is linked if you use specific point types that are not used so often (so for example `pcl::PointXYZ` and `pcl::PointXYZI` are usually not affected). Of course, the first thing you should check is whether you correctly link to all PCL libraries (`target_link_libraries(<my_executable> ${PCL_LIBRARIES})` in CMake). The next thing you can try is adding `#define PCL_NO_PRECOMPILE` before including any PCL headers. The background is that on Windows, PCL is always compiled with `PCL_ONLY_CORE_POINT_TYPES` enabled, otherwise some PCL modules (e.g. pcl_features) would fail to build due to limitations of the Windows linker. The effect is that the templated classes are only instantiated for some commonly used point types, not for all. For further explanations, see the :ref:`adding_custom_ptype` tutorial.
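+
+ A minimal sketch of that workaround (the includes below are placeholders; use whatever PCL headers your project actually needs):
+
+ .. code-block:: cpp
+
+    // Define PCL_NO_PRECOMPILE before the first PCL include so that the
+    // templated PCL classes are instantiated in this translation unit for
+    // the point types you use, instead of relying only on the precompiled
+    // instantiations shipped with the PCL binaries.
+    #define PCL_NO_PRECOMPILE
+    #include <pcl/point_types.h>
+    #include <pcl/features/normal_3d.h>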
1 [20.000000%].
[pcl::IterativeClosestPoint::computeTransformation] Convergence reached.
Number of iterations: 1 out of 0. Transformation difference: 0.700001
- has converged:1 score: 1.95122e-14
+ ICP has converged, score: 1.95122e-14
1 4.47035e-08 -3.25963e-09 0.7
2.98023e-08 1 -1.08499e-07 -2.98023e-08
1.30385e-08 -1.67638e-08 1 1.86265e-08
Loaded 112586 data points from room_scan1.pcd
Loaded 112624 data points from room_scan2.pcd
Filtered cloud contains 12433 data points from room_scan2.pcd
- Normal Distributions Transform has converged:1 score: 0.638694
+ Normal Distributions Transform has converged, score: 0.638694
.. note::
If there are new features or bugfixes that are not yet part of a release,
- you can try to use --head, which downloads the master of PCL.
+ you can try to use `--head`, which downloads the master branch of PCL.
You can see the available PCL version and options in VCPKG `here <https://github.com/microsoft/vcpkg/blob/master/ports/pcl/vcpkg.json>`_.
./vcpkg install pcl[*]
+.. note::
+ If you want to use anything from the PCL visualization module (like the `PCLVisualizer` for example), you need to explicitly request this from vcpkg by `./vcpkg install pcl[visualization]` or `./vcpkg install pcl[*]` (it is disabled by default).
+
If you want to install with a different triplet type, the easiest way is:
./vcpkg install pcl --triplet triplet_type
To use PCL in CMake project, take a look at Kunal Tyagi's minimal example `in this repository <https://github.com/kunaltyagi/pcl-cmake-minimum>`_
+.. note::
+ You may get errors when your program is linked if you use specific point types that are not used so often (so for example `pcl::PointXYZ` and `pcl::PointXYZI` are usually not affected). Of course, the first thing you should check is whether you correctly link to all PCL libraries (`target_link_libraries(<my_executable> ${PCL_LIBRARIES})` in CMake). The next thing you can try is adding `#define PCL_NO_PRECOMPILE` before including any PCL headers. The background is that on Windows, PCL is always compiled with `PCL_ONLY_CORE_POINT_TYPES` enabled, otherwise some PCL modules (e.g. pcl_features) would fail to build due to limitations of the Windows linker. The effect is that the templated classes are only instantiated for some commonly used point types, not for all. For further explanations, see the :ref:`adding_custom_ptype` tutorial.
+
Install PCL dependencies for contributions
==========================================
if (argc < 3)
{
printf ("\nUsage: pcl_example_nurbs_fitting_surface pcd<PointXYZ>-in-file 3dm-out-file\n\n");
- exit (0);
+ exit (EXIT_FAILURE);
}
pcd_file = argv[1];
file_3dm = argv[2];
/** @brief Convenience typedef */
typedef pcl::PointCloud<PointT> PointCloudT;
-/** @brief Convenience typdef for the Ensenso grabber callback */
+/** @brief Convenience typedef for the Ensenso grabber callback */
typedef std::pair<pcl::PCLImage, pcl::PCLImage> PairOfImages;
typedef pcl::shared_ptr<PairOfImages> PairOfImagesPtr;
#include <string>
#include <sstream>
#include <iostream>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <boost/filesystem.hpp>
+
#include <boost/algorithm/string.hpp> // for split, is_any_of
-namespace bf = boost::filesystem;
inline void
-getModelsInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
+getModelsInDirectory (pcl_fs::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
{
- for (const auto& dir_entry : bf::directory_iterator(dir))
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir))
{
//check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
+ if (pcl_fs::is_directory (dir_entry))
{
std::string so_far = rel_path_so_far + dir_entry.path ().filename ().string () + "/";
- bf::path curr_path = dir_entry.path ();
+ pcl_fs::path curr_path = dir_entry.path ();
getModelsInDirectory (curr_path, so_far, relative_paths);
}
else
std::string directory (argv[1]);
//Find all raw* files in input_directory
- bf::path dir_path = directory;
+ pcl_fs::path dir_path = directory;
std::vector < std::string > files;
std::string start = "";
getModelsInDirectory (dir_path, start, files);
#include <string>
#include <sstream>
#include <iostream>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <boost/filesystem.hpp>
+
#include <boost/algorithm/string.hpp> // for split, is_any_of
-namespace bf = boost::filesystem;
inline void
-getModelsInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
+getModelsInDirectory (pcl_fs::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
{
- for (const auto& dir_entry : bf::directory_iterator(dir))
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir))
{
//check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
+ if (pcl_fs::is_directory (dir_entry))
{
std::string so_far = rel_path_so_far + dir_entry.path ().filename ().string () + "/";
- bf::path curr_path = dir_entry.path ();
+ pcl_fs::path curr_path = dir_entry.path ();
getModelsInDirectory (curr_path, so_far, relative_paths);
}
else
std::string directory (argv[1]);
//Find all raw* files in input_directory
- bf::path dir_path = directory;
+ pcl_fs::path dir_path = directory;
std::vector < std::string > files;
std::string start = "";
getModelsInDirectory (dir_path, start, files);
pcl::PointCloud<pcl::PointXYZ> Final;
icp.align(Final);
- std::cout << "has converged:" << icp.hasConverged() << " score: " <<
+ std::cout << "ICP has " << (icp.hasConverged()?"converged":"not converged") << ", score: " <<
icp.getFitnessScore() << std::endl;
std::cout << icp.getFinalTransformation() << std::endl;
pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud (new pcl::PointCloud<pcl::PointXYZ>);
ndt.align (*output_cloud, init_guess);
- std::cout << "Normal Distributions Transform has converged:" << ndt.hasConverged ()
- << " score: " << ndt.getFitnessScore () << std::endl;
+ std::cout << "Normal Distributions Transform has " << (ndt.hasConverged ()?"converged":"not converged")
+ << ", score: " << ndt.getFitnessScore () << std::endl;
// Transforming unfiltered, input cloud using found transform.
pcl::transformPointCloud (*input_cloud, *output_cloud, ndt.getFinalTransformation ());
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
-#include <boost/filesystem.hpp>
+
#include <flann/flann.h>
#include <flann/io/hdf5.h>
#include <fstream>
* \param vfh the resultant VFH model
*/
bool
-loadHist (const boost::filesystem::path &path, vfh_model &vfh)
+loadHist (const pcl_fs::path &path, vfh_model &vfh)
{
int vfh_idx;
// Load the file as a PCD
* \param models the resultant vector of histogram models
*/
void
-loadFeatureModels (const boost::filesystem::path &base_dir, const std::string &extension,
+loadFeatureModels (const pcl_fs::path &base_dir, const std::string &extension,
std::vector<vfh_model> &models)
{
- if (!boost::filesystem::exists (base_dir) && !boost::filesystem::is_directory (base_dir))
+ if (!pcl_fs::exists (base_dir) && !pcl_fs::is_directory (base_dir))
return;
- for (boost::filesystem::directory_iterator it (base_dir); it != boost::filesystem::directory_iterator (); ++it)
+ for (pcl_fs::directory_iterator it (base_dir); it != pcl_fs::directory_iterator (); ++it)
{
- if (boost::filesystem::is_directory (it->status ()))
+ if (pcl_fs::is_directory (it->status ()))
{
std::stringstream ss;
ss << it->path ();
pcl::console::print_highlight ("Loading %s (%lu models loaded so far).\n", ss.str ().c_str (), (unsigned long)models.size ());
loadFeatureModels (it->path (), extension, models);
}
- if (boost::filesystem::is_regular_file (it->status ()) && it->path ().extension ().string () == extension)
+ if (pcl_fs::is_regular_file (it->status ()) && it->path ().extension ().string () == extension)
{
vfh_model m;
if (loadHist (base_dir / it->path ().filename (), m))
#include <pcl/common/common.h>
#include <pcl/common/centroid.h> // for compute3DCentroid
#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
#include <limits>
#include <flann/flann.h>
#include <flann/io/hdf5.h>
-#include <boost/filesystem.hpp>
+
#include <boost/algorithm/string/replace.hpp> // for replace_last
typedef std::pair<std::string, std::vector<float> > vfh_model;
* \param vfh the resultant VFH model
*/
bool
-loadHist (const boost::filesystem::path &path, vfh_model &vfh)
+loadHist (const pcl_fs::path &path, vfh_model &vfh)
{
int vfh_idx;
// Load the file as a PCD
flann::Matrix<float> k_distances;
flann::Matrix<float> data;
// Check if the data has already been saved to disk
- if (!boost::filesystem::exists ("training_data.h5") || !boost::filesystem::exists ("training_data.list"))
+ if (!pcl_fs::exists ("training_data.h5") || !pcl_fs::exists ("training_data.list"))
{
pcl::console::print_error ("Could not find training data models files %s and %s!\n",
training_data_h5_file_name.c_str (), training_data_list_file_name.c_str ());
}
// Check if the tree index has already been saved to disk
- if (!boost::filesystem::exists (kdtree_idx_file_name))
+ if (!pcl_fs::exists (kdtree_idx_file_name))
{
pcl::console::print_error ("Could not find kd-tree index in file %s!", kdtree_idx_file_name.c_str ());
return (-1);
.. literalinclude:: sources/vfh_recognition/build_tree.cpp
:language: cpp
- :lines: 113-118
+ :lines: 114-119
Since we're lazy, and we want to use this data (and not reload it again by crawling the directory structure in the testing phase), we dump the data to disk:
.. literalinclude:: sources/vfh_recognition/build_tree.cpp
:language: cpp
- :lines: 120-126
+ :lines: 121-127
Finally, we create the KdTree, and save its structure to disk:
.. literalinclude:: sources/vfh_recognition/build_tree.cpp
:language: cpp
- :lines: 129-133
+ :lines: 130-134
Here we will use a ``LinearIndex``, which does a brute-force search using a
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 132-143
+ :lines: 134-145
we load the first given user histogram (and ignore the rest). Then we proceed
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 162-163
+ :lines: 164-165
we load the training data from disk, together with the list of file names that
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 176-177
+ :lines: 178-179
Here we need to make sure that we use the **exact** distance metric
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 178
+ :lines: 180
Inside ``nearestKSearch``, we first convert the query point to FLANN format:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 75-76
+ :lines: 77-78
Followed by obtaining the resultant nearest neighbor indices and distances for the query in:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 77-80
+ :lines: 79-82
Lines:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 177-191
+ :lines: 179-193
create a ``PCLVisualizer`` object and set up a set of different viewports (i.e., split the screen into different chunks), which will be enabled in:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 211
+ :lines: 213
Using the file names representing the models that we previously obtained in
``loadFileList``, we proceed at loading the model file names using:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 219-226
+ :lines: 221-228
For visualization purposes, we demean the point cloud by computing its centroid and then subtracting it:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 238-243
+ :lines: 240-245
Finally we check if the distance obtained by ``nearestKSearch`` is larger than the user given threshold, and if it is, we display a red line over the cloud that is being rendered in the viewport:
.. literalinclude:: sources/vfh_recognition/nearest_neighbors.cpp
:language: cpp
- :lines: 252-258
+ :lines: 254-260
Compiling and running the code
A theoretical primer explaining how Kd-trees work can be found in the `Kd-tree tutorial <https://pcl.readthedocs.io/projects/tutorials/en/master/kdtree_search.html>`_.
- The *kdtree* library provides the kd-tree data-structure, using `FLANN <http://www.cs.ubc.ca/~mariusm/index.php/FLANN/FLANN>`_, that allows for fast `nearest neighbor searches <https://en.wikipedia.org/wiki/Nearest_neighbor_search>`_.
+ The *kdtree* library provides the kd-tree data structure, using `FLANN <http://www.cs.ubc.ca/research/flann/>`_, which allows for fast `nearest neighbor searches <https://en.wikipedia.org/wiki/Nearest_neighbor_search>`_.
A `Kd-tree <https://en.wikipedia.org/wiki/Kd-tree>`_ (k-dimensional tree) is a space-partitioning data structure that stores a set of k-dimensional points in a tree structure that enables efficient range searches and nearest neighbor searches. Nearest neighbor searches are a core operation when working with point cloud data and can be used to find correspondences between groups of points or feature descriptors or to define the local neighborhood around a point or points.
**Documentation:** https://pointclouds.org/documentation/group__kdtree.html
-**Tutorials:** http://pointclouds.org/documentation/tutorials/#kdtree-tutorial
+**Tutorials:** https://pcl.readthedocs.io/projects/tutorials/en/master/kdtree_search.html
**Interacts with:** Common_
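Below is a minimal sketch of a k-nearest-neighbor query with the FLANN-backed kd-tree (the random cloud and the query point are placeholder data):

.. code-block:: cpp

   #include <pcl/point_cloud.h>
   #include <pcl/point_types.h>
   #include <pcl/kdtree/kdtree_flann.h>

   #include <cstdlib>
   #include <iostream>
   #include <vector>

   int main ()
   {
     pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
     cloud->resize (1000);
     for (auto& point : *cloud)
     {
       // Fill the cloud with random points inside a 1024^3 cube
       point.x = 1024.0f * std::rand () / (RAND_MAX + 1.0f);
       point.y = 1024.0f * std::rand () / (RAND_MAX + 1.0f);
       point.z = 1024.0f * std::rand () / (RAND_MAX + 1.0f);
     }

     pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;
     kdtree.setInputCloud (cloud);

     const pcl::PointXYZ query (512.0f, 512.0f, 512.0f);
     constexpr int k = 5;
     std::vector<int> indices (k);
     std::vector<float> sqr_distances (k);
     // nearestKSearch returns the number of neighbors found
     if (kdtree.nearestKSearch (query, k, indices, sqr_distances) > 0)
       std::cout << "Closest point index: " << indices[0]
                 << ", squared distance: " << sqr_distances[0] << std::endl;
     return 0;
   }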
The *octree* library provides efficient methods for creating a hierarchical tree data structure from point cloud data. This enables spatial partitioning, downsampling and search operations on the point data set. Each octree node has either eight children or no children. The root node describes a cubic bounding box which encapsulates all points. At every tree level, this space becomes subdivided by a factor of 2 which results in an increased voxel resolution.
- The *octree* implementation provides efficient nearest neighbor search routines, such as "Neighbors within Voxel Search”, “K Nearest Neighbor Search” and “Neighbors within Radius Search”. It automatically adjusts its dimension to the point data set. A set of leaf node classes provide additional functionality, such as spacial "occupancy" and "point density per voxel" checks. Functions for serialization and deserialization enable to efficiently encode the octree structure into a binary format. Furthermore, a memory pool implementation reduces expensive memory allocation and deallocation operations in scenarios where octrees needs to be created at high rate.
+ The *octree* implementation provides efficient nearest neighbor search routines, such as "Neighbors within Voxel Search", "K Nearest Neighbor Search" and "Neighbors within Radius Search". It automatically adjusts its dimension to the point data set. A set of leaf node classes provides additional functionality, such as spatial "occupancy" and "point density per voxel" checks. Functions for serialization and deserialization allow the octree structure to be efficiently encoded into a binary format. Furthermore, a memory pool implementation reduces expensive memory allocation and deallocation operations in scenarios where octrees need to be created at a high rate.
The following figure illustrates the voxel bounding boxes of the octree nodes at the lowest tree level. The octree voxels surround every 3D point of the Stanford bunny's surface. The red dots represent the point data. This image was created with the `octree_viewer`_.
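A minimal sketch of a voxel search with the octree (the resolution, cloud contents and query point are placeholder values):

.. code-block:: cpp

   #include <pcl/point_cloud.h>
   #include <pcl/point_types.h>
   #include <pcl/octree/octree_search.h>

   #include <iostream>
   #include <vector>

   int main ()
   {
     pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
     cloud->emplace_back (0.51f, 0.52f, 0.53f);
     cloud->emplace_back (0.49f, 0.48f, 0.50f);
     cloud->emplace_back (2.00f, 2.00f, 2.00f);

     // Edge length of the leaf voxels; smaller values give a finer subdivision
     constexpr float resolution = 0.1f;
     pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree (resolution);
     octree.setInputCloud (cloud);
     octree.addPointsFromInputCloud ();

     const pcl::PointXYZ query (0.5f, 0.5f, 0.5f);
     std::vector<int> point_indices;
     // Collect the indices of all points that fall into the same leaf voxel as the query
     if (octree.voxelSearch (query, point_indices))
       std::cout << point_indices.size () << " point(s) share the query's voxel" << std::endl;
     return 0;
   }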
set(SUBSYS_DESC "PCL examples")
set(SUBSYS_DEPS common io features search kdtree octree filters keypoints segmentation sample_consensus outofcore stereo geometry surface)
-set(DEFAULT FALSE)
-set(REASON "Code examples are disabled by default.")
-PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} ${DEFAULT} ${REASON})
+PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
if(NOT build)
# this variable will filled with all targets added with pcl_add_example
set(PCL_EXAMPLES_ALL_TARGETS)
-include_directories(${PCL_INCLUDE_DIRS})
# This looks for all examples/XXX/CMakeLists.txt
file (GLOB examples_sbudirs */CMakeLists.txt)
# Extract only relevant XXX and append it to PCL_EXAMPLES_SUBDIRS
endif()
add_custom_target(${SUBSYS_TARGET_NAME} DEPENDS ${PCL_EXAMPLES_ALL_TARGETS})
set_target_properties(${SUBSYS_TARGET_NAME} PROPERTIES FOLDER "Examples")
-
CloudType::Ptr cloud (new CloudType);
// Make the cloud a 10x10 grid
- cloud->height = 10;
- cloud->width = 10;
cloud->is_dense = true;
- cloud->resize(cloud->height * cloud->width);
+ cloud->resize(10, 10);
// Output the (0,0) point
std::cout << (*cloud)(0,0) << std::endl;
bool approx = false;
constexpr double decimation = 100;
- if(argc < 2){
+ if(argc < 3){
std::cerr << "Expected 2 arguments: inputfile outputfile" << std::endl;
+ return 0;
}
///The file to read from.
endif()
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS geometry)
-PCL_ADD_EXAMPLE(pcl_example_half_edge_mesh FILES example_half_edge_mesh.cpp LINK_WITH pcl_common)
+PCL_ADD_EXAMPLE(pcl_example_half_edge_mesh FILES example_half_edge_mesh.cpp LINK_WITH pcl_common pcl_geometry)
std::multimap<std::uint32_t, std::uint32_t>supervoxel_adjacency;
super.getSupervoxelAdjacency (supervoxel_adjacency);
- /// Get the cloud of supervoxel centroid with normals and the colored cloud with supervoxel coloring (this is used for visulization)
+ /// Get the cloud of supervoxel centroid with normals and the colored cloud with supervoxel coloring (this is used for visualization)
pcl::PointCloud<pcl::PointNormal>::Ptr sv_centroid_normal_cloud = pcl::SupervoxelClustering<PointT>::makeSupervoxelNormalCloud (supervoxel_clusters);
/// Set parameters for LCCP preprocessing and CPC (CPC inherits from LCCP, thus it includes LCCP's functionality)
std::multimap<std::uint32_t, std::uint32_t> supervoxel_adjacency;
super.getSupervoxelAdjacency (supervoxel_adjacency);
- /// Get the cloud of supervoxel centroid with normals and the colored cloud with supervoxel coloring (this is used for visulization)
+ /// Get the cloud of supervoxel centroid with normals and the colored cloud with supervoxel coloring (this is used for visualization)
pcl::PointCloud<pcl::PointNormal>::Ptr sv_centroid_normal_cloud = pcl::SupervoxelClustering<PointT>::makeSupervoxelNormalCloud (supervoxel_clusters);
/// The Main Step: Perform LCCPSegmentation
return (1);
}
- cloud->points.reserve (depth_dims[0] * depth_dims[1]);
+ cloud->reserve (static_cast<std::size_t>(depth_dims[0]) * static_cast<std::size_t>(depth_dims[1]));
cloud->width = depth_dims[0];
cloud->height = depth_dims[1];
cloud->is_dense = false;
set(SUBSYS_DESC "Point cloud features library")
set(SUBSYS_DEPS common search kdtree octree filters 2d)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
return()
endif()
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/board.h"
"include/pcl/${SUBSYS_NAME}/flare.h"
"include/pcl/${SUBSYS_NAME}/brisk_2d.h"
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${LIB_NAME}" pcl_common pcl_search pcl_kdtree pcl_octree pcl_filters)
+target_link_libraries("${LIB_NAME}" pcl_common pcl_2d pcl_search pcl_kdtree pcl_octree pcl_filters)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install headers
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-#include <boost/property_map/property_map.hpp>
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-#include <Eigen/StdVector>
-#include <Eigen/Geometry>
#include <pcl/features/feature.h>
#define GRIDSIZE 64
-#define GRIDSIZE_H GRIDSIZE/2
+#define GRIDSIZE_H (GRIDSIZE/2)
#include <vector>
namespace pcl
points_ += number;
// set up the patterns
- pattern_points_ = new BriskPatternPoint[points_*scales_*n_rot_];
+ pattern_points_ = new BriskPatternPoint[static_cast<std::size_t>(points_)*static_cast<std::size_t>(scales_)*static_cast<std::size_t>(n_rot_)];
BriskPatternPoint* pattern_iterator = pattern_points_;
// define the scale discretization:
const float yf = brisk_point.y + key_y;
const int x = static_cast<int>(xf);
const int y = static_cast<int>(yf);
- const int& imagecols = image_width;
+ const std::ptrdiff_t imagecols = image_width;
// get the sigma:
const float sigma_half = brisk_point.sigma;
const int scaling2 = static_cast<int> (static_cast<float>(scaling) * area / 1024.0f);
// the integral image is larger:
- const int integralcols = imagecols + 1;
+ const std::ptrdiff_t integralcols = image_width + 1;
// calculate borders
const float x_1 = xf - sigma_half;
const auto height = static_cast<index_t>(input_cloud_->height);
// destination for intensity data; will be forwarded to BRISK
- std::vector<unsigned char> image_data (width*height);
+ std::vector<unsigned char> image_data (static_cast<std::size_t>(width)*static_cast<std::size_t>(height));
for (std::size_t i = 0; i < image_data.size (); ++i)
image_data[i] = static_cast<unsigned char> (intensity_ ((*input_cloud_)[i]));
// first, calculate the integral image over the whole image:
// current integral image
- std::vector<int> integral ((width+1)*(height+1), 0); // the integral image
+ std::vector<int> integral (static_cast<std::size_t>(width+1)*static_cast<std::size_t>(height+1), 0); // the integral image
for (index_t row_index = 1; row_index < height; ++row_index)
{
#define PCL_FEATURES_IMPL_CPPF_H_
#include <pcl/features/cppf.h>
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT>
#include <pcl/features/cvfh.h>
#include <pcl/features/normal_3d.h>
#include <pcl/common/centroid.h>
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT> void
static_cast<std::size_t>(normals.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
continue;
}
- // skip index 0, since nn_indices[0] == idx, worth it?
- for (std::size_t j = 1; j < nn_indices.size (); ++j)
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
PointCloudIn &pc, std::vector<float> &hist)
{
const int binsize = 64;
- unsigned int sample_size = 20000;
+ const std::size_t sample_size = 20000;
// @TODO: Replace with c++ stdlib uniform_random_generator
srand (static_cast<unsigned int> (time (nullptr)));
const auto maxindex = pc.size ();
//float weights[10] = {1, 1, 1, 1, 1, 1, 1, 1 , 1 , 1};
float weights[10] = {0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f};
- hist.reserve (binsize * 10);
+ hist.reserve (static_cast<std::size_t>(binsize) * 10);
for (const float &i : h_a3_in)
hist.push_back (i * weights[0]);
for (const float &i : h_a3_out)
#include <pcl/features/flare.h>
#include <pcl/common/geometry.h>
+#include <pcl/search/kdtree.h> // for KdTree
+#include <pcl/search/organized.h> // for OrganizedNeighbor
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT, typename SignedDistanceT> bool
{
width_ = width;
height_ = height;
- first_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
- finite_values_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
+ const std::size_t ii_size = static_cast<std::size_t>(width_ + 1) * static_cast<std::size_t>(height_ + 1);
+ first_order_integral_image_.resize (ii_size);
+ finite_values_integral_image_.resize (ii_size);
if (compute_second_order_integral_images_)
- second_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
+ second_order_integral_image_.resize (ii_size);
}
computeIntegralImages (data, row_stride, element_stride);
}
{
width_ = width;
height_ = height;
- first_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
- finite_values_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
+ const std::size_t ii_size = static_cast<std::size_t>(width_ + 1) * static_cast<std::size_t>(height_ + 1);
+ first_order_integral_image_.resize (ii_size);
+ finite_values_integral_image_.resize (ii_size);
if (compute_second_order_integral_images_)
- second_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
+ second_order_integral_image_.resize (ii_size);
}
computeIntegralImages (data, row_stride, element_stride);
}
*
*/
#pragma once
+#include <pcl/common/eigen.h> // for eigen33
#include <pcl/features/integral_image_normal.h>
#include <algorithm>
Eigen::Vector3f center;
typename IntegralImage2D<float, 3>::SecondOrderType so_elements;
typename IntegralImage2D<float, 3>::ElementType tmp_center;
- typename IntegralImage2D<float, 3>::SecondOrderType tmp_so_elements;
center[0] = 0;
center[1] = 0;
#include <pcl/features/intensity_gradient.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
+#include <pcl/common/eigen.h> // for eigen33
//////////////////////////////////////////////////////////////////////////////////////////////
std::vector<float> nn_dists (k_);
output.is_dense = true;
+#ifdef _OPENMP
+ if (threads_ == 0) {
+ threads_ = omp_get_num_procs();
+ PCL_DEBUG ("[pcl::IntensityGradientEstimation::computeFeature] Setting number of threads to %u.\n", threads_);
+ }
+#endif // _OPENMP
+
// If the data is dense, we don't need to check for NaN
if (surface_->is_dense)
{
#include <pcl/common/io.h> // for copyPointCloud
#include <pcl/common/common.h> // for getMaxDistance
#include <pcl/common/transforms.h>
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT> void
static_cast<std::size_t>(normals.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
{
pcl::PointIndices pi;
- pcl::PointIndices pi_cvfh;
pcl::PointIndices pi_filtered;
clusters_.push_back (pi);
std::queue<std::pair<int, int> > empty;
std::swap (key_list_, empty);
- pfh_histogram_.setZero (nr_subdiv_ * nr_subdiv_ * nr_subdiv_);
+ pfh_histogram_.setZero (static_cast<Eigen::Index>(nr_subdiv_) * nr_subdiv_ * nr_subdiv_);
// Allocate enough space to hold the results
// \note This resize is irrelevant for a radiusSearch ().
pcl::PFHRGBEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
/// nr_subdiv^3 for RGB and nr_subdiv^3 for the angular features
- pfhrgb_histogram_.setZero (2 * nr_subdiv_ * nr_subdiv_ * nr_subdiv_);
+ pfhrgb_histogram_.setZero (static_cast<Eigen::Index>(2) * nr_subdiv_ * nr_subdiv_ * nr_subdiv_);
pfhrgb_tuple_.setZero (7);
// Allocate enough space to hold the results
#include <pcl/features/ppf.h>
#include <pcl/features/pfh.h>
#include <pcl/features/pfh_tools.h> // for computePairFeatures
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT>
#include <pcl/features/ppfrgb.h>
#include <pcl/features/pfhrgb.h>
+#include <pcl/search/kdtree.h> // for KdTree
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT>
#include <pcl/features/principal_curvatures.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
+#include <pcl/common/eigen.h> // for eigen33
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointNT, typename PointOutT> void
+pcl::PrincipalCurvaturesEstimation<PointInT, PointNT, PointOutT>::setNumberOfThreads (unsigned int nr_threads)
+{
+#ifdef _OPENMP
+ if (nr_threads == 0)
+ threads_ = omp_get_num_procs();
+ else
+ threads_ = nr_threads;
+ PCL_DEBUG ("[pcl::PrincipalCurvaturesEstimation::setNumberOfThreads] Setting number of threads to %u.\n", threads_);
+#else
+ threads_ = 1;
+ if (nr_threads != 1)
+ PCL_WARN ("[pcl::PrincipalCurvaturesEstimation::setNumberOfThreads] Parallelization is requested, but OpenMP is not available! Continuing without parallelization.\n");
+#endif // _OPENMP
+}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> void
const pcl::PointCloud<PointNT> &normals, int p_idx, const pcl::Indices &indices,
float &pcx, float &pcy, float &pcz, float &pc1, float &pc2)
{
- EIGEN_ALIGN16 Eigen::Matrix3f I = Eigen::Matrix3f::Identity ();
- Eigen::Vector3f n_idx (normals[p_idx].normal[0], normals[p_idx].normal[1], normals[p_idx].normal[2]);
- EIGEN_ALIGN16 Eigen::Matrix3f M = I - n_idx * n_idx.transpose (); // projection matrix (into tangent plane)
+ const auto n_idx = normals[p_idx].getNormalVector3fMap();
+ EIGEN_ALIGN16 const Eigen::Matrix3f I = Eigen::Matrix3f::Identity ();
+ EIGEN_ALIGN16 const Eigen::Matrix3f M = I - n_idx * n_idx.transpose (); // projection matrix (into tangent plane)
// Project normals into the tangent plane
- Eigen::Vector3f normal;
- projected_normals_.resize (indices.size ());
- xyz_centroid_.setZero ();
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > projected_normals (indices.size());
+ Eigen::Vector3f xyz_centroid = Eigen::Vector3f::Zero();
for (std::size_t idx = 0; idx < indices.size(); ++idx)
{
- normal[0] = normals[indices[idx]].normal[0];
- normal[1] = normals[indices[idx]].normal[1];
- normal[2] = normals[indices[idx]].normal[2];
-
- projected_normals_[idx] = M * normal;
- xyz_centroid_ += projected_normals_[idx];
+ const auto normal = normals[indices[idx]].getNormalVector3fMap();
+ projected_normals[idx] = M * normal;
+ xyz_centroid += projected_normals[idx];
}
// Estimate the XYZ centroid
- xyz_centroid_ /= static_cast<float> (indices.size ());
+ xyz_centroid /= static_cast<float> (indices.size ());
// Initialize to 0
- covariance_matrix_.setZero ();
+ EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix = Eigen::Matrix3f::Zero();
// For each point in the cloud
for (std::size_t idx = 0; idx < indices.size (); ++idx)
{
- demean_ = projected_normals_[idx] - xyz_centroid_;
+ const Eigen::Vector3f demean = projected_normals[idx] - xyz_centroid;
- double demean_xy = demean_[0] * demean_[1];
- double demean_xz = demean_[0] * demean_[2];
- double demean_yz = demean_[1] * demean_[2];
+ const double demean_xy = demean[0] * demean[1];
+ const double demean_xz = demean[0] * demean[2];
+ const double demean_yz = demean[1] * demean[2];
- covariance_matrix_(0, 0) += demean_[0] * demean_[0];
- covariance_matrix_(0, 1) += static_cast<float> (demean_xy);
- covariance_matrix_(0, 2) += static_cast<float> (demean_xz);
+ covariance_matrix(0, 0) += demean[0] * demean[0];
+ covariance_matrix(0, 1) += static_cast<float> (demean_xy);
+ covariance_matrix(0, 2) += static_cast<float> (demean_xz);
- covariance_matrix_(1, 0) += static_cast<float> (demean_xy);
- covariance_matrix_(1, 1) += demean_[1] * demean_[1];
- covariance_matrix_(1, 2) += static_cast<float> (demean_yz);
+ covariance_matrix(1, 0) += static_cast<float> (demean_xy);
+ covariance_matrix(1, 1) += demean[1] * demean[1];
+ covariance_matrix(1, 2) += static_cast<float> (demean_yz);
- covariance_matrix_(2, 0) += static_cast<float> (demean_xz);
- covariance_matrix_(2, 1) += static_cast<float> (demean_yz);
- covariance_matrix_(2, 2) += demean_[2] * demean_[2];
+ covariance_matrix(2, 0) += static_cast<float> (demean_xz);
+ covariance_matrix(2, 1) += static_cast<float> (demean_yz);
+ covariance_matrix(2, 2) += demean[2] * demean[2];
}
// Extract the eigenvalues and eigenvectors
- pcl::eigen33 (covariance_matrix_, eigenvalues_);
- pcl::computeCorrespondingEigenVector (covariance_matrix_, eigenvalues_ [2], eigenvector_);
-
- pcx = eigenvector_ [0];
- pcy = eigenvector_ [1];
- pcz = eigenvector_ [2];
- float indices_size = 1.0f / static_cast<float> (indices.size ());
- pc1 = eigenvalues_ [2] * indices_size;
- pc2 = eigenvalues_ [1] * indices_size;
+ Eigen::Vector3f eigenvalues;
+ Eigen::Vector3f eigenvector;
+ pcl::eigen33 (covariance_matrix, eigenvalues);
+ pcl::computeCorrespondingEigenVector (covariance_matrix, eigenvalues [2], eigenvector);
+
+ pcx = eigenvector [0];
+ pcy = eigenvector [1];
+ pcz = eigenvector [2];
+ const float indices_size = 1.0f / static_cast<float> (indices.size ());
+ pc1 = eigenvalues [2] * indices_size;
+ pc2 = eigenvalues [1] * indices_size;
}
// Save a few cycles by not checking every point for NaN/Inf values if the cloud is set to dense
if (input_->is_dense)
{
+#pragma omp parallel for \
+ default(none) \
+ shared(output) \
+ firstprivate(nn_indices, nn_dists) \
+ num_threads(threads_) \
+ schedule(dynamic, chunk_size_)
// Iterating over the entire index vector
- for (std::size_t idx = 0; idx < indices_->size (); ++idx)
+ for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t> (indices_->size ()); ++idx)
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
}
else
{
+#pragma omp parallel for \
+ default(none) \
+ shared(output) \
+ firstprivate(nn_indices, nn_dists) \
+ num_threads(threads_) \
+ schedule(dynamic, chunk_size_)
// Iterating over the entire index vector
- for (std::size_t idx = 0; idx < indices_->size (); ++idx)
+ for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t> (indices_->size ()); ++idx)
{
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
bool& beam_valid = beams_valid[beam_idx++];
if (step==1)
{
- beam_valid = !(x2==x && y2==y);
+ beam_valid = (x2!=x || y2!=y);
}
else
if (!beam_valid)
}
if (std::abs (total_area) < std::numeric_limits <float>::epsilon ())
- total_area = 1.0f / total_area;
- else
total_area = 1.0f;
+ else
+ total_area = 1.0f / total_area;
Eigen::Matrix3f overall_scatter_matrix;
overall_scatter_matrix.setZero ();
//output_rf.confidence = getLocalRF ((*indices_)[i], rf);
//if (output_rf.confidence == std::numeric_limits<float>::max ())
- pcl::Indices n_indices;
- std::vector<float> n_sqr_distances;
- this->searchForNeighbors ((*indices_)[i], search_parameter_, n_indices, n_sqr_distances);
if (getLocalRF ((*indices_)[i], rf) == std::numeric_limits<float>::max ())
{
output.is_dense = false;
};
}
-#define PCL_INSTANTIATE_MomentOfInertiaEstimation(T) template class pcl::MomentOfInertiaEstimation<T>;
+#define PCL_INSTANTIATE_MomentOfInertiaEstimation(T) template class PCL_EXPORTS pcl::MomentOfInertiaEstimation<T>;
#ifdef PCL_NO_PRECOMPILE
#include <pcl/features/impl/moment_of_inertia_estimation.hpp>
namespace pcl
{
/** \brief PrincipalCurvaturesEstimation estimates the directions (eigenvectors) and magnitudes (eigenvalues) of
- * principal surface curvatures for a given point cloud dataset containing points and normals.
+ * principal surface curvatures for a given point cloud dataset containing points and normals. The output contains
+ * the principal curvature (eigenvector of the max eigenvalue), along with both the max (pc1) and min (pc2)
+ * eigenvalues. Parallel execution is supported through OpenMP.
*
* The recommended PointOutT is pcl::PrincipalCurvatures.
*
- * \note The code is stateful as we do not expect this class to be multicore parallelized. Please look at
- * \ref NormalEstimationOMP for an example on how to extend this to parallel implementations.
- *
- * \author Radu B. Rusu, Jared Glover
+ * \author Radu B. Rusu, Jared Glover, Alex Navarro
* \ingroup features
*/
template <typename PointInT, typename PointNT, typename PointOutT = pcl::PrincipalCurvatures>
using PointCloudOut = typename Feature<PointInT, PointOutT>::PointCloudOut;
using PointCloudIn = pcl::PointCloud<PointInT>;
- /** \brief Empty constructor. */
- PrincipalCurvaturesEstimation () :
- xyz_centroid_ (Eigen::Vector3f::Zero ()),
- demean_ (Eigen::Vector3f::Zero ()),
- covariance_matrix_ (Eigen::Matrix3f::Zero ()),
- eigenvector_ (Eigen::Vector3f::Zero ()),
- eigenvalues_ (Eigen::Vector3f::Zero ())
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value to automatic)
+ * \param chunk_size PCL will use dynamic scheduling with this chunk size. Setting it too
+ * low will lead to more parallelization overhead. Setting it too high
+   *                   will lead to worse load balancing between the threads.
+ */
+ PrincipalCurvaturesEstimation (unsigned int nr_threads = 1, int chunk_size = 256) :
+ chunk_size_(chunk_size)
{
feature_name_ = "PrincipalCurvaturesEstimation";
+
+ setNumberOfThreads(nr_threads);
};
/** \brief Perform Principal Components Analysis (PCA) on the point normals of a surface patch in the tangent
int p_idx, const pcl::Indices &indices,
float &pcx, float &pcy, float &pcz, float &pc1, float &pc2);
+ /** \brief Initialize the scheduler and set the number of threads to use. The default behavior is
+   * single threaded execution.
+ * \param nr_threads the number of hardware threads to use (0 sets the value to automatic)
+ */
+ void
+ setNumberOfThreads (unsigned int nr_threads);
+
protected:
+ /** \brief The number of threads the scheduler should use. */
+ unsigned int threads_;
+
+ /** \brief Chunk size for (dynamic) scheduling. */
+ int chunk_size_;
/** \brief Estimate the principal curvature (eigenvector of the max eigenvalue), along with both the max (pc1)
* and min (pc2) eigenvalues for all points given in <setInputCloud (), setIndices ()> using the surface in
*/
void
computeFeature (PointCloudOut &output) override;
-
- private:
- /** \brief A pointer to the input dataset that contains the point normals of the XYZ dataset. */
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > projected_normals_;
-
- /** \brief SSE aligned placeholder for the XYZ centroid of a surface patch. */
- Eigen::Vector3f xyz_centroid_;
-
- /** \brief Temporary point placeholder. */
- Eigen::Vector3f demean_;
-
- /** \brief Placeholder for the 3x3 covariance matrix at each surface patch. */
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix_;
-
- /** \brief SSE aligned eigenvectors placeholder for a covariance matrix. */
- Eigen::Vector3f eigenvector_;
- /** \brief eigenvalues placeholder for a covariance matrix. */
- Eigen::Vector3f eigenvalues_;
};
}
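A minimal usage sketch of the now thread-enabled estimator (the cloud/normals setup, the 0.03 radius, and the helper name estimateCurvatures are illustrative, not part of the patch):

#include <pcl/features/principal_curvatures.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>

void
estimateCurvatures (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
                    const pcl::PointCloud<pcl::Normal>::ConstPtr &normals,
                    pcl::PointCloud<pcl::PrincipalCurvatures> &output)
{
  pcl::PrincipalCurvaturesEstimation<pcl::PointXYZ, pcl::Normal, pcl::PrincipalCurvatures> pce;
  pce.setInputCloud (cloud);
  pce.setInputNormals (normals);
  pce.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  pce.setRadiusSearch (0.03);  // neighborhood radius, illustrative value
  pce.setNumberOfThreads (0);  // 0 = pick the number of processors automatically
  pce.compute (output);
}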
};
}
-#define PCL_INSTANTIATE_ROPSEstimation(InT, OutT) template class pcl::ROPSEstimation<InT, OutT>;
+#define PCL_INSTANTIATE_ROPSEstimation(InT, OutT) template class PCL_EXPORTS pcl::ROPSEstimation<InT, OutT>;
#ifdef PCL_NO_PRECOMPILE
#include <pcl/features/impl/rops_estimation.hpp>
{
surface_patch_pixel_size_ = other.surface_patch_pixel_size_;
delete[] surface_patch_;
- surface_patch_ = new float[surface_patch_pixel_size_*surface_patch_pixel_size_];
+ surface_patch_ = new float[static_cast<std::size_t>(surface_patch_pixel_size_)*static_cast<std::size_t>(surface_patch_pixel_size_)];
}
- std::copy(other.surface_patch_, other.surface_patch_ + surface_patch_pixel_size_*surface_patch_pixel_size_, surface_patch_);
+ std::copy(other.surface_patch_, other.surface_patch_ + static_cast<ptrdiff_t>(surface_patch_pixel_size_)*static_cast<ptrdiff_t>(surface_patch_pixel_size_), surface_patch_);
surface_patch_world_size_ = other.surface_patch_world_size_;
surface_patch_rotation_ = other.surface_patch_rotation_;
pcl::saveBinary(transformation_.matrix(), file);
file.write(reinterpret_cast<const char*>(&surface_patch_pixel_size_), sizeof(surface_patch_pixel_size_));
file.write(reinterpret_cast<const char*>(surface_patch_),
- surface_patch_pixel_size_*surface_patch_pixel_size_*sizeof(*surface_patch_));
+ sizeof(*surface_patch_)*surface_patch_pixel_size_*surface_patch_pixel_size_);
file.write(reinterpret_cast<const char*>(&surface_patch_world_size_), sizeof(surface_patch_world_size_));
file.write(reinterpret_cast<const char*>(&surface_patch_rotation_), sizeof(surface_patch_rotation_));
file.write(reinterpret_cast<const char*>(&descriptor_size_), sizeof(descriptor_size_));
pcl::loadBinary(position_.matrix(), file);
pcl::loadBinary(transformation_.matrix(), file);
file.read(reinterpret_cast<char*>(&surface_patch_pixel_size_), sizeof(surface_patch_pixel_size_));
- surface_patch_ = new float[surface_patch_pixel_size_*surface_patch_pixel_size_];
+ surface_patch_ = new float[static_cast<std::size_t>(surface_patch_pixel_size_)*static_cast<std::size_t>(surface_patch_pixel_size_)];
file.read(reinterpret_cast<char*>(surface_patch_),
- surface_patch_pixel_size_*surface_patch_pixel_size_*sizeof(*surface_patch_));
+ sizeof(*surface_patch_)*surface_patch_pixel_size_*surface_patch_pixel_size_);
file.read(reinterpret_cast<char*>(&surface_patch_world_size_), sizeof(surface_patch_world_size_));
file.read(reinterpret_cast<char*>(&surface_patch_rotation_), sizeof(surface_patch_rotation_));
file.read(reinterpret_cast<char*>(&descriptor_size_), sizeof(descriptor_size_));
float*
RangeImageBorderExtractor::updatedScoresAccordingToNeighborValues (const float* border_scores) const
{
- float* new_scores = new float[range_image_->width*range_image_->height];
+ float* new_scores = new float[static_cast<std::size_t>(range_image_->width)*static_cast<std::size_t>(range_image_->height)];
float* new_scores_ptr = new_scores;
for (int y=0; y < static_cast<int> (range_image_->height); ++y)
for (int x=0; x < static_cast<int> (range_image_->width); ++x)
RangeImageBorderExtractor::updatedScoresAccordingToNeighborValues (const std::vector<float>& border_scores) const
{
std::vector<float> new_border_scores;
- new_border_scores.reserve (range_image_->width*range_image_->height);
+ new_border_scores.reserve (static_cast<std::size_t>(range_image_->width)*static_cast<std::size_t>(range_image_->height));
for (int y=0; y < static_cast<int> (range_image_->height); ++y)
for (int x=0; x < static_cast<int> (range_image_->width); ++x)
new_border_scores.push_back (updatedScoreAccordingToNeighborValues(x, y, border_scores.data ()));
//MEASURE_FUNCTION_TIME;
- int width = range_image_->width,
+ std::size_t width = range_image_->width,
height = range_image_->height;
shadow_border_informations_ = new ShadowBorderIndices*[width*height];
for (int y = 0; y < static_cast<int> (height); ++y)
{
for (int x = 0; x < static_cast<int> (width); ++x)
{
- int index = y*width+x;
- ShadowBorderIndices*& shadow_border_indices = shadow_border_informations_[index];
+ ShadowBorderIndices*& shadow_border_indices = shadow_border_informations_[y*width+x];
shadow_border_indices = nullptr;
int shadow_border_idx;
const RangeImage& range_image = *range_image_;
- auto* blurred_directions = new Eigen::Vector3f[range_image.width*range_image.height];
- float* blurred_scores = new float[range_image.width*range_image.height];
+ auto* blurred_directions = new Eigen::Vector3f[static_cast<std::size_t>(range_image.width)*static_cast<std::size_t>(range_image.height)];
+ float* blurred_scores = new float[static_cast<std::size_t>(range_image.width)*static_cast<std::size_t>(range_image.height)];
for (int y=0; y<static_cast<int>(range_image.height); ++y)
{
for (int x=0; x<static_cast<int>(range_image.width); ++x)
set(SUBSYS_DESC "Point cloud filters library")
set(SUBSYS_DEPS common sample_consensus search kdtree octree)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/conditional_removal.h"
"include/pcl/${SUBSYS_NAME}/crop_box.h"
"include/pcl/${SUBSYS_NAME}/clipper3D.h"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common pcl_sample_consensus pcl_search pcl_kdtree pcl_octree)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
/** \brief ApproximateVoxelGrid assembles a local 3D grid over a given PointCloud, and downsamples + filters the data.
* Thus, this is similar to the \ref VoxelGrid filter.
* This class works best if points that are stored in memory next to each other (in the input point cloud), are also somewhat close in 3D euclidean space (this is for example usually the case for organized point clouds). If the points have no storage order (e.g. in synthetic, randomly generated point clouds), this class will give very poor results, and \ref VoxelGrid should be used instead.
+ * \sa VoxelGrid
* \author James Bowman, Radu B. Rusu
* \ingroup filters
*/
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: io.h 5850 2012-06-06 14:04:59Z stfox88 $
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-// Marking all Boost headers as system headers to remove warnings
-#include <boost/random.hpp>
-#include <boost/random/normal_distribution.hpp>
-#include <boost/dynamic_bitset.hpp>
-#include <boost/mpl/size.hpp>
-#include <boost/fusion/sequence/intrinsic/at_key.hpp>
-#include <boost/optional.hpp>
#pragma once
#include <pcl/pcl_base.h>
+#include <pcl/search/search.h> // for pcl::search::Search
#include <boost/optional.hpp>
+#include <pcl/search/search.h>
namespace pcl
{
double search_radius_;
/** \brief number of threads */
- unsigned int threads_;
+ unsigned int threads_{1};
/** \brief convolving kernel */
KernelT kernel_;
* Based on the following publication:
* * "Geometrically Stable Sampling for the ICP Algorithm" - N. Gelfand, L. Ikemoto, S. Rusinkiewicz, M. Levoy
*
+ * \ingroup filters
* \author Alexandru E. Ichim, alex.e.ichim@gmail.com
*/
template <typename PointT, typename PointNT>
* This should be set to correspond to the dimensionality of the
* convex/concave hull produced by the pcl::ConvexHull and
* pcl::ConcaveHull classes.
- * \param[in] dim Dimensionailty of the hull used to filter points.
+ * \param[in] dim Dimensionality of the hull used to filter points.
*/
inline void
setDim (int dim)
#include <pcl/filters/bilateral.h>
#include <pcl/search/organized.h> // for OrganizedNeighbor
#include <pcl/search/kdtree.h> // for KdTree
+#include <pcl/common/point_tests.h> // for isXYZFinite
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> double
// For all the indices given (equal to the entire cloud if none given)
for (const auto& idx : (*indices_))
{
- // Perform a radius search to find the nearest neighbors
- tree_->radiusSearch (idx, sigma_s_ * 2, k_indices, k_distances);
+ if (input_->is_dense || pcl::isXYZFinite((*input_)[idx]))
+ {
+ // Perform a radius search to find the nearest neighbors
+ tree_->radiusSearch (idx, sigma_s_ * 2, k_indices, k_distances);
- // Overwrite the intensity value with the computed average
- output[idx].intensity = static_cast<float> (computePointWeight (idx, k_indices, k_distances));
+ // Overwrite the intensity value with the computed average
+ output[idx].intensity = static_cast<float> (computePointWeight (idx, k_indices, k_distances));
+ }
}
}
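For context, a usage sketch of this filter with the guarded radius search (the kd-tree choice and the 2.0/0.05 parameters are illustrative):

#include <pcl/filters/bilateral.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>

void
smoothIntensity (const pcl::PointCloud<pcl::PointXYZI>::ConstPtr &cloud_in,
                 pcl::PointCloud<pcl::PointXYZI> &cloud_out)
{
  pcl::BilateralFilter<pcl::PointXYZI> bf;
  bf.setInputCloud (cloud_in);
  bf.setSearchMethod (pcl::search::KdTree<pcl::PointXYZI>::Ptr (new pcl::search::KdTree<pcl::PointXYZI>));
  bf.setHalfSize (2.0);   // sigma_s, defines the radius search window (sigma_s * 2)
  bf.setStdDev (0.05);    // sigma_r, intensity standard deviation
  bf.filter (cloud_out);  // non-finite input points are now skipped by the loop above
}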
case CASE_LABEL: { \
pcl::traits::asType_t<CASE_LABEL> pt_val; \
memcpy(&pt_val, pt_data + this->offset_, sizeof(pt_val)); \
- return (pt_val > static_cast<pcl::traits::asType_t<CASE_LABEL>>(val)) - \
- (pt_val < static_cast<pcl::traits::asType_t<CASE_LABEL>>(val)); \
+ return (pt_val > static_cast<pcl::traits::asType_t<(CASE_LABEL)>>(val)) - \
+ (pt_val < static_cast<pcl::traits::asType_t<(CASE_LABEL)>>(val)); \
}
switch (datatype_)
#ifndef PCL_FILTERS_CONVOLUTION_3D_IMPL_HPP
#define PCL_FILTERS_CONVOLUTION_3D_IMPL_HPP
+#include <pcl/common/point_tests.h> // for isFinite
#include <pcl/search/organized.h>
#include <pcl/search/kdtree.h>
#include <pcl/pcl_config.h>
#include <pcl/point_types.h>
+#include <pcl/common/point_tests.h>
#include <cmath>
#include <cstdint>
default(none) \
shared(output) \
firstprivate(nn_indices, nn_distances) \
- num_threads(threads_)
+ num_threads(threads_) \
+ schedule(dynamic, 64)
for (std::int64_t point_idx = 0; point_idx < static_cast<std::int64_t> (surface_->size ()); ++point_idx)
{
const PointInT& point_in = surface_->points [point_idx];
for (std::size_t i = 0; i < 6; ++i)
{
for (std::size_t p_i = 0; p_i < candidate_indices.size (); ++p_i)
- L[i].push_back (std::make_pair (p_i, std::abs (v[p_i].dot (x.block<6, 1> (0, i)))));
+ L[i].emplace_back(p_i, std::abs (v[p_i].dot (x.block<6, 1> (0, i))));
// Sort in decreasing order
L[i].sort (sort_dot_list_function);
}
else
{
- output.is_dense = true;
applyFilter (indices);
pcl::copyPointCloud (*input_, indices, output);
}
template<typename PointT, typename NormalT> unsigned int
pcl::NormalSpaceSampling<PointT, NormalT>::findBin (const float *normal)
{
- const auto ix = static_cast<unsigned> (std::round (0.5f * (binsx_ - 1.f) * (normal[0] + 1.f)));
- const auto iy = static_cast<unsigned> (std::round (0.5f * (binsy_ - 1.f) * (normal[1] + 1.f)));
- const auto iz = static_cast<unsigned> (std::round (0.5f * (binsz_ - 1.f) * (normal[2] + 1.f)));
+ // in the case where normal[0] == 1.0f, ix will be binsx_, which is out of range.
+ // thus, use std::min to avoid this situation.
+ const auto ix = std::min (binsx_ - 1, static_cast<unsigned> (std::max (0.0f, std::floor (0.5f * (binsx_) * (normal[0] + 1.f)))));
+ const auto iy = std::min (binsy_ - 1, static_cast<unsigned> (std::max (0.0f, std::floor (0.5f * (binsy_) * (normal[1] + 1.f)))));
+ const auto iz = std::min (binsz_ - 1, static_cast<unsigned> (std::max (0.0f, std::floor (0.5f * (binsz_) * (normal[2] + 1.f)))));
return ix * (binsy_*binsz_) + iy * binsz_ + iz;
}
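A standalone sketch of the clamped binning above (binOf is a hypothetical helper, not part of PCL), showing why the std::min is needed:

#include <algorithm>
#include <cmath>

// Maps a normal component in [-1, 1] to a bin index in [0, bins - 1].
static unsigned
binOf (float component, unsigned bins)
{
  const float raw = std::floor (0.5f * bins * (component + 1.f));
  return std::min (bins - 1, static_cast<unsigned> (std::max (0.0f, raw)));
}
// binOf (1.0f, 4) yields 3; without the std::min, floor (0.5 * 4 * 2) = 4 would be out of range.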
{
if (!input_->isOrganized ())
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!\n", getClassName ().c_str ());
+    PCL_ERROR ("[pcl::filters::%s::initCompute] Input cloud needs to be organized!\n", getClassName ().c_str ());
return (false);
}
if (levels_ < 2)
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should be at least 2!\n", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::filters::%s::initCompute] Number of levels should be at least 2!\n", getClassName ().c_str ());
return (false);
}
if (levels_ > 4)
{
- PCL_ERROR ("[pcl::fileters::%s::initCompute] Number of levels should not exceed 4!\n", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::filters::%s::initCompute] Number of levels should not exceed 4!\n", getClassName ().c_str ());
return (false);
}
return;
}
+ // Note: k includes the query point, so is always at least 1
+ const int mean_k = min_pts_radius_ + 1;
+ const double nn_dists_max = search_radius_ * search_radius_;
+
// The arrays to be used
- Indices nn_indices (indices_->size ());
- std::vector<float> nn_dists (indices_->size ());
+ Indices nn_indices (mean_k);
+ std::vector<float> nn_dists(mean_k);
+  // Start with every point marked as kept; the filtering loops below only flag the points
+  // to remove, assuming the majority of the points will be kept.
+  // 0 = remove, 1 = keep
+ std::vector<std::uint8_t> to_keep(indices_->size(), 1);
indices.resize (indices_->size ());
removed_indices_->resize (indices_->size ());
int oii = 0, rii = 0; // oii = output indices iterator, rii = removed indices iterator
// If the data is dense => use nearest-k search
if (input_->is_dense)
{
- // Note: k includes the query point, so is always at least 1
- int mean_k = min_pts_radius_ + 1;
- double nn_dists_max = search_radius_ * search_radius_;
-
- for (const auto& index : (*indices_))
+ #pragma omp parallel for \
+ schedule(dynamic,64) \
+ firstprivate(nn_indices, nn_dists) \
+ shared(to_keep) \
+ num_threads(num_threads_)
+ for (ptrdiff_t i = 0; i < static_cast<ptrdiff_t>(indices_->size()); i++)
{
+ const auto& index = (*indices_)[i];
// Perform the nearest-k search
- int k = searcher_->nearestKSearch (index, mean_k, nn_indices, nn_dists);
+ const int k = searcher_->nearestKSearch (index, mean_k, nn_indices, nn_dists);
// Check the number of neighbors
// Note: nn_dists is sorted, so check the last item
- bool chk_neighbors = true;
if (k == mean_k)
{
- if (negative_)
- {
- chk_neighbors = false;
- if (nn_dists_max < nn_dists[k-1])
- {
- chk_neighbors = true;
- }
- }
- else
- {
- chk_neighbors = true;
- if (nn_dists_max < nn_dists[k-1])
+ // if negative_ is false and a neighbor is further away than max distance, remove the point
+ // or
+ // if negative is true and a neighbor is closer than max distance, remove the point
+ if ((!negative_ && nn_dists_max < nn_dists[k-1]) || (negative_ && nn_dists_max >= nn_dists[k - 1]))
{
- chk_neighbors = false;
+ to_keep[i] = 0;
+ continue;
}
- }
}
else
{
- chk_neighbors = negative_;
- }
-
- // Points having too few neighbors are outliers and are passed to removed indices
- // Unless negative was set, then it's the opposite condition
- if (!chk_neighbors)
- {
- if (extract_removed_indices_)
- (*removed_indices_)[rii++] = index;
- continue;
+ if (!negative_)
+ {
+ // if too few neighbors, remove the point
+ to_keep[i] = 0;
+ continue;
+ }
}
-
- // Otherwise it was a normal point for output (inlier)
- indices[oii++] = index;
}
}
// NaN or Inf values could exist => use radius search
else
{
- for (const auto& index : (*indices_))
+ #pragma omp parallel for \
+ schedule(dynamic, 64) \
+ firstprivate(nn_indices, nn_dists) \
+ shared(to_keep) \
+ num_threads(num_threads_)
+ for (ptrdiff_t i = 0; i < static_cast<ptrdiff_t>(indices_->size()); i++)
{
+ const auto& index = (*indices_)[i];
+ if (!pcl::isXYZFinite((*input_)[index]))
+ {
+ to_keep[i] = 0;
+ continue;
+ }
+
// Perform the radius search
// Note: k includes the query point, so is always at least 1
- int k = searcher_->radiusSearch (index, search_radius_, nn_indices, nn_dists);
+      // The last parameter (max_nn) is the maximum number of neighbors returned. Once enough neighbors are found for the point not to be an outlier, we stop searching.
+ const int k = searcher_->radiusSearch (index, search_radius_, nn_indices, nn_dists, min_pts_radius_ + 1);
- // Points having too few neighbors are outliers and are passed to removed indices
- // Unless negative was set, then it's the opposite condition
+      // Points having too few neighbors are removed;
+      // if negative_ is true, points having too many neighbors are removed instead
if ((!negative_ && k <= min_pts_radius_) || (negative_ && k > min_pts_radius_))
+ {
+ to_keep[i] = 0;
+ continue;
+ }
+ }
+ }
+
+ for (index_t i=0; i < static_cast<index_t>(to_keep.size()); i++)
+ {
+ if (to_keep[i] == 0)
{
if (extract_removed_indices_)
- (*removed_indices_)[rii++] = index;
+ (*removed_indices_)[rii++] = (*indices_)[i];
continue;
}
// Otherwise it was a normal point for output (inlier)
- indices[oii++] = index;
+ indices[oii++] = (*indices_)[i];
}
- }
// Resize the output arrays
indices.resize (oii);
removed_indices_->resize (N - sample_size);
// Set random seed so derived indices are the same each time the filter runs
+#ifdef __OpenBSD__
+ srand_deterministic (seed_); // OpenBSD only offers repeatable sequences with this function
+#else
std::srand (seed_);
+#endif // __OpenBSD__
// Algorithm S
std::size_t i = 0;
template<typename PointT> void
pcl::SamplingSurfaceNormal<PointT>::applyFilter (PointCloud &output)
{
+ if (ratio_ <= 0.0f)
+ {
+ PCL_ERROR("[SamplingSurfaceNormal::applyFilter] Parameter 'ratio' must be larger than zero!\n");
+ output.clear();
+ return;
+ }
+ if (sample_ < 5)
+ {
+ PCL_ERROR("[SamplingSurfaceNormal::applyFilter] Parameter 'sample' must be at least 5!\n");
+ output.clear();
+ return;
+ }
Indices indices;
std::size_t npts = input_->size ();
for (std::size_t i = 0; i < npts; i++)
continue;
}
- // Calculate the mean distance to its neighbors
+ // Calculate the mean distance to its neighbors.
double dist_sum = 0.0;
- for (int k = 1; k < searcher_k; ++k) // k = 0 is the query point
- dist_sum += sqrt (nn_dists[k]);
- distances[iii] = static_cast<float> (dist_sum / mean_k_);
+ for (std::size_t k = 1; k < nn_dists.size(); ++k) // k = 0 is the query point
+ dist_sum += sqrt(nn_dists[k]);
+ distances[iii] = static_cast<float>(dist_sum / (nn_dists.size() - 1));
valid_distances++;
}
#include <pcl/common/common.h>
#include <pcl/filters/uniform_sampling.h>
+#include <pcl/common/point_tests.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::UniformSampling<PointT>::applyFilter (PointCloud &output)
+pcl::UniformSampling<PointT>::applyFilter (Indices &indices)
{
- // Has the input dataset been set already?
- if (!input_)
- {
- PCL_WARN ("[pcl::%s::detectKeypoints] No input dataset given!\n", getClassName ().c_str ());
- output.width = output.height = 0;
- output.clear ();
- return;
- }
+ // The arrays to be used
+ indices.resize (indices_->size ());
+ removed_indices_->resize (indices_->size ());
- output.height = 1; // downsampling breaks the organized structure
- output.is_dense = true; // we filter out invalid points
+ int oii = 0, rii = 0; // oii = output indices iterator, rii = removed indices iterator
Eigen::Vector4f min_p, max_p;
// Get the minimum and maximum dimensions
// Set up the division multiplier
divb_mul_ = Eigen::Vector4i (1, div_b_[0], div_b_[0] * div_b_[1], 0);
- removed_indices_->clear();
// First pass: build a set of leaves with the point index closest to the leaf center
- for (std::size_t cp = 0; cp < indices_->size (); ++cp)
+ for (const auto& cp : *indices_)
{
if (!input_->is_dense)
{
// Check if the point is invalid
- if (!std::isfinite ((*input_)[(*indices_)[cp]].x) ||
- !std::isfinite ((*input_)[(*indices_)[cp]].y) ||
- !std::isfinite ((*input_)[(*indices_)[cp]].z))
+ if (!pcl::isXYZFinite ((*input_)[cp]))
{
if (extract_removed_indices_)
- removed_indices_->push_back ((*indices_)[cp]);
+ (*removed_indices_)[rii++] = cp;
continue;
}
}
Eigen::Vector4i ijk = Eigen::Vector4i::Zero ();
- ijk[0] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].x * inverse_leaf_size_[0]));
- ijk[1] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].y * inverse_leaf_size_[1]));
- ijk[2] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].z * inverse_leaf_size_[2]));
+ ijk[0] = static_cast<int> (std::floor ((*input_)[cp].x * inverse_leaf_size_[0]));
+ ijk[1] = static_cast<int> (std::floor ((*input_)[cp].y * inverse_leaf_size_[1]));
+ ijk[2] = static_cast<int> (std::floor ((*input_)[cp].z * inverse_leaf_size_[2]));
// Compute the leaf index
int idx = (ijk - min_b_).dot (divb_mul_);
Leaf& leaf = leaves_[idx];
+
+ // Increment the count of points in this voxel
+ ++leaf.count;
+
// First time we initialize the index
if (leaf.idx == -1)
{
- leaf.idx = (*indices_)[cp];
+ leaf.idx = cp;
continue;
}
Eigen::Vector4f voxel_center = (ijk.cast<float>() + Eigen::Vector4f::Constant(0.5)) * search_radius_;
voxel_center[3] = 0;
// Check to see if this point is closer to the leaf center than the previous one we saved
- float diff_cur = ((*input_)[(*indices_)[cp]].getVector4fMap () - voxel_center).squaredNorm ();
+ float diff_cur = ((*input_)[cp].getVector4fMap () - voxel_center).squaredNorm ();
float diff_prev = ((*input_)[leaf.idx].getVector4fMap () - voxel_center).squaredNorm ();
// If current point is closer, copy its index instead
if (diff_cur < diff_prev)
{
if (extract_removed_indices_)
- removed_indices_->push_back (leaf.idx);
- leaf.idx = (*indices_)[cp];
+ (*removed_indices_)[rii++] = leaf.idx;
+ leaf.idx = cp;
}
else
{
if (extract_removed_indices_)
- removed_indices_->push_back ((*indices_)[cp]);
+ (*removed_indices_)[rii++] = cp;
}
}
+ removed_indices_->resize(rii);
// Second pass: go over all leaves and copy data
- output.resize (leaves_.size ());
- int cp = 0;
-
+ indices.resize (leaves_.size ());
for (const auto& leaf : leaves_)
- output[cp++] = (*input_)[leaf.second.idx];
- output.width = output.size ();
+ {
+ if (leaf.second.count >= min_points_per_voxel_)
+ indices[oii++] = leaf.second.idx;
+ }
+
+ indices.resize (oii);
+  if (negative_) {
+    indices.swap (*removed_indices_);
+  }
}
#define PCL_INSTANTIATE_UniformSampling(T) template class PCL_EXPORTS pcl::UniformSampling<T>;
max_pt = max_p;
}
-struct cloud_point_index_idx
-{
- unsigned int idx;
- unsigned int cloud_point_index;
-
- cloud_point_index_idx() = default;
- cloud_point_index_idx (unsigned int idx_, unsigned int cloud_point_index_) : idx (idx_), cloud_point_index (cloud_point_index_) {}
- bool operator < (const cloud_point_index_idx &p) const { return (idx < p.idx); }
-};
-
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::VoxelGrid<PointT>::applyFilter (PointCloud &output)
divb_mul_ = Eigen::Vector4i (1, div_b_[0], div_b_[0] * div_b_[1], 0);
// Storage for mapping leaf and pointcloud indexes
- std::vector<cloud_point_index_idx> index_vector;
+ std::vector<internal::cloud_point_index_idx> index_vector;
index_vector.reserve (indices_->size ());
// If we don't want to process the entire cloud, but rather filter points far away from the viewpoint first...
// Second pass: sort the index_vector vector using value representing target cell as index
// in effect all points belonging to the same output cell will be next to each other
- auto rightshift_func = [](const cloud_point_index_idx &x, const unsigned offset) { return x.idx >> offset; };
+ auto rightshift_func = [](const internal::cloud_point_index_idx &x, const unsigned offset) { return x.idx >> offset; };
boost::sort::spreadsort::integer_sort(index_vector.begin(), index_vector.end(), rightshift_func);
// Third pass: count output cells
//////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
-pcl::VoxelGridCovariance<PointT>::getDisplayCloud (pcl::PointCloud<PointXYZ>& cell_cloud)
+pcl::VoxelGridCovariance<PointT>::getDisplayCloud (pcl::PointCloud<PointXYZ>& cell_cloud, int pnt_per_cell) const
{
cell_cloud.clear ();
- int pnt_per_cell = 1000;
+ // for now, we use random generator and normal distribution from boost instead of std because switching to std would make this function up to 2.8 times slower
boost::mt19937 rng;
boost::normal_distribution<> nd (0.0, 1.0);
boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > var_nor (rng, nd);
// Generate points for each occupied voxel with sufficient points.
for (auto it = leaves_.begin (); it != leaves_.end (); ++it)
{
- Leaf& leaf = it->second;
+ const Leaf& leaf = it->second;
if (leaf.nr_points >= min_points_per_voxel_)
{
{
// initialization set to true
initialized_ = true;
-
+
// create the voxel grid and store the output cloud
this->filter (filtered_cloud_);
Eigen::Vector4f p = getCentroidCoordinate (ijk);
Eigen::Vector4f direction = p - sensor_origin_;
direction.normalize ();
-
+
// estimate entry point into the voxel grid
float tmin = rayBoxIntersection (sensor_origin_, direction);
// ray traversal
int state = rayTraversal (ijk, sensor_origin_, direction, tmin);
-
+
// if voxel is occluded
if (state == 1)
occluded_voxels.push_back (ijk);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> float
-pcl::VoxelGridOcclusionEstimation<PointT>::rayBoxIntersection (const Eigen::Vector4f& origin,
+pcl::VoxelGridOcclusionEstimation<PointT>::rayBoxIntersection (const Eigen::Vector4f& origin,
const Eigen::Vector4f& direction)
{
float tmin, tmax, tymin, tymax, tzmin, tzmax;
if (direction[1] >= 0)
{
tymin = (b_min_[1] - origin[1]) / direction[1];
- tymax = (b_max_[1] - origin[1]) / direction[1];
+ tymax = (b_max_[1] - origin[1]) / direction[1];
}
else
{
{
PCL_ERROR ("no intersection with the bounding box \n");
tmin = -1.0f;
- return tmin;
+ return tmin;
}
if (tzmin > tmin)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::VoxelGridOcclusionEstimation<PointT>::rayTraversal (const Eigen::Vector3i& target_voxel,
- const Eigen::Vector4f& origin,
+ const Eigen::Vector4f& origin,
const Eigen::Vector4f& direction,
const float t_min)
{
- // coordinate of the boundary of the voxel grid
- Eigen::Vector4f start = origin + t_min * direction;
+ // coordinate of the boundary of the voxel grid. To avoid numerical imprecision
+ // causing the start voxel index to be off by one, we add the machine epsilon
+ Eigen::Vector4f start = origin + (t_min > 0.0f ? (t_min + std::numeric_limits<float>::epsilon()) : t_min) * direction;
// i,j,k coordinate of the voxel were the ray enters the voxel grid
- Eigen::Vector3i ijk = getGridCoordinatesRound (start[0], start[1], start[2]);
+ Eigen::Vector3i ijk = this->getGridCoordinates(start[0], start[1], start[2]);
// steps in which direction we have to travel in the voxel grid
int step_x, step_y, step_z;
float t_max_x = t_min + (voxel_max[0] - start[0]) / direction[0];
float t_max_y = t_min + (voxel_max[1] - start[1]) / direction[1];
float t_max_z = t_min + (voxel_max[2] - start[2]) / direction[2];
-
+
float t_delta_x = leaf_size_[0] / static_cast<float> (std::abs (direction[0]));
float t_delta_y = leaf_size_[1] / static_cast<float> (std::abs (direction[1]));
float t_delta_z = leaf_size_[2] / static_cast<float> (std::abs (direction[2]));
- while ( (ijk[0] < max_b_[0]+1) && (ijk[0] >= min_b_[0]) &&
- (ijk[1] < max_b_[1]+1) && (ijk[1] >= min_b_[1]) &&
+ while ( (ijk[0] < max_b_[0]+1) && (ijk[0] >= min_b_[0]) &&
+ (ijk[1] < max_b_[1]+1) && (ijk[1] >= min_b_[1]) &&
(ijk[2] < max_b_[2]+1) && (ijk[2] >= min_b_[2]) )
{
// check if we reached target voxel
template <typename PointT> int
pcl::VoxelGridOcclusionEstimation<PointT>::rayTraversal (std::vector<Eigen::Vector3i, Eigen::aligned_allocator<Eigen::Vector3i> >& out_ray,
const Eigen::Vector3i& target_voxel,
- const Eigen::Vector4f& origin,
+ const Eigen::Vector4f& origin,
const Eigen::Vector4f& direction,
const float t_min)
{
int reserve_size = div_b_.maxCoeff () * div_b_.maxCoeff ();
out_ray.reserve (reserve_size);
- // coordinate of the boundary of the voxel grid
- Eigen::Vector4f start = origin + t_min * direction;
+ // coordinate of the boundary of the voxel grid. To avoid numerical imprecision
+ // causing the start voxel index to be off by one, we add the machine epsilon
+ Eigen::Vector4f start = origin + (t_min > 0.0f ? (t_min + std::numeric_limits<float>::epsilon()) : t_min) * direction;
// i,j,k coordinate of the voxel were the ray enters the voxel grid
- Eigen::Vector3i ijk = getGridCoordinatesRound (start[0], start[1], start[2]);
- //Eigen::Vector3i ijk = this->getGridCoordinates (start_x, start_y, start_z);
+ Eigen::Vector3i ijk = this->getGridCoordinates (start[0], start[1], start[2]);
// steps in which direction we have to travel in the voxel grid
int step_x, step_y, step_z;
float t_max_x = t_min + (voxel_max[0] - start[0]) / direction[0];
float t_max_y = t_min + (voxel_max[1] - start[1]) / direction[1];
float t_max_z = t_min + (voxel_max[2] - start[2]) / direction[2];
-
+
float t_delta_x = leaf_size_[0] / static_cast<float> (std::abs (direction[0]));
float t_delta_y = leaf_size_[1] / static_cast<float> (std::abs (direction[1]));
float t_delta_z = leaf_size_[2] / static_cast<float> (std::abs (direction[2]));
// the index of the cloud (-1 if empty)
int result = 0;
- while ( (ijk[0] < max_b_[0]+1) && (ijk[0] >= min_b_[0]) &&
- (ijk[1] < max_b_[1]+1) && (ijk[1] >= min_b_[1]) &&
+ while ( (ijk[0] < max_b_[0]+1) && (ijk[0] >= min_b_[0]) &&
+ (ijk[1] < max_b_[1]+1) && (ijk[1] >= min_b_[1]) &&
(ijk[2] < max_b_[2]+1) && (ijk[2] >= min_b_[2]) )
{
// add voxel to ray
* filter.filter (*cloud_out);
* \endcode
+ * \ingroup filters
*/
template <typename PointT>
class ModelOutlierRemoval : public FilterIndices<PointT>
* indices_rem = rorfilter.getRemovedIndices ();
* // The indices_rem array indexes all points of cloud_in that have 5 or more neighbors within the 0.1 search radius
* \endcode
+ * \sa StatisticalOutlierRemoval
* \author Radu Bogdan Rusu
* \ingroup filters
*/
*/
inline void
setSearchMethod (const SearcherPtr &searcher) { searcher_ = searcher; }
+
+ /** \brief Set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value back
+ * to automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads = 0)
+ {
+#ifdef _OPENMP
+ num_threads_ = nr_threads != 0 ? nr_threads : omp_get_num_procs();
+#else
+    if (nr_threads != 1) {
+ PCL_WARN("OpenMP is not available. Keeping number of threads unchanged at 1\n");
+ }
+ num_threads_ = 1;
+#endif
+ }
+
protected:
using PCLBase<PointT>::input_;
using PCLBase<PointT>::indices_;
/** \brief The minimum number of neighbors that a point needs to have in the given search radius to be considered an inlier. */
int min_pts_radius_{1};
+
+    /** \brief Number of threads used during filtering. */
+ int num_threads_{1};
};
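A usage sketch for the threaded radius outlier removal (the 0.1 radius, 5 neighbors, and 4 threads are illustrative values):

#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/point_types.h>

void
removeSparseOutliers (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud_in,
                      pcl::PointCloud<pcl::PointXYZ> &cloud_out)
{
  pcl::RadiusOutlierRemoval<pcl::PointXYZ> ror;
  ror.setInputCloud (cloud_in);
  ror.setRadiusSearch (0.1);
  ror.setMinNeighborsInRadius (5);
  ror.setNumberOfThreads (4);  // new in this change; 0 selects the number of processors
  ror.filter (cloud_out);
}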
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
- using Vector = Eigen::Matrix<float, Eigen::Dynamic, 1>;
+ using Vector = Eigen::Matrix<float, 3, 1>;
public:
* indices_rem = sorfilter.getRemovedIndices ();
* // The indices_rem array indexes all points of cloud_in that are outliers
* \endcode
+ * \sa RadiusOutlierRemoval
* \author Radu Bogdan Rusu
* \ingroup filters
*/
#pragma once
-#include <pcl/filters/filter.h>
+#include <pcl/filters/filter_indices.h>
#include <unordered_map>
* Then, in each *voxel* (i.e., 3D box), all the points present will be
* approximated (i.e., *downsampled*) with the closest point to the center of the voxel.
*
+ * \sa VoxelGrid
* \author Radu Bogdan Rusu
* \ingroup filters
*/
template <typename PointT>
- class UniformSampling: public Filter<PointT>
+ class UniformSampling: public FilterIndices<PointT>
{
- using PointCloud = typename Filter<PointT>::PointCloud;
+ using PointCloud = typename FilterIndices<PointT>::PointCloud;
+
+ using FilterIndices<PointT>::negative_;
using Filter<PointT>::filter_name_;
using Filter<PointT>::input_;
/** \brief Empty constructor. */
UniformSampling (bool extract_removed_indices = false) :
- Filter<PointT>(extract_removed_indices),
+ FilterIndices<PointT>(extract_removed_indices),
leaves_ (),
leaf_size_ (Eigen::Vector4f::Zero ()),
inverse_leaf_size_ (Eigen::Vector4f::Zero ()),
search_radius_ = radius;
}
+ /** \brief Set the minimum number of points required for a voxel to be used.
+     * \param[in] min_points_per_voxel the minimum number of points required for a voxel to be used
+ */
+ inline void
+ setMinimumPointsNumberPerVoxel (unsigned int min_points_per_voxel) { min_points_per_voxel_ = min_points_per_voxel; }
+
+ /** \brief Return the minimum number of points required for a voxel to be used.
+ */
+ inline unsigned int
+ getMinimumPointsNumberPerVoxel () const { return min_points_per_voxel_; }
+
+
protected:
/** \brief Simple structure to hold an nD centroid and the number of points in a leaf. */
struct Leaf
{
Leaf () = default;
int idx{-1};
+ unsigned int count{0};
};
/** \brief The 3D grid leaves. */
/** \brief The nearest neighbors search radius for each point. */
double search_radius_{0.0};
- /** \brief Downsample a Point Cloud using a voxelized grid approach
- * \param[out] output the resultant point cloud message
+ /** \brief Minimum number of points per voxel. */
+ unsigned int min_points_per_voxel_{0};
+
+ /** \brief Filtered results are indexed by an indices array.
+ * \param[out] indices The resultant indices.
*/
void
- applyFilter (PointCloud &output) override;
+ applyFilter (Indices &indices) override;
};
}
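A usage sketch of the index-based UniformSampling with the new voxel threshold (leaf size 0.05 and threshold 3 are illustrative):

#include <pcl/filters/uniform_sampling.h>
#include <pcl/point_types.h>

void
downsample (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud_in,
            pcl::PointCloud<pcl::PointXYZ> &cloud_out)
{
  pcl::UniformSampling<pcl::PointXYZ> us (true);  // true: keep track of removed indices
  us.setInputCloud (cloud_in);
  us.setRadiusSearch (0.05);                      // 3D grid leaf size
  us.setMinimumPointsNumberPerVoxel (3);          // voxels with fewer points are skipped
  us.filter (cloud_out);                          // as a FilterIndices, filter (Indices&) also works
}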
namespace pcl
{
+ /** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
+ * \param[in] cloud the pointer to a pcl::PCLPointCloud2 dataset
+ * \param[in] x_idx the index of the X channel
+ * \param[in] y_idx the index of the Y channel
+ * \param[in] z_idx the index of the Z channel
+ * \param[out] min_pt the minimum data point
+ * \param[out] max_pt the maximum data point
+ */
+ PCL_EXPORTS void
+ getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
+
/** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
* \param[in] cloud the pointer to a pcl::PCLPointCloud2 dataset
+ * \param[in] indices the point cloud indices that need to be considered
* \param[in] x_idx the index of the X channel
* \param[in] y_idx the index of the Y channel
* \param[in] z_idx the index of the Z channel
* \param[out] min_pt the minimum data point
* \param[out] max_pt the maximum data point
- */
+ */
PCL_EXPORTS void
- getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
- Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
+ getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, const Indices &indices, int x_idx, int y_idx, int z_idx,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
/** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
* \note Performs internal data filtering as well.
* \param[out] max_pt the maximum data point
* \param[in] limit_negative \b false if data \b inside of the [min_distance; max_distance] interval should be
* considered, \b true otherwise.
- */
+ */
PCL_EXPORTS void
getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
+ const std::string &distance_field_name, float min_distance, float max_distance,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
+
+ /** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
+ * \note Performs internal data filtering as well.
+ * \param[in] cloud the pointer to a pcl::PCLPointCloud2 dataset
+ * \param[in] indices the point cloud indices that need to be considered
+ * \param[in] x_idx the index of the X channel
+ * \param[in] y_idx the index of the Y channel
+ * \param[in] z_idx the index of the Z channel
+ * \param[in] distance_field_name the name of the dimension to filter data along to
+ * \param[in] min_distance the minimum acceptable value in \a distance_field_name data
+ * \param[in] max_distance the maximum acceptable value in \a distance_field_name data
+ * \param[out] min_pt the minimum data point
+ * \param[out] max_pt the maximum data point
+ * \param[in] limit_negative \b false if data \b inside of the [min_distance; max_distance] interval should be
+ * considered, \b true otherwise.
+ */
+ PCL_EXPORTS void
+ getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, const Indices &indices, int x_idx, int y_idx, int z_idx,
const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
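A usage sketch for the new index-restricted overload (getFieldIndex comes from pcl/common/io.h; the field names assume a standard XYZ cloud):

#include <pcl/PCLPointCloud2.h>
#include <pcl/common/io.h>      // for pcl::getFieldIndex
#include <pcl/filters/voxel_grid.h>

void
boundsOfSubset (const pcl::PCLPointCloud2ConstPtr &cloud, const pcl::Indices &subset,
                Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
{
  const int x_idx = pcl::getFieldIndex (*cloud, "x");
  const int y_idx = pcl::getFieldIndex (*cloud, "y");
  const int z_idx = pcl::getFieldIndex (*cloud, "z");
  pcl::getMinMax3D (cloud, subset, x_idx, y_idx, z_idx, min_pt, max_pt);
}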
VoxelGrid () :
leaf_size_ (Eigen::Vector4f::Zero ()),
inverse_leaf_size_ (Eigen::Array4f::Zero ()),
-
+
min_b_ (Eigen::Vector4i::Zero ()),
max_b_ (Eigen::Vector4i::Zero ()),
div_b_ (Eigen::Vector4i::Zero ()),
void
applyFilter (PCLPointCloud2 &output) override;
};
+
+ namespace internal
+ {
+ /** \brief Used internally in voxel grid classes.
+ */
+ struct cloud_point_index_idx
+ {
+ unsigned int idx;
+ unsigned int cloud_point_index;
+
+ cloud_point_index_idx() = default;
+ cloud_point_index_idx (unsigned int idx_, unsigned int cloud_point_index_) : idx (idx_), cloud_point_index (cloud_point_index_) {}
+ bool operator < (const cloud_point_index_idx &p) const { return (idx < p.idx); }
+ };
+ }
}
#ifdef PCL_NO_PRECOMPILE
/** \brief Get a cloud to visualize each voxels normal distribution.
* \param[out] cell_cloud a cloud created by sampling the normal distributions of each voxel
+ * \param[in] pnt_per_cell how many points should be generated for each cell
*/
void
- getDisplayCloud (pcl::PointCloud<PointXYZ>& cell_cloud);
+ getDisplayCloud (pcl::PointCloud<PointXYZ>& cell_cloud, int pnt_per_cell = 1000) const;
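A usage sketch for the new per-cell sample count (100 points per cell is an illustrative value; the default stays at 1000):

#include <pcl/filters/voxel_grid_covariance.h>
#include <pcl/point_types.h>

void
sampleDistributions (const pcl::VoxelGridCovariance<pcl::PointXYZ> &grid,
                     pcl::PointCloud<pcl::PointXYZ> &display)
{
  grid.getDisplayCloud (display, 100);  // now callable on a const grid
}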
/** \brief Search for the k-nearest occupied voxels for the given query point.
* \note Only voxels containing a sufficient number of points are used.
const Eigen::Vector4f& direction,
const float t_min);
- /** \brief Returns a rounded value.
+ /** \brief Returns a value rounded to the nearest integer
* \param[in] d
* \return rounded value
*/
return static_cast<float> (std::floor (d + 0.5f));
}
- // We use round here instead of std::floor due to some numerical issues.
- /** \brief Returns the corresponding (i,j,k) coordinates in the grid of point (x,y,z).
+ /** \brief Returns the corresponding (i,j,k) coordinates in the grid of point (x,y,z).
* \param[in] x the X point coordinate to get the (i, j, k) index at
* \param[in] y the Y point coordinate to get the (i, j, k) index at
* \param[in] z the Z point coordinate to get the (i, j, k) index at
result.b = static_cast<std::uint8_t>(b);
return (result);
}
-
-#ifndef PCL_NO_PRECOMPILE
-#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_types.h>
-
-PCL_INSTANTIATE_PRODUCT(
- Convolution, ((pcl::RGB))((pcl::RGB)))
-
-PCL_INSTANTIATE_PRODUCT(
- Convolution, ((pcl::PointXYZRGB))((pcl::PointXYZRGB)))
-#endif // PCL_NO_PRECOMPILE
-
} // namespace filters
} // namespace pcl
return;
}
- int nr_points = input_->width * input_->height;
+ uindex_t nr_points = input_->width * input_->height;
// Check if we're going to keep the organized structure of the cloud or not
if (keep_organized_)
removed_indices_->resize (input_->data.size ());
- int nr_p = 0;
- int nr_removed_p = 0;
+ uindex_t nr_p = 0;
+ uindex_t nr_removed_p = 0;
// Create the first xyz_offset
- Eigen::Array4i xyz_offset (input_->fields[x_idx_].offset, input_->fields[y_idx_].offset,
+ Eigen::Array<uindex_t, 4, 1> xyz_offset (input_->fields[x_idx_].offset, input_->fields[y_idx_].offset,
input_->fields[z_idx_].offset, 0);
Eigen::Vector4f pt = Eigen::Vector4f::Zero ();
{
float distance_value = 0;
// Go over all points
- for (int cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
+ for (uindex_t cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
{
// Copy all the fields
memcpy (&output.data[cp * output.point_step], &input_->data[cp * output.point_step], output.point_step);
{
// Go over all points
float distance_value = 0;
- for (int cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
+ for (uindex_t cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
{
// Get the distance value
memcpy (&distance_value, &input_->data[cp * input_->point_step + input_->fields[distance_idx].offset],
// No distance filtering, process all data. No need to check for is_organized here as we did it above
else
{
- for (int cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
+ for (uindex_t cp = 0; cp < nr_points; ++cp, xyz_offset += input_->point_step)
{
// Unoptimized memcpys: assume fields x, y, z are in random order
memcpy (&pt[0], &input_->data[xyz_offset[0]], sizeof(float)); // NOLINT(readability-container-data-pointer)
// The arrays to be used
indices.resize (indices_->size ());
removed_indices_->resize (indices_->size ());
- int oii = 0, rii = 0; // oii = output indices iterator, rii = removed indices iterator
+ uindex_t oii = 0, rii = 0; // oii = output indices iterator, rii = removed indices iterator
const auto x_offset = input_->fields[x_idx_].offset,
y_offset = input_->fields[y_idx_].offset,
z_offset = input_->fields[z_idx_].offset;
removed_indices_->resize (N - sample_size);
// Set random seed so derived indices are the same each time the filter runs
+#ifdef __OpenBSD__
+ srand_deterministic (seed_); // OpenBSD only offers repeatable sequences with this function
+#else
std::srand (seed_);
+#endif // __OpenBSD__
// Algorithm S
std::size_t i = 0;
continue;
}
- // Minimum distance (if mean_k_ == 2) or mean distance
- double dist_sum = 0;
- for (int j = 1; j < mean_k_; ++j)
- dist_sum += sqrt (nn_dists[j]);
- distances[cp] = static_cast<float> (dist_sum / (mean_k_ - 1));
+ // Calculate the mean distance to its neighbors.
+ double dist_sum = 0.0;
+ for (std::size_t k = 1; k < nn_dists.size(); ++k) // k = 0 is the query point
+ dist_sum += sqrt(nn_dists[k]);
+ distances[cp] = static_cast<float>(dist_sum / (nn_dists.size() - 1));
valid_distances++;
}
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
- Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
{
// @todo fix this
if (cloud->fields[x_idx].datatype != pcl::PCLPointField::FLOAT32 ||
memcpy (&pt[1], &cloud->data[xyz_offset[1]], sizeof (float));
memcpy (&pt[2], &cloud->data[xyz_offset[2]], sizeof (float));
// Check if the point is invalid
- if (!std::isfinite (pt[0]) ||
- !std::isfinite (pt[1]) ||
+ if (!std::isfinite (pt[0]) ||
+ !std::isfinite (pt[1]) ||
!std::isfinite (pt[2]))
{
xyz_offset += cloud->point_step;
max_pt = max_p;
}
+///////////////////////////////////////////////////////////////////////////////////////////
+void
+pcl::getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, const pcl::Indices &indices, int x_idx, int y_idx, int z_idx,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
+{
+ if (cloud->fields[x_idx].datatype != pcl::PCLPointField::FLOAT32 ||
+ cloud->fields[y_idx].datatype != pcl::PCLPointField::FLOAT32 ||
+ cloud->fields[z_idx].datatype != pcl::PCLPointField::FLOAT32)
+ {
+ PCL_ERROR ("[pcl::getMinMax3D] XYZ dimensions are not float type!\n");
+ return;
+ }
+
+ Eigen::Array4f min_p, max_p;
+ min_p.setConstant (std::numeric_limits<float>::max());
+ max_p.setConstant (std::numeric_limits<float>::lowest());
+
+ Eigen::Array4f pt = Eigen::Array4f::Zero ();
+ Array4size_t xyz_offset (cloud->fields[x_idx].offset, cloud->fields[y_idx].offset, cloud->fields[z_idx].offset, 0);
+
+ if(cloud->is_dense) // no need to check validity of points
+ {
+ for (const auto& index : indices)
+ {
+ // Unoptimized memcpys: assume fields x, y, z are in random order
+ memcpy(&pt[0],&cloud->data[xyz_offset[0] + cloud->point_step * index],sizeof(float));
+ memcpy(&pt[1],&cloud->data[xyz_offset[1] + cloud->point_step * index],sizeof(float));
+ memcpy(&pt[2],&cloud->data[xyz_offset[2] + cloud->point_step * index],sizeof(float));
+ min_p = (min_p.min)(pt);
+ max_p = (max_p.max)(pt);
+ }
+ }
+ else
+ {
+ for (const auto& index : indices)
+ {
+ // Unoptimized memcpys: assume fields x, y, z are in random order
+ memcpy(&pt[0],&cloud->data[xyz_offset[0] + cloud->point_step * index],sizeof(float));
+ memcpy(&pt[1],&cloud->data[xyz_offset[1] + cloud->point_step * index],sizeof(float));
+ memcpy(&pt[2],&cloud->data[xyz_offset[2] + cloud->point_step * index],sizeof(float));
+ // Check if the point is invalid
+ if (!std::isfinite(pt[0]) ||
+ !std::isfinite(pt[1]) ||
+ !std::isfinite(pt[2]))
+ {
+ continue;
+ }
+ min_p = (min_p.min)(pt);
+ max_p = (max_p.max)(pt);
+ }
+ }
+ min_pt = min_p;
+ max_pt = max_p;
+}
+
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
- const std::string &distance_field_name, float min_distance, float max_distance,
- Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative)
+ const std::string &distance_field_name, float min_distance, float max_distance,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative)
{
// @todo fix this
if (cloud->fields[x_idx].datatype != pcl::PCLPointField::FLOAT32 ||
Eigen::Array4f pt = Eigen::Array4f::Zero ();
Array4size_t xyz_offset (cloud->fields[x_idx].offset,
- cloud->fields[y_idx].offset,
- cloud->fields[z_idx].offset,
- 0);
+ cloud->fields[y_idx].offset,
+ cloud->fields[z_idx].offset,
+ 0);
float distance_value = 0;
for (std::size_t cp = 0; cp < nr_points; ++cp)
{
memcpy (&pt[1], &cloud->data[xyz_offset[1]], sizeof (float));
memcpy (&pt[2], &cloud->data[xyz_offset[2]], sizeof (float));
// Check if the point is invalid
- if (!std::isfinite (pt[0]) ||
- !std::isfinite (pt[1]) ||
+ if (!std::isfinite (pt[0]) ||
+ !std::isfinite (pt[1]) ||
!std::isfinite (pt[2]))
{
xyz_offset += cloud->point_step;
max_pt = max_p;
}
+///////////////////////////////////////////////////////////////////////////////////////////
+void
+pcl::getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, const pcl::Indices &indices, int x_idx, int y_idx, int z_idx,
+ const std::string &distance_field_name, float min_distance, float max_distance,
+ Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative)
+{
+ // @todo fix this
+ if (cloud->fields[x_idx].datatype != pcl::PCLPointField::FLOAT32 ||
+ cloud->fields[y_idx].datatype != pcl::PCLPointField::FLOAT32 ||
+ cloud->fields[z_idx].datatype != pcl::PCLPointField::FLOAT32)
+ {
+ PCL_ERROR ("[pcl::getMinMax3D] XYZ dimensions are not float type!\n");
+ return;
+ }
+
+ Eigen::Array4f min_p, max_p;
+ min_p.setConstant (std::numeric_limits<float>::max());
+ max_p.setConstant (std::numeric_limits<float>::lowest());
+
+ // Get the distance field index
+ int distance_idx = pcl::getFieldIndex (*cloud, distance_field_name);
+
+ // @todo fix this
+ if (cloud->fields[distance_idx].datatype != pcl::PCLPointField::FLOAT32)
+ {
+ PCL_ERROR ("[pcl::getMinMax3D] Filtering dimensions is not float type!\n");
+ return;
+ }
+
+ Eigen::Array4f pt = Eigen::Array4f::Zero ();
+ Array4size_t xyz_offset (cloud->fields[x_idx].offset,
+ cloud->fields[y_idx].offset,
+ cloud->fields[z_idx].offset,
+ 0);
+ float distance_value = 0;
+ if(cloud->is_dense)
+ {
+ for (const auto& index : indices) {
+ std::size_t point_offset = index * cloud->point_step;
+
+ // Get the distance value
+ memcpy(&distance_value,
+ &cloud->data[point_offset + cloud->fields[distance_idx].offset],
+ sizeof(float));
+
+ if (limit_negative) {
+ // Use a threshold for cutting out points which are inside the interval
+ if ((distance_value < max_distance) && (distance_value > min_distance)) {
+ continue;
+ }
+ }
+ else {
+ // Use a threshold for cutting out points which are too close/far away
+ if ((distance_value > max_distance) || (distance_value < min_distance)) {
+ continue;
+ }
+ }
+
+ // Unoptimized memcpys: assume fields x, y, z are in random order
+ memcpy(&pt[0],
+ &cloud->data[xyz_offset[0] + index * cloud->point_step],
+ sizeof(float));
+ memcpy(&pt[1],
+ &cloud->data[xyz_offset[1] + index * cloud->point_step],
+ sizeof(float));
+ memcpy(&pt[2],
+ &cloud->data[xyz_offset[2] + index * cloud->point_step],
+ sizeof(float));
+ min_p = (min_p.min)(pt);
+ max_p = (max_p.max)(pt);
+ }
+ }
+ else
+ {
+ for (const auto& index : indices)
+ {
+ std::size_t point_offset = index * cloud->point_step;
+
+ // Get the distance value
+ memcpy(&distance_value,&cloud->data[point_offset + cloud->fields[distance_idx].offset],sizeof(float));
+
+ if (limit_negative)
+ {
+ // Use a threshold for cutting out points which are inside the interval
+ if ((distance_value < max_distance) && (distance_value > min_distance))
+ {
+ continue;
+ }
+ }
+ else
+ {
+ // Use a threshold for cutting out points which are too close/far away
+ if ((distance_value > max_distance) || (distance_value < min_distance))
+ {
+ continue;
+ }
+ }
+
+ // Unoptimized memcpys: assume fields x, y, z are in random order
+ memcpy(&pt[0],&cloud->data[xyz_offset[0] + index * cloud->point_step],sizeof(float));
+ memcpy(&pt[1],&cloud->data[xyz_offset[1] + index * cloud->point_step],sizeof(float));
+ memcpy(&pt[2],&cloud->data[xyz_offset[2] + index * cloud->point_step],sizeof(float));
+ // Check if the point is invalid
+ if (!std::isfinite(pt[0])
+ || !std::isfinite(pt[1])
+ || !std::isfinite(pt[2]))
+ {
+ continue;
+ }
+ min_p = (min_p.min)(pt);
+ max_p = (max_p.max)(pt);
+ }
+ }
+ min_pt = min_p;
+ max_pt = max_p;
+}
+
+
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::VoxelGrid<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
output.data.clear ();
return;
}
- std::size_t nr_points = input_->width * input_->height;
// Copy the header (and thus the frame_id) + allocate enough space for points
output.height = 1; // downsampling breaks the organized structure
Eigen::Vector4f min_p, max_p;
// Get the minimum and maximum dimensions
if (!filter_field_name_.empty ()) // If we don't want to process the entire cloud...
- getMinMax3D (input_, x_idx_, y_idx_, z_idx_, filter_field_name_,
- static_cast<float> (filter_limit_min_),
+ getMinMax3D (input_, *indices_, x_idx_, y_idx_, z_idx_, filter_field_name_,
+ static_cast<float> (filter_limit_min_),
static_cast<float> (filter_limit_max_), min_p, max_p, filter_limit_negative_);
else
- getMinMax3D (input_, x_idx_, y_idx_, z_idx_, min_p, max_p);
+ getMinMax3D (input_, *indices_, x_idx_, y_idx_, z_idx_, min_p, max_p);
// Check that the leaf size is not too small, given the size of the data
std::int64_t dx = static_cast<std::int64_t>((max_p[0] - min_p[0]) * inverse_leaf_size_[0])+1;
div_b_ = max_b_ - min_b_ + Eigen::Vector4i::Ones ();
div_b_[3] = 0;
- std::vector<cloud_point_index_idx> index_vector;
- index_vector.reserve (nr_points);
+ std::vector<internal::cloud_point_index_idx> index_vector;
+ index_vector.reserve (indices_->size());
// Create the first xyz_offset, and set up the division multiplier
Array4size_t xyz_offset (input_->fields[x_idx_].offset,
if (downsample_all_data_)
{
centroid_size = static_cast<int> (input_->fields.size ());
-
- // ---[ RGB special case
+
+ // ---[ RGB special case
// if the data contains "rgba" or "rgb", add an extra field for r/g/b in centroid
for (int d = 0; d < centroid_size; ++d)
{
}
}
}
-
+
// If we don't want to process the entire cloud, but rather filter points far away from the viewpoint first...
if (!filter_field_name_.empty ())
{
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
float distance_value = 0;
- for (std::size_t cp = 0; cp < nr_points; ++cp)
+ for (const auto &index : *indices_)
{
- std::size_t point_offset = cp * input_->point_step;
+ std::size_t point_offset = index * input_->point_step;
// Get the distance value
memcpy (&distance_value, &input_->data[point_offset + input_->fields[distance_idx].offset], sizeof (float));
// Use a threshold for cutting out points which inside the interval
if (distance_value < filter_limit_max_ && distance_value > filter_limit_min_)
{
- xyz_offset += input_->point_step;
continue;
}
}
// Use a threshold for cutting out points which are too close/far away
if (distance_value > filter_limit_max_ || distance_value < filter_limit_min_)
{
- xyz_offset += input_->point_step;
continue;
}
}
// Unoptimized memcpys: assume fields x, y, z are in random order
- memcpy (&pt[0], &input_->data[xyz_offset[0]], sizeof (float));
- memcpy (&pt[1], &input_->data[xyz_offset[1]], sizeof (float));
- memcpy (&pt[2], &input_->data[xyz_offset[2]], sizeof (float));
+ memcpy (&pt[0], &input_->data[xyz_offset[0] + index * input_->point_step], sizeof (float));
+ memcpy (&pt[1], &input_->data[xyz_offset[1] + index * input_->point_step], sizeof (float));
+ memcpy (&pt[2], &input_->data[xyz_offset[2] + index * input_->point_step], sizeof (float));
// Check if the point is invalid
- if (!std::isfinite (pt[0]) ||
- !std::isfinite (pt[1]) ||
+ if (!std::isfinite (pt[0]) ||
+ !std::isfinite (pt[1]) ||
!std::isfinite (pt[2]))
{
- xyz_offset += input_->point_step;
continue;
}
int ijk2 = static_cast<int> (std::floor (pt[2] * inverse_leaf_size_[2]) - min_b_[2]);
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
- index_vector.emplace_back(idx, static_cast<unsigned int> (cp));
+ index_vector.emplace_back(idx, static_cast<unsigned int> (index));
- xyz_offset += input_->point_step;
}
}
// No distance filtering, process all data
else
{
// First pass: go over all points and insert them into the right leaf
- for (std::size_t cp = 0; cp < nr_points; ++cp)
+ for (const auto &index : *indices_)
{
// Unoptimized memcpys: assume fields x, y, z are in random order
- memcpy (&pt[0], &input_->data[xyz_offset[0]], sizeof (float));
- memcpy (&pt[1], &input_->data[xyz_offset[1]], sizeof (float));
- memcpy (&pt[2], &input_->data[xyz_offset[2]], sizeof (float));
+ memcpy (&pt[0], &input_->data[xyz_offset[0] + index * input_->point_step], sizeof (float));
+ memcpy (&pt[1], &input_->data[xyz_offset[1] + index * input_->point_step], sizeof (float));
+ memcpy (&pt[2], &input_->data[xyz_offset[2] + index * input_->point_step], sizeof (float));
// Check if the point is invalid
- if (!std::isfinite (pt[0]) ||
- !std::isfinite (pt[1]) ||
+ if (!std::isfinite (pt[0]) ||
+ !std::isfinite (pt[1]) ||
!std::isfinite (pt[2]))
{
- xyz_offset += input_->point_step;
continue;
}
int ijk2 = static_cast<int> (std::floor (pt[2] * inverse_leaf_size_[2]) - min_b_[2]);
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
- index_vector.emplace_back(idx, static_cast<unsigned int> (cp));
- xyz_offset += input_->point_step;
+ index_vector.emplace_back(idx, static_cast<unsigned int> (index));
}
}
// Second pass: sort the index_vector vector using value representing target cell as index
// in effect all points belonging to the same output cell will be next to each other
- auto rightshift_func = [](const cloud_point_index_idx &x, const unsigned offset) { return x.idx >> offset; };
+ auto rightshift_func = [](const internal::cloud_point_index_idx &x, const unsigned offset) { return x.idx >> offset; };
boost::sort::spreadsort::integer_sort(index_vector.begin(), index_vector.end(), rightshift_func);
// Third pass: count output cells
std::vector<std::pair<index_t, index_t> > first_and_last_indices_vector;
// Worst case size
first_and_last_indices_vector.reserve (index_vector.size ());
- while (index < index_vector.size ())
+ while (index < index_vector.size ())
{
std::size_t i = index + 1;
- while (i < index_vector.size () && index_vector[i].idx == index_vector[index].idx)
+ while (i < index_vector.size () && index_vector[i].idx == index_vector[index].idx)
++i;
if ((i - index) >= min_points_per_voxel_)
{
output.row_step = output.point_step * output.width;
output.data.resize (output.width * output.point_step);
- if (save_leaf_layout_)
+ if (save_leaf_layout_)
{
try
{
for (std::uint32_t i = 0; i < reinit_size; i++)
{
leaf_layout_[i] = -1;
- }
- leaf_layout_.resize (new_layout_size, -1);
+ }
+ leaf_layout_.resize (new_layout_size, -1);
}
catch (std::bad_alloc&)
{
- throw PCLException("VoxelGrid bin size is too low; impossible to allocate memory for layout",
- "voxel_grid.cpp", "applyFilter");
+ throw PCLException("VoxelGrid bin size is too low; impossible to allocate memory for layout",
+ "voxel_grid.cpp", "applyFilter");
}
catch (std::length_error&)
{
- throw PCLException("VoxelGrid bin size is too low; impossible to allocate memory for layout",
- "voxel_grid.cpp", "applyFilter");
+ throw PCLException("VoxelGrid bin size is too low; impossible to allocate memory for layout",
+ "voxel_grid.cpp", "applyFilter");
}
}
-
+
// If we downsample each field, the {x,y,z}_idx_ offsets should correspond in input_ and output
if (downsample_all_data_)
xyz_offset = Array4size_t (output.fields[x_idx_].offset,
centroid += temporary;
}
centroid /= static_cast<float> (last_index - first_index);
-
+
std::size_t point_offset = index * output.point_step;
// Copy all the fields
for (std::size_t d = 0; d < output.fields.size (); ++d)
// ---[ RGB special case
// full extra r/g/b centroid field
- if (rgba_index >= 0)
+ if (rgba_index >= 0)
{
float r = centroid[centroid_size-4], g = centroid[centroid_size-3], b = centroid[centroid_size-2], a = centroid[centroid_size-1];
int rgb = (static_cast<int> (a) << 24) | (static_cast<int> (r) << 16) | (static_cast<int> (g) << 8) | static_cast<int> (b);
memcpy (&output.data[point_offset + output.fields[rgba_index].offset], &rgb, sizeof (float));
}
}
-
+
++index;
}
}
*/
#include <map>
+#include <pcl/common/common.h> // for getMinMax3D
#include <pcl/filters/voxel_grid_label.h>
-#include <pcl/filters/impl/voxel_grid.hpp>
+#include <boost/mpl/size.hpp> // for boost::mpl::size
//////////////////////////////////////////////////////////////////////////////
void
int label_index = -1;
label_index = pcl::getFieldIndex<PointXYZRGBL> ("label", fields);
- std::vector<cloud_point_index_idx> index_vector;
+ std::vector<internal::cloud_point_index_idx> index_vector;
index_vector.reserve(input_->size());
// If we don't want to process the entire cloud, but rather filter points far away from the viewpoint first...
set(SUBSYS_DESC "Point cloud geometry library")
set(SUBSYS_DEPS common)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
set(incs
"include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/get_boundary.h"
"include/pcl/${SUBSYS_NAME}/line_iterator.h"
"include/pcl/${SUBSYS_NAME}/mesh_base.h"
"include/pcl/${SUBSYS_NAME}/impl/polygon_operations.hpp"
)
-
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
-add_library(${LIB_NAME} INTERFACE)
-target_include_directories(${LIB_NAME} INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/include")
+PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} INCLUDES ${incs} ${impl_incs})
target_link_libraries(${LIB_NAME} INTERFACE Boost::boost pcl_common)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} HEADER_ONLY)
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-
-#include <Eigen/Core>
-#include <Eigen/StdVector>
HalfEdgeIndex& /*idx_free_half_edge*/,
std::true_type /*is_manifold*/) const
{
- return !(is_new_ab && is_new_bc && !is_isolated_b);
+ return (!is_new_ab || !is_new_bc || is_isolated_b);
}
/** \brief Check if the half-edge bc is the next half-edge of ab.
typename IndexContainerT::value_type());
Index ind_old(0), ind_new(0);
- typename ElementContainerT::const_iterator it_e_old = elements.begin();
+ auto it_e_old = elements.cbegin();
auto it_e_new = elements.begin();
- typename DataContainerT::const_iterator it_d_old = data_cloud.begin();
+ auto it_d_old = data_cloud.cbegin();
auto it_d_new = data_cloud.begin();
auto it_ind_new = new_indices.begin();
- typename IndexContainerT::const_iterator it_ind_new_end = new_indices.end();
+ auto it_ind_new_end = new_indices.cend();
while (it_ind_new != it_ind_new_end) {
if (!this->isDeleted(ind_old)) {
set(SUBSYS_DESC "Point cloud GPU libraries")
set(SUBSYS_DEPS)
-option(BUILD_GPU "Build the GPU-related subsystems" ${DEFAULT})
+option(BUILD_GPU "Build the GPU-related subsystems" OFF)
if(NOT (BUILD_GPU AND CUDA_FOUND))
return()
set(SUBSYS_NAME gpu_containers)
set(SUBSYS_PATH gpu/containers)
set(SUBSYS_DESC "Containers with reference counting for GPU memory. This module can be compiled without Cuda Toolkit.")
-set(SUBSYS_DEPS common)
+set(SUBSYS_DEPS common gpu_utils)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
return()
endif()
-file(GLOB srcs src/*.cpp src/*.hpp)
+file(GLOB srcs src/*.cpp src/*.cu src/*.hpp)
file(GLOB incs include/pcl/gpu/containers/*.h)
file(GLOB incs_impl include/pcl/gpu/containers/impl/*.hpp)
source_group("Header Files\\impl" FILES ${incs_impl})
list(APPEND incs ${incs_impl})
-get_filename_component(UTILS_INC "${CMAKE_CURRENT_SOURCE_DIR}/../utils/include" REALPATH)
-
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" ${UTILS_INC})
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries(${LIB_NAME} pcl_common)
+target_link_libraries(${LIB_NAME} pcl_common pcl_gpu_utils)
#Ensures that CUDA is added and the project can compile as it uses cuda calls.
set_source_files_properties(src/device_memory.cpp PROPERTIES LANGUAGE CUDA)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#ifndef __PCL_GPU_UTILS_REPACKS_HPP__
+#define __PCL_GPU_UTILS_REPACKS_HPP__
+
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/pcl_macros.h>
+#include <pcl/point_types.h>
+
+namespace pcl {
+namespace gpu {
+///////////////////////////////////////
+/// This is experimental code    ///
+///////////////////////////////////////
+
+const int NoCP = 0xFFFFFFFF;
+
+/** \brief Returns field copy operation code. */
+inline int
+cp(int from, int to)
+{
+ return ((to & 0xF) << 4) + (from & 0xF);
+}
+
+/* Combines several field copy operations into one int (called a rule) */
+inline int
+rule(int cp1, int cp2 = NoCP, int cp3 = NoCP, int cp4 = NoCP)
+{
+ return (cp1 & 0xFF) + ((cp2 & 0xFF) << 8) + ((cp3 & 0xFF) << 16) +
+ ((cp4 & 0xFF) << 24);
+}
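+// Example (illustrative): each cp(from, to) packs the source word index into the low
+// nibble and the destination word index into the high nibble of one byte, and rule()
+// packs up to four such bytes into a single int. For instance
+// rule(cp(0, 0), cp(1, 1), cp(2, 2)) == 0xFF221100 copies words 0, 1 and 2 verbatim;
+// the unused fourth slot stays 0xFF, which terminates the unpacking loop on the device.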
+
+/* Performs all field copy operations in the given rule array (anywhere from 0 to 16
+ * copies) */
+void
+copyFieldsImpl(
+ int in_size, int out_size, int rules[4], int size, const void* input, void* output);
+
+template <typename PointIn, typename PointOut>
+void
+copyFieldsEx(const DeviceArray<PointIn>& src,
+ DeviceArray<PointOut>& dst,
+ int rule1,
+ int rule2 = NoCP,
+ int rule3 = NoCP,
+ int rule4 = NoCP)
+{
+ int rules[4] = {rule1, rule2, rule3, rule4};
+ dst.create(src.size());
+ copyFieldsImpl(sizeof(PointIn) / sizeof(int),
+ sizeof(PointOut) / sizeof(int),
+ rules,
+ (int)src.size(),
+ src.ptr(),
+ dst.ptr());
+}
+
+void
+copyFields(const DeviceArray<PointXYZ>& src, DeviceArray<PointNormal>& dst)
+{
+ // PointXYZ.x (0) -> PointNormal.x (0)
+ // PointXYZ.y (1) -> PointNormal.y (1)
+ // PointXYZ.z (2) -> PointNormal.z (2)
+ copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
+};
+
+void
+copyFields(const DeviceArray<Normal>& src, DeviceArray<PointNormal>& dst)
+{
+ // Normal.normal_x (0) -> PointNormal.normal_x (4)
+ // Normal.normal_y (1) -> PointNormal.normal_y (5)
+ // Normal.normal_z (2) -> PointNormal.normal_z (6)
+ // Normal.curvature (4) -> PointNormal.curvature (8)
+ copyFieldsEx(src, dst, rule(cp(0, 4), cp(1, 5), cp(2, 6), cp(4, 8)));
+};
+
+void
+copyFields(const DeviceArray<PointXYZRGBL>& src, DeviceArray<PointXYZ>& dst)
+{
+ // PointXYZRGBL.x (0) -> PointXYZ.x (0)
+ // PointXYZRGBL.y (1) -> PointXYZ.y (1)
+ // PointXYZRGBL.z (2) -> PointXYZ.z (2)
+ copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
+};
+
+void
+copyFields(const DeviceArray<PointXYZRGB>& src, DeviceArray<PointXYZ>& dst)
+{
+ // PointXYZRGB.x (0) -> PointXYZ.x (0)
+ // PointXYZRGB.y (1) -> PointXYZ.y (1)
+ // PointXYZRGB.z (2) -> PointXYZ.z (2)
+ copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
+};
+
+void
+copyFields(const DeviceArray<PointXYZRGBA>& src, DeviceArray<PointXYZ>& dst)
+{
+ // PointXYZRGBA.x (0) -> PointXYZ.x (0)
+ // PointXYZRGBA.y (1) -> PointXYZ.y (1)
+ // PointXYZRGBA.z (2) -> PointXYZ.z (2)
+ copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
+};
+
+void
+copyFieldsZ(const DeviceArray<PointXYZ>& src, DeviceArray<float>& dst)
+{
+ // PointXYZ.z (2) -> float (0)
+ copyFieldsEx(src, dst, rule(cp(2, 0)));
+};
+
+void
+copyFieldsZ(const DeviceArray<PointXYZRGB>& src, DeviceArray<float>& dst)
+{
+ // PointXYZRGB.z (2) -> float (0)
+ copyFieldsEx(src, dst, rule(cp(2, 0)));
+};
+} // namespace gpu
+} // namespace pcl
+
+#endif /* __PCL_GPU_UTILS_REPACKS_HPP__ */
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#ifndef PCL_GPU_UTILS_TEXTURE_BINDER_HPP_
+#define PCL_GPU_UTILS_TEXTURE_BINDER_HPP_
+
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/gpu/utils/safe_call.hpp>
+
+namespace pcl {
+namespace gpu {
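+/** \brief RAII helper: the constructors bind a device array to a CUDA texture
+ * reference and the destructor unbinds it again.
+ *
+ * Usage sketch (illustrative; \c depth_map and \c depth_tex are placeholder names):
+ * \code
+ * {
+ *   pcl::gpu::TextureBinder bind(depth_map, depth_tex); // texture bound here
+ *   // ... launch kernels that read through depth_tex ...
+ * }                                                     // texture unbound here
+ * \endcode
+ */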
+class TextureBinder {
+public:
+ template <class T, enum cudaTextureReadMode readMode>
+ TextureBinder(const DeviceArray2D<T>& arr, const struct texture<T, 2, readMode>& tex)
+ : texref(&tex)
+ {
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+ cudaSafeCall(
+ cudaBindTexture2D(0, tex, arr.ptr(), desc, arr.cols(), arr.rows(), arr.step()));
+ }
+
+ template <class T, enum cudaTextureReadMode readMode>
+ TextureBinder(const DeviceArray<T>& arr, const struct texture<T, 1, readMode>& tex)
+ : texref(&tex)
+ {
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+ cudaSafeCall(cudaBindTexture(0, tex, arr.ptr(), desc, arr.sizeBytes()));
+ }
+
+ template <class T, enum cudaTextureReadMode readMode>
+ TextureBinder(const PtrStepSz<T>& arr, const struct texture<T, 2, readMode>& tex)
+ : texref(&tex)
+ {
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+ cudaSafeCall(
+ cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step));
+ }
+
+ template <class T, enum cudaTextureReadMode readMode>
+ TextureBinder(const PtrSz<T>& arr, const struct texture<T, 1, readMode>& tex)
+ : texref(&tex)
+ {
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+ cudaSafeCall(cudaBindTexture(0, tex, arr.data, desc, arr.size * arr.elemSize()));
+ }
+
+ ~TextureBinder() { cudaSafeCall(cudaUnbindTexture(texref)); }
+
+private:
+ const struct textureReference* texref;
+};
+} // namespace gpu
+
+namespace device {
+using pcl::gpu::TextureBinder;
+}
+} // namespace pcl
+
+#endif /* PCL_GPU_UTILS_TEXTURE_BINDER_HPP_*/
\ No newline at end of file
#include <array> // replace c-style array with std::array
#include <cstdio>
+#if !defined(HAVE_CUDA)
#define HAVE_CUDA
+#endif
//#include <pcl_config.h>
#if !defined(HAVE_CUDA)
--- /dev/null
+#include <pcl/gpu/utils/safe_call.hpp>
+#include <pcl/pcl_exports.h>
+
+#include <algorithm>
+
+namespace pcl
+{
+ namespace device
+ {
+ struct Info
+ {
+ enum { SIZE = 4 };
+ int data[SIZE];
+ };
+
+ template<int n>
+ struct Point
+ {
+ int data[n];
+ };
+
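+    // Each int in the Info argument packs up to four copy operations, one per byte:
+    // the low nibble of a byte is the source word index, the high nibble is the
+    // destination word index, and a byte of 0xFF ends the list (see the cp()/rule()
+    // helpers in the repacks header).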
+ template<int in, int out, typename Info>
+ __global__ void deviceCopyFields4B(const Info info, const int size, const void* input, void* output)
+ {
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
+
+ if (idx < size)
+ {
+ Point<in> point_in = reinterpret_cast<const Point<in>* >( input)[idx];
+ Point<out> point_out = reinterpret_cast< Point<out>* >(output)[idx];
+
+ for(int i = 0; i < Info::SIZE; ++i)
+ {
+ int byte;
+ int code = info.data[i];
+
+ byte = ((code >> 0) & 0xFF);
+
+ if (byte == 0xFF)
+ break;
+ else
+ point_out.data[byte >> 4] = point_in.data[byte & 0xF];
+
+ byte = ((code >> 8) & 0xFF);
+
+ if (byte == 0xFF)
+ break;
+ else
+ point_out.data[byte >> 4] = point_in.data[byte & 0xF];
+
+ byte = ((code >> 16) & 0xFF);
+
+ if (byte == 0xFF)
+ break;
+ else
+ point_out.data[byte >> 4] = point_in.data[byte & 0xF];
+
+ byte = ((code >> 24) & 0xFF);
+
+ if (byte == 0xFF)
+ break;
+ else
+ point_out.data[byte >> 4] = point_in.data[byte & 0xF];
+ }
+
+ reinterpret_cast< Point<out>* >(output)[idx] = point_out;
+ }
+ };
+
+ template<int in_size, int out_size>
+ void cf(int info[4], int size, const void* input, void* output)
+ {
+ Info i;
+ std::copy(info, info + 4, i.data);
+
+ dim3 block(256);
+ dim3 grid(divUp(size, block.x));
+
+ deviceCopyFields4B<in_size, out_size><<<grid, block>>>(i, size, input, output);
+ cudaSafeCall ( cudaGetLastError () );
+ cudaSafeCall (cudaDeviceSynchronize ());
+ }
+
+ using copy_fields_t = void (*)(int info[4], int size, const void* input, void* output);
+ }
+}
+
+namespace pcl
+{
+ namespace gpu
+ {
+ using namespace pcl::device;
+
+ PCL_EXPORTS void copyFieldsImpl(int in_size, int out_size, int rules[4], int size, const void* input, void* output)
+ {
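+      // Dispatch table indexed by [input point size - 1][output point size - 1],
+      // both measured in 4-byte words (copyFieldsEx passes sizeof(Point) / sizeof(int));
+      // each entry is the copy kernel launcher instantiated for that size pair.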
+ pcl::device::copy_fields_t funcs[16][16] =
+ {
+ { /**/ cf<1,1>, cf<1, 2>, cf<1, 3>, cf<1, 4>, /**/ cf<1, 5>, cf<1, 6>, cf<1, 7>, cf<1, 8>, /**/ cf<1, 9>, cf<1,10>, cf<1, 11>, cf<1, 12>, /**/ cf<1, 13>, cf<1, 14>, cf<1, 15>, cf<1,16> },
+ { /**/ cf<2,1>, cf<2, 2>, cf<2, 3>, cf<2, 4>, /**/ cf<2, 5>, cf<2, 6>, cf<2, 7>, cf<2, 8>, /**/ cf<2, 9>, cf<2,10>, cf<2, 11>, cf<2, 12>, /**/ cf<2, 13>, cf<2, 14>, cf<2, 15>, cf<2,16> },
+ { /**/ cf<3,1>, cf<3, 2>, cf<3, 3>, cf<3, 4>, /**/ cf<3, 5>, cf<3, 6>, cf<3, 7>, cf<3, 8>, /**/ cf<3, 9>, cf<3,10>, cf<3, 11>, cf<3, 12>, /**/ cf<3, 13>, cf<3, 14>, cf<3, 15>, cf<3,16> },
+ { /**/ cf<4,1>, cf<4, 2>, cf<4, 3>, cf<4, 4>, /**/ cf<4, 5>, cf<4, 6>, cf<4, 7>, cf<4, 8>, /**/ cf<4, 9>, cf<4,10>, cf<4, 11>, cf<4, 12>, /**/ cf<4, 13>, cf<4, 14>, cf<4, 15>, cf<4,16> },
+ { /**/ cf<5,1>, cf<5, 2>, cf<5, 3>, cf<5, 4>, /**/ cf<5, 5>, cf<5, 6>, cf<5, 7>, cf<5, 8>, /**/ cf<5, 9>, cf<5,10>, cf<5, 11>, cf<5, 12>, /**/ cf<5, 13>, cf<5, 14>, cf<5, 15>, cf<5,16> },
+ { /**/ cf<6,1>, cf<6, 2>, cf<6, 3>, cf<6, 4>, /**/ cf<6, 5>, cf<6, 6>, cf<6, 7>, cf<6, 8>, /**/ cf<6, 9>, cf<6,10>, cf<6, 11>, cf<6, 12>, /**/ cf<6, 13>, cf<6, 14>, cf<6, 15>, cf<6,16> },
+ { /**/ cf<7,1>, cf<7, 2>, cf<7, 3>, cf<7, 4>, /**/ cf<7, 5>, cf<7, 6>, cf<7, 7>, cf<7, 8>, /**/ cf<7, 9>, cf<7,10>, cf<7, 11>, cf<7, 12>, /**/ cf<7, 13>, cf<7, 14>, cf<7, 15>, cf<7,16> },
+ { /**/ cf<8,1>, cf<8, 2>, cf<8, 3>, cf<8, 4>, /**/ cf<8, 5>, cf<8, 6>, cf<8, 7>, cf<8, 8>, /**/ cf<8, 9>, cf<8,10>, cf<8, 11>, cf<8, 12>, /**/ cf<8, 13>, cf<8, 14>, cf<8, 15>, cf<8,16> },
+ { /**/ cf<9,1>, cf<9, 2>, cf<9, 3>, cf<9, 4>, /**/ cf<9, 5>, cf<9, 6>, cf<9, 7>, cf<9, 8>, /**/ cf<9, 9>, cf<9,10>, cf<9, 11>, cf<9, 12>, /**/ cf<9, 13>, cf<9, 14>, cf<9, 15>, cf<9,16> },
+ { /**/ cf<10,1>, cf<10,2>, cf<10,3>, cf<10,4>, /**/ cf<10,5>, cf<10,6>, cf<10,7>, cf<10,8>, /**/ cf<10,9>, cf<10,10>, cf<10,11>, cf<10,12>, /**/ cf<10,13>, cf<10,14>, cf<10,15>, cf<10,16> },
+ { /**/ cf<11,1>, cf<11,2>, cf<11,3>, cf<11,4>, /**/ cf<11,5>, cf<11,6>, cf<11,7>, cf<11,8>, /**/ cf<11,9>, cf<11,10>, cf<11,11>, cf<11,12>, /**/ cf<11,13>, cf<11,14>, cf<11,15>, cf<11,16> },
+ { /**/ cf<12,1>, cf<12,2>, cf<12,3>, cf<12,4>, /**/ cf<12,5>, cf<12,6>, cf<12,7>, cf<12,8>, /**/ cf<12,9>, cf<12,10>, cf<12,11>, cf<12,12>, /**/ cf<12,13>, cf<12,14>, cf<12,15>, cf<12,16> },
+ { /**/ cf<13,1>, cf<13,2>, cf<13,3>, cf<13,4>, /**/ cf<13,5>, cf<13,6>, cf<13,7>, cf<13,8>, /**/ cf<13,9>, cf<13,10>, cf<13,11>, cf<13,12>, /**/ cf<13,13>, cf<13,14>, cf<13,15>, cf<13,16> },
+ { /**/ cf<14,1>, cf<14,2>, cf<14,3>, cf<14,4>, /**/ cf<14,5>, cf<14,6>, cf<14,7>, cf<14,8>, /**/ cf<14,9>, cf<14,10>, cf<14,11>, cf<14,12>, /**/ cf<14,13>, cf<14,14>, cf<14,15>, cf<14,16> },
+ { /**/ cf<15,1>, cf<15,2>, cf<15,3>, cf<15,4>, /**/ cf<15,5>, cf<15,6>, cf<15,7>, cf<15,8>, /**/ cf<15,9>, cf<15,10>, cf<15,11>, cf<15,12>, /**/ cf<15,13>, cf<15,14>, cf<15,15>, cf<15,16> },
+ { /**/ cf<16,1>, cf<16,2>, cf<16,3>, cf<16,4>, /**/ cf<16,5>, cf<16,6>, cf<16,7>, cf<16,8>, /**/ cf<16,9>, cf<16,10>, cf<16,11>, cf<16,12>, /**/ cf<16,13>, cf<16,14>, cf<16,15>, cf<16,16> }
+ };
+
+ funcs[in_size-1][out_size-1](rules, size, input, output);
+ }
+ }
+}
+
set(SUBSYS_DESC "Temporary GPU_common module. Weak CUDA dependency: a code that use this module requires CUDA Toolkit installed, but should be compiled without nvcc")
set(SUBSYS_DEPS common gpu_containers gpu_utils gpu_octree geometry)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
source_group("Source Files\\utils" FILES ${srcs_utils})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${cuda} ${srcs_utils} ${dev_incs})
target_link_libraries("${LIB_NAME}" pcl_gpu_utils pcl_gpu_containers pcl_gpu_octree)
#include "internal.hpp"
+#include <thrust/tuple.h>
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/permutation_iterator.h>
struct PlusFloat3
{
- __device__ __forceinline__ float3 operator()(const float3& e1, const float3& e2) const
- {
- return make_float3(e1.x + e2.x, e1.y + e2.y, e1.z + e2.z);
+ __device__ __forceinline__ float3 operator()(const float3& e1, const float3& e2) const
+ {
+ return make_float3(e1.x + e2.x, e1.y + e2.y, e1.z + e2.z);
}
};
-
+
struct TupleDistCvt
{
float3 pivot_;
TupleDistCvt(const float3& pivot) : pivot_(pivot) {}
- __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float4, int>& t) const
- {
- float4 point = t.get<0>();
+ __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float4, int>& t) const
+ {
+ float4 point = thrust::get<0>(t);
float dx = pivot_.x - point.x;
float dy = pivot_.y - point.y;
float dz = pivot_.z - point.z;
float dist = sqrt(dx*dx + dy*dy + dz*dz);
- return thrust::tuple<float, int>(dist, t.get<1>());
+ return thrust::tuple<float, int>(dist, thrust::get<1>(t));
}
};
{
thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
thrust::device_ptr<PointT> src_end = src_beg + cloud.size();
-
+
centroid = transform_reduce(src_beg, src_beg, PointT2float3<PointT>(), make_float3(0.f, 0.f, 0.f), PlusFloat3());
centroid *= 1.f/cloud.size();
}
compute3DCentroid(cloud, centroid);
else
{
- thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
+ thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
thrust::device_ptr<int> map_beg((int*)indices.ptr());
thrust::device_ptr<int> map_end = map_beg + indices.size();
centroid = transform_reduce(make_permutation_iterator(src_beg, map_beg),
- make_permutation_iterator(src_beg, map_end),
+ make_permutation_iterator(src_beg, map_end),
PointT2float3<PointT>(), make_float3(0.f, 0.f, 0.f), PlusFloat3());
centroid *= 1.f/indices.size();
template<typename PointT>
float3 pcl::device::getMaxDistance(const DeviceArray<PointT>& cloud, const float3& pivot)
-{
+{
thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
thrust::device_ptr<PointT> src_end = src_beg + cloud.size();
thrust::tuple<float, int> init(0.f, 0);
thrust::maximum<thrust::tuple<float, int>> op;
-
+
thrust::tuple<float, int> res =
transform_reduce(
make_zip_iterator(make_tuple( src_beg, cf )),
make_zip_iterator(make_tuple( src_beg, ce )),
TupleDistCvt(pivot), init, op);
- float4 point = src_beg[res.get<1>()];
+ float4 point = src_beg[thrust::get<1>(res)];
return make_float3(point.x, point.y, point.z);
}
template<typename PointT>
float3 pcl::device::getMaxDistance(const DeviceArray<PointT>& cloud, const Indices& indices, const float3& pivot)
-{
+{
if (indices.empty())
return getMaxDistance(cloud, pivot);
- thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
+ thrust::device_ptr<PointT> src_beg((PointT*)cloud.ptr());
thrust::device_ptr<int> map_beg((int*)indices.ptr());
thrust::device_ptr<int> map_end = map_beg + indices.size();
thrust::tuple<float, int> init(0.f, 0);
thrust::maximum<thrust::tuple<float, int>> op;
-
+
thrust::tuple<float, int> res = transform_reduce(
make_zip_iterator(make_tuple( make_permutation_iterator(src_beg, map_beg), cf )),
make_zip_iterator(make_tuple( make_permutation_iterator(src_beg, map_end), ce )),
TupleDistCvt(pivot), init, op);
- float4 point = src_beg[map_beg[res.get<1>()]];
+ float4 point = src_beg[map_beg[thrust::get<1>(res)]];
return make_float3(point.x, point.y, point.z);
}
template<int bins>
__device__ __forceinline__ void normalizeFeature(volatile float *feature, volatile float *buffer, int lane) const
{
- //nomalize buns
+ //normalize bins
float value = (lane < bins) ? feature[lane] : 0.f;
float sum = Warp::reduce(buffer, value, plus<volatile float>());
CTA_SIZE = 256,
WAPRS = CTA_SIZE / Warp::WARP_SIZE,
- MIN_NEIGHBOORS = 1
+ // if there are fewer than 3 neighbors, the normal is definitely garbage
+ MIN_NEIGHBOORS = 3
};
struct plus
{
constexpr float NaN = std::numeric_limits<float>::quiet_NaN();
normals.data[idx] = make_float4(NaN, NaN, NaN, NaN);
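+ // nothing left to compute for this point once its normal has been marked invalid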
+ return;
}
const int *ibeg = indices.ptr(idx);
return()
endif()
-include_directories(
- "${PCL_SOURCE_DIR}/test/gtest-1.6.0/include"
- "${PCL_SOURCE_DIR}/test/gtest-1.6.0"
-)
-
-set(pcl_gtest_sources "${PCL_SOURCE_DIR}/test/gtest-1.6.0/src/gtest-all.cc")
-
set(the_test_target test_gpu_features)
-get_filename_component(DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
-get_filename_component(INC1 "${DIR}/../../../io/include" REALPATH)
-get_filename_component(INC2 "${DIR}/../../../features/include" REALPATH)
-get_filename_component(INC3 "${DIR}/../../../kdtree/include" REALPATH)
-get_filename_component(INC4 "${DIR}/../../../visualization/include" REALPATH)
-get_filename_component(INC5 "${DIR}/../../../common/include" REALPATH)
-get_filename_component(INC6 "${DIR}/../../../search/include" REALPATH)
-get_filename_component(INC7 "${DIR}/../../../octree/include" REALPATH)
-
-
file(GLOB test_src *.cpp *.hpp)
list(APPEND test_src ${pcl_gtest_sources})
-include_directories("${INC1}" "${INC2}" "${INC3}" "${INC4}" "${INC5}" "${INC6}" "${INC7}")
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
+
set(SUBSYS_NAME gpu_kinfu)
set(SUBSYS_PATH gpu/kinfu)
set(SUBSYS_DESC "Kinect Fusion implementation")
-set(SUBSYS_DEPS common io gpu_containers geometry search)
+set(SUBSYS_DEPS common io gpu_utils gpu_containers geometry search)
if(${CUDA_VERSION_STRING} VERSION_GREATER_EQUAL "12.0")
set(DEFAULT FALSE)
set(REASON "Kinfu uses textures which was removed in CUDA 12")
source_group("Source Files" FILES ${srcs})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${cuda})
-target_link_libraries(${LIB_NAME} pcl_cuda pcl_gpu_containers)
+target_link_libraries(${LIB_NAME} pcl_gpu_utils pcl_gpu_containers)
+target_include_directories(${LIB_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
target_compile_options(${LIB_NAME} PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:--ftz=true;--prec-div=false;--prec-sqrt=false>)
set(SUBSUBSYS_DESC "Kinfu tools")
set(SUBSUBSYS_DEPS gpu_kinfu visualization)
set(SUBSUBSYS_OPT_DEPS opencv)
-set(EXT_DEPS openni)
+set(EXT_DEPS glew openni)
set(DEFAULT TRUE)
set(REASON "")
return()
endif()
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
file(GLOB hdrs "*.h*")
-include_directories(SYSTEM ${OPENNI_INCLUDE_DIRS})
## KINECT FUSION
set(the_target pcl_kinfu_app)
#include <iostream>
#include <vector>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
-#include <boost/filesystem.hpp>
-
#include <pcl/gpu/kinfu/kinfu.h>
#include <pcl/gpu/kinfu/raycaster.h>
#include <pcl/gpu/kinfu/marching_cubes.h>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<std::string> getPcdFilesInDir(const std::string& directory)
{
- namespace fs = boost::filesystem;
- fs::path dir(directory);
+ pcl_fs::path dir(directory);
std::cout << "path: " << directory << std::endl;
- if (directory.empty() || !fs::exists(dir) || !fs::is_directory(dir))
+ if (directory.empty() || !pcl_fs::exists(dir) || !pcl_fs::is_directory(dir))
PCL_THROW_EXCEPTION (pcl::IOException, "No valid PCD directory given!\n");
std::vector<std::string> result;
- fs::directory_iterator pos(dir);
- fs::directory_iterator end;
+ pcl_fs::directory_iterator pos(dir);
+ pcl_fs::directory_iterator end;
for(; pos != end ; ++pos)
- if (fs::is_regular_file(pos->status()) )
+ if (pcl_fs::is_regular_file(pos->status()) )
if (pos->path().extension().string() == ".pcd")
{
result.push_back (pos->path ().string ());
pcl::gpu::printShortCudaDeviceInfo (device);
// if (checkIfPreFermiGPU(device))
-// return std::cout << std::endl << "Kinfu is supported only for Fermi and Kepler arhitectures. It is not even compiled for pre-Fermi by default. Exiting..." << std::endl, 1;
+// return std::cout << std::endl << "Kinfu is supported only for Fermi and Kepler architectures. It is not even compiled for pre-Fermi by default. Exiting..." << std::endl, 1;
std::unique_ptr<pcl::Grabber> capture;
std::cout << "";
std::cout << " For RGBD benchmark (Requires OpenCV):" << std::endl;
std::cout << " -eval <eval_folder> [-match_file <associations_file_in_the_folder>]" << std::endl;
- std::cout << " For Simuation (Requires pcl::simulation):" << std::endl;
+ std::cout << " For Simulation (Requires pcl::simulation):" << std::endl;
std::cout << " -plyfile : path to ply file for simulation testing " << std::endl;
return 0;
set(SUBSYS_PATH gpu/kinfu_large_scale)
set(SUBSYS_DESC "Kinect Fusion implementation, with volume shifting")
set(SUBSYS_DEPS common io gpu_containers gpu_utils geometry search octree filters kdtree features surface)
+set(EXT_DEPS vtk) # Uses saveRgbPNGFile from png_io, which requires VTK to be present
+
if(${CUDA_VERSION_STRING} VERSION_GREATER_EQUAL "12.0")
set(DEFAULT FALSE)
set(REASON "Kinfu_large_scale uses textures which was removed in CUDA 12")
endif()
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
+PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS ${EXT_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
mark_as_advanced("BUILD_${SUBSYS_NAME}")
source_group("Source Files" FILES ${srcs})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src")
-
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs} ${cuda})
-
-target_link_libraries(${LIB_NAME} pcl_cuda pcl_common pcl_io pcl_gpu_utils pcl_gpu_containers pcl_gpu_octree pcl_octree pcl_filters)
+target_link_libraries(${LIB_NAME} pcl_common pcl_io pcl_gpu_utils pcl_gpu_containers pcl_gpu_octree pcl_octree pcl_filters)
+target_include_directories(${LIB_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
target_compile_options(${LIB_NAME} PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:--ftz=true;--prec-div=false;--prec-sqrt=false>)
-PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${EXT_DEPS})
+PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS "")
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs})
#include <pcl/gpu/containers/device_array.h>
#include <pcl/gpu/containers/kernel_containers.h>
#include <pcl/gpu/kinfu_large_scale/pixel_rgb.h>
-#include <boost/filesystem.hpp>
-//#include <boost/graph/buffer_concepts.hpp>
#include <pcl/io/png_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
{
ScreenshotManager::ScreenshotManager()
{
- boost::filesystem::path p ("KinFuSnapshots");
- boost::filesystem::create_directory (p);
+ pcl_fs::path p ("KinFuSnapshots");
+ pcl_fs::create_directory (p);
screenshot_counter = 0;
setCameraIntrinsics();
}
set(SUBSUBSYS_DESC "Kinfu large scale tools")
set(SUBSUBSYS_DEPS gpu_kinfu_large_scale visualization)
set(SUBSUBSYS_OPT_DEPS )
-set(EXT_DEPS openni openni2)
+set(EXT_DEPS glew openni openni2)
set(DEFAULT TRUE)
set(REASON "")
return()
endif()
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
file(GLOB hdrs "*.h*")
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
## STANDALONE TEXTURE MAPPING
set(the_target pcl_kinfu_largeScale_texture_output)
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
-#include <boost/filesystem.hpp>
-
#include <pcl/gpu/kinfu_large_scale/kinfu.h>
#include <pcl/gpu/kinfu_large_scale/raycaster.h>
#include <pcl/gpu/kinfu_large_scale/marching_cubes.h>
std::vector<std::string> getPcdFilesInDir(const std::string& directory)
{
- namespace fs = boost::filesystem;
- fs::path dir(directory);
+ pcl_fs::path dir(directory);
std::cout << "path: " << directory << std::endl;
- if (directory.empty() || !fs::exists(dir) || !fs::is_directory(dir))
+ if (directory.empty() || !pcl_fs::exists(dir) || !pcl_fs::is_directory(dir))
PCL_THROW_EXCEPTION (pcl::IOException, "No valid PCD directory given!\n");
std::vector<std::string> result;
- fs::directory_iterator pos(dir);
- fs::directory_iterator end;
+ pcl_fs::directory_iterator pos(dir);
+ pcl_fs::directory_iterator end;
for(; pos != end ; ++pos)
- if (fs::is_regular_file(pos->status()) )
+ if (pcl_fs::is_regular_file(pos->status()) )
if (pos->path().extension().string() == ".pcd")
{
result.push_back (pos->path ().string ());
pcl::gpu::printShortCudaDeviceInfo (device);
// if (checkIfPreFermiGPU(device))
- // return std::cout << std::endl << "Kinfu is supported only for Fermi and Kepler arhitectures. It is not even compiled for pre-Fermi by default. Exiting..." << std::endl, 1;
+ // return std::cout << std::endl << "Kinfu is supported only for Fermi and Kepler architectures. It is not even compiled for pre-Fermi by default. Exiting..." << std::endl, 1;
pcl::shared_ptr<pcl::Grabber> capture;
bool triggered_capture = false;
std::cout << "";
std::cout << " For RGBD benchmark (Requires OpenCV):" << std::endl;
std::cout << " -eval <eval_folder> [-match_file <associations_file_in_the_folder>]" << std::endl;
- std::cout << " For Simuation (Requires pcl::simulation):" << std::endl;
+ std::cout << " For Simulation (Requires pcl::simulation):" << std::endl;
std::cout << " -plyfile : path to ply file for simulation testing " << std::endl;
return 0;
* Author: Raphael Favier, Technical University Eindhoven, (r.mysurname <aT> tue.nl)
*/
-
-#include <boost/filesystem.hpp>
-
#include <fstream>
#include <iostream>
#include <sstream>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/transforms.h>
#include <pcl/features/normal_3d.h>
#include <pcl/visualization/pcl_visualizer.h>
PCL_INFO ("\nLoading textures and camera poses...\n");
pcl::texture_mapping::CameraVector my_cams;
- const boost::filesystem::path base_dir (".");
+ const pcl_fs::path base_dir (".");
std::string extension (".txt");
- for (boost::filesystem::directory_iterator it (base_dir); it != boost::filesystem::directory_iterator (); ++it)
+ for (pcl_fs::directory_iterator it (base_dir); it != pcl_fs::directory_iterator (); ++it)
{
- if(boost::filesystem::is_regular_file (it->status ()) && it->path ().extension ().string () == extension)
+ if(pcl_fs::is_regular_file (it->status ()) && it->path ().extension ().string () == extension)
{
pcl::TextureMapping<pcl::PointXYZ>::Camera cam;
readCamPoseFile(it->path ().string (), cam);
set(SUBSYS_DESC "Octree GPU")
set(SUBSYS_DEPS common gpu_containers gpu_utils)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
mark_as_advanced("BUILD_${SUBSYS_NAME}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
list(APPEND srcs ${utils} ${cuda})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/src/utils")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
target_link_libraries("${LIB_NAME}" pcl_gpu_containers)
+target_include_directories(${LIB_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
void create(int query_number, int max_elements)
{
max_elems = max_elements;
- data.create (query_number * max_elems);
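+ // widen to std::size_t before multiplying so the product cannot overflow for large result buffers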
+ data.create (static_cast<std::size_t>(query_number) * static_cast<std::size_t>(max_elems));
if (max_elems != 1)
sizes.create(query_number);
*/
void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results, ResultSqrDists& sqr_distances) const;
- /** \brief Desroys octree and release all resources */
+ /** \brief Destroys octree and releases all resources */
void clear();
private:
void *impl;
#include "utils/scan_block.hpp"
#include "utils/morton.hpp"
+#include <thrust/tuple.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
using namespace pcl::gpu;
-namespace pcl
+namespace pcl
{
namespace device
{
result.x = fmin(e1.x, e2.x);
result.y = fmin(e1.y, e2.y);
result.z = fmin(e1.z, e2.z);
- return result;
- }
+ return result;
+ }
};
template<typename PointType>
struct SelectMaxPoint
{
- __host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
- {
+ __host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
+ {
PointType result;
result.x = fmax(e1.x, e2.x);
result.y = fmax(e1.y, e2.y);
result.z = fmax(e1.z, e2.z);
- return result;
- }
+ return result;
+ }
};
__device__ __forceinline__ thrust::tuple<float, float, float> operator()(const PointType& arg) const
{
thrust::tuple<float, float, float> res;
- res.get<0>() = arg.x;
- res.get<1>() = arg.y;
- res.get<2>() = arg.z;
+ thrust::get<0>(res) = arg.x;
+ thrust::get<1>(res) = arg.y;
+ thrust::get<2>(res) = arg.z;
return res;
}
};
{
const static int max_points_per_leaf = 96;
- enum
- {
+ enum
+ {
GRID_SIZE = 1,
CTA_SIZE = 1024-32,
STRIDE = CTA_SIZE,
__shared__ int tasks_beg;
__shared__ int tasks_end;
__shared__ int total_new;
- __shared__ volatile int offsets[CTA_SIZE];
+ __shared__ volatile int offsets[CTA_SIZE];
struct SingleStepBuild
{
static __device__ __forceinline__ int divUp(int total, int grain) { return (total + grain - 1) / grain; };
__device__ __forceinline__ int FindCells(int task, int level, int cell_begs[], char cell_code[]) const
- {
+ {
int cell_count = 0;
int beg = octree.begs[task];
- int end = octree.ends[task];
+ int end = octree.ends[task];
if (end - beg < max_points_per_leaf)
- {
+ {
//cell_count == 0;
}
else
int cur_code = Morton::extractLevelCode(codes[beg], level);
cell_begs[cell_count] = beg;
- cell_code[cell_count] = cur_code;
- ++cell_count;
+ cell_code[cell_count] = cur_code;
+ ++cell_count;
int last_code = Morton::extractLevelCode(codes[end - 1], level);
if (last_code == cur_code)
{
- cell_begs[cell_count] = end;
+ cell_begs[cell_count] = end;
}
else
{
}
int morton_code = Morton::shiftLevelCode(search_code, level);
- int pos = lower_bound(codes + beg, codes + end, morton_code, CompareByLevelCode(level)) - codes;
+ int pos = lower_bound(codes + beg, codes + end, morton_code, CompareByLevelCode(level)) - codes;
if (pos == end)
{
cell_code[cell_count] = cur_code;
++cell_count;
beg = pos;
- }
+ }
}
}
return cell_count;
__device__ __forceinline__ void operator()() const
- {
+ {
//32 is a performance penalty step for search
static_assert((max_points_per_leaf % 32) == 0, "max_points_per_leaf must be a multiple of 32");
octree. ends[0] = points_number;
octree.parent[0] = -1;
- //init shared
+ //init shared
nodes_num = 1;
tasks_beg = 0;
tasks_end = 1;
__syncthreads();
while (tasks_beg < tasks_end && level < Morton::levels)
- {
- int task_count = tasks_end - tasks_beg;
+ {
+ int task_count = tasks_end - tasks_beg;
int iters = divUp(task_count, CTA_SIZE);
int task = tasks_beg + threadIdx.x;
//__syncthreads(); // extra??
for(int it = 0; it < iters; ++it, task += STRIDE)
- {
+ {
int cell_count = (task < tasks_end) ? FindCells(task, level, cell_begs, cell_code) : 0;
-
+
offsets[threadIdx.x] = cell_count;
__syncthreads();
scan_block<pcl::device::exclusive>(offsets);
-
+
//__syncthreads(); //because sync is inside the scan above
if (task < tasks_end)
__syncthreads();
if (threadIdx.x == CTA_SIZE - 1)
- {
+ {
total_new += cell_count + offsets[threadIdx.x];
nodes_num += cell_count + offsets[threadIdx.x];
- }
- __syncthreads();
+ }
+ __syncthreads();
} /* for(int it = 0; it < iters; ++it, task += STRIDE) */
//__syncthreads(); //extra ??
if (threadIdx.x == CTA_SIZE - 1)
- {
+ {
tasks_beg = tasks_end;
- tasks_end += total_new;
+ tasks_end += total_new;
total_new = 0;
}
++level;
- __syncthreads();
+ __syncthreads();
}
if (threadIdx.x == CTA_SIZE - 1)
}
void pcl::device::OctreeImpl::build()
-{
+{
using namespace pcl::device;
host_octree.downloaded = false;
//allocatations
{
- //ScopeTimer timer("new_allocs");
+ //ScopeTimer timer("new_allocs");
//+1 codes * points_num * sizeof(int)
//+1 indices * points_num * sizeof(int)
//+1 octreeGlobal.nodes * points_num * sizeof(int)
//+3 points_sorted * points_num * sizeof(float)
//==
- // 10 rows
+ // 10 rows
- //left
- //octreeGlobal.nodes_num * 1 * sizeof(int)
+ //left
+ //octreeGlobal.nodes_num * 1 * sizeof(int)
//==
- // 3 * sizeof(int) => +1 row
+ // 3 * sizeof(int) => +1 row
const int transaction_size = 128 / sizeof(int);
int cols = std::max<int>(points_num, transaction_size * 4);
int rows = 10 + 1; // = 13
-
+
storage.create(rows, cols);
-
+
codes = DeviceArray<int>(storage.ptr(0), points_num);
indices = DeviceArray<int>(storage.ptr(1), points_num);
-
+
octreeGlobal.nodes = storage.ptr(2);
octreeGlobal.codes = storage.ptr(3);
octreeGlobal.begs = storage.ptr(4);
points_sorted = DeviceArray2D<float>(3, points_num, storage.ptr(8), storage.step());
}
-
+
{
- //ScopeTimer timer("reduce-morton-sort-permutations");
-
+ //ScopeTimer timer("reduce-morton-sort-permutations");
+
thrust::device_ptr<PointType> beg(points.ptr());
thrust::device_ptr<PointType> end = beg + points.size();
atmin.x = atmin.y = atmin.z = std::numeric_limits<float>::lowest();
atmax.w = atmin.w = 0;
- //ScopeTimer timer("reduce");
+ //ScopeTimer timer("reduce");
PointType minp = thrust::reduce(beg, end, atmax, SelectMinPoint<PointType>());
PointType maxp = thrust::reduce(beg, end, atmin, SelectMaxPoint<PointType>());
octreeGlobal.minp = make_float3(minp.x, minp.y, minp.z);
octreeGlobal.maxp = make_float3(maxp.x, maxp.y, maxp.z);
}
-
+
thrust::device_ptr<int> codes_beg(codes.ptr());
thrust::device_ptr<int> codes_end = codes_beg + codes.size();
{
- //ScopeTimer timer("morton");
+ //ScopeTimer timer("morton");
thrust::transform(beg, end, codes_beg, CalcMorton(octreeGlobal.minp, octreeGlobal.maxp));
}
thrust::device_ptr<int> indices_beg(indices.ptr());
thrust::device_ptr<int> indices_end = indices_beg + indices.size();
{
- //ScopeTimer timer("sort");
+ //ScopeTimer timer("sort");
thrust::sequence(indices_beg, indices_end);
- thrust::sort_by_key(codes_beg, codes_end, indices_beg );
+ thrust::sort_by_key(codes_beg, codes_end, indices_beg );
}
{
- ////ScopeTimer timer("perm");
+ ////ScopeTimer timer("perm");
//thrust::copy(make_permutation_iterator(beg, indices_beg),
- // make_permutation_iterator(end, indices_end), device_ptr<float3>(points_sorted.ptr()));
+ // make_permutation_iterator(end, indices_end), device_ptr<float3>(points_sorted.ptr()));
+
-
}
{
thrust::device_ptr<float> xs(points_sorted.ptr(0));
thrust::device_ptr<float> ys(points_sorted.ptr(1));
thrust::device_ptr<float> zs(points_sorted.ptr(2));
- //ScopeTimer timer("perm2");
+ //ScopeTimer timer("perm2");
thrust::transform(make_permutation_iterator(beg, indices_beg),
- make_permutation_iterator(end, indices_end),
+ make_permutation_iterator(end, indices_end),
make_zip_iterator(make_tuple(xs, ys, zs)), PointType_to_tuple<PointType>());
-
+
}
}
-
+
SingleStepBuild ssb;
ssb.octree = octreeGlobal;
ssb.codes = codes;
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
-#include "pcl/gpu/utils/timers_cuda.hpp"
-#include "pcl/gpu/utils/safe_call.hpp"
+#include <pcl/gpu/utils/timers_cuda.hpp>
+#include <pcl/gpu/utils/safe_call.hpp>
#include "internal.hpp"
#include "utils/approx_nearest_utils.hpp"
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
+#include <pcl/gpu/containers/initialization.h>
#include <pcl/gpu/octree/octree.hpp>
#include <pcl/gpu/utils/timers_cuda.hpp>
#include <pcl/gpu/utils/safe_call.hpp>
#include "morton.hpp"
#include <assert.h>
+#include <cstdint>
#include <limits>
#include <tuple>
#include <bitset>
set(SUBSYS_DESC "Point cloud people library")
set(SUBSYS_DEPS common features filters geometry gpu_containers gpu_utils io kdtree octree search segmentation surface visualization)
-set(build FALSE)
-PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} OFF)
+if(${CUDA_VERSION_STRING} VERSION_GREATER_EQUAL "12.0")
+ set(DEFAULT FALSE)
+  set(REASON "gpu_people uses texture references, which were removed in CUDA 12")
+else()
+ set(DEFAULT TRUE)
+endif()
+
+PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} ${DEFAULT} ${REASON})
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
mark_as_advanced("BUILD_${SUBSYS_NAME}")
return()
endif()
-#find NPP
-unset(CUDA_npp_LIBRARY CACHE)
-find_cuda_helper_libs(nppc)
-find_cuda_helper_libs(npps)
-if(CUDA_VERSION VERSION_GREATER "9.0" OR CUDA_VERSION VERSION_EQUAL "9.0")
- find_cuda_helper_libs(nppim)
- find_cuda_helper_libs(nppidei)
- set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppim_LIBRARY} ${CUDA_nppidei_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.17)
+ set(CUDA_npp_LIBRARY CUDA::nppc CUDA::nppim CUDA::nppidei CUDA::npps CACHE STRING "npp library")
else()
- find_cuda_helper_libs(nppi)
- set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ #find NPP
+ unset(CUDA_npp_LIBRARY CACHE)
+ find_cuda_helper_libs(nppc)
+ find_cuda_helper_libs(npps)
+ if(CUDA_VERSION VERSION_GREATER_EQUAL "9.0")
+ find_cuda_helper_libs(nppim)
+ find_cuda_helper_libs(nppidei)
+ set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppim_LIBRARY} ${CUDA_nppidei_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ else()
+ find_cuda_helper_libs(nppi)
+ set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ endif()
endif()
-
#Label_skeleton
file(GLOB srcs src/*.cpp src/*.h*)
source_group("Source files\\cuda" FILES ${srcs_cuda})
source_group("Source files" FILES ${srcs})
-include_directories(
- "${CMAKE_CURRENT_SOURCE_DIR}/include"
- "${CMAKE_CURRENT_SOURCE_DIR}/src"
- "${CMAKE_CURRENT_SOURCE_DIR}/src/cuda"
- "${CMAKE_CURRENT_SOURCE_DIR}/src/cuda/nvidia"
-)
-
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${hdrs} ${srcs_cuda})
-target_link_libraries(${LIB_NAME} pcl_cuda pcl_common pcl_search pcl_surface pcl_segmentation pcl_features pcl_sample_consensus pcl_gpu_utils pcl_gpu_containers "${CUDA_CUDART_LIBRARY}" ${CUDA_npp_LIBRARY})
+target_link_libraries(${LIB_NAME} pcl_common pcl_search pcl_surface pcl_segmentation pcl_features pcl_sample_consensus pcl_gpu_utils pcl_gpu_containers ${CUDA_CUDART_LIBRARY} ${CUDA_npp_LIBRARY})
+target_include_directories(
+ ${LIB_NAME}
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}/src
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/cuda
+ PUBLIC
+ ${CMAKE_CURRENT_SOURCE_DIR}/src/cuda/nvidia
+)
if(UNIX OR APPLE)
target_compile_options(${LIB_NAME} INTERFACE $<$<COMPILE_LANGUAGE:CUDA>:"-Xcompiler=-fPIC">)
enum { XML_VERSION = 1}; /** \brief This indicates the current used xml file version (for people lib only) **/
enum { NUM_ATTRIBS = 2000 };
- enum { NUM_LABELS = 32 }; /** \brief Our code is forseen to use maximal use 32 labels **/
+    enum { NUM_LABELS = 32 };      /** \brief Our code is foreseen to use at most 32 labels **/
/** @todo implement label 25 to 29 **/
enum part_t
#include "internal.h"
-#include <pcl/gpu/utils/texture_binder.hpp>
+#include <pcl/gpu/containers/impl/texture_binder.hpp>
#include <pcl/gpu/utils/device/block.hpp>
#include <cassert>
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/safe_call.hpp>
-#include <pcl/gpu/utils/texture_binder.hpp>
+#include <pcl/gpu/containers/impl/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/safe_call.hpp>
-#include <pcl/gpu/utils/texture_binder.hpp>
+#include <pcl/gpu/containers/impl/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include "internal.h"
#include <pcl/gpu/utils/safe_call.hpp>
-#include <pcl/gpu/utils/texture_binder.hpp>
+#include <pcl/gpu/containers/initialization.h>
+#include <pcl/gpu/containers/impl/texture_binder.hpp>
#include "npp.h"
#include <stdio.h>
else()
set(DEFAULT TRUE)
set(REASON)
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
endif()
set(the_target people_tracking)
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
-
#PCL_ADD_EXECUTABLE(${the_target} "${SUBSYS_NAME}" people_tracking.cpp)
#target_link_libraries("${the_target}" pcl_common pcl_kdtree pcl_gpu_people pcl_io pcl_visualization)
#include <pcl/pcl_base.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/time.h>
#include <pcl/exceptions.h>
#include <pcl/console/parse.h>
#include <pcl/io/pcd_grabber.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/png_io.h>
-#include <boost/filesystem.hpp>
#include <functional>
#include <iostream>
std::vector<std::string> getPcdFilesInDir(const std::string& directory)
{
- namespace fs = boost::filesystem;
- fs::path dir(directory);
+ pcl_fs::path dir(directory);
- if (!fs::exists(dir) || !fs::is_directory(dir))
+ if (!pcl_fs::exists(dir) || !pcl_fs::is_directory(dir))
PCL_THROW_EXCEPTION(pcl::IOException, "Wrong PCD directory");
std::vector<std::string> result;
- fs::directory_iterator pos(dir);
- fs::directory_iterator end;
+ pcl_fs::directory_iterator pos(dir);
+ pcl_fs::directory_iterator end;
for(; pos != end ; ++pos)
- if (fs::is_regular_file(pos->status()) )
+ if (pcl_fs::is_regular_file(pos->status()) )
if (pos->path().extension().string() == ".pcd")
result.push_back(pos->path().string());
set(SUBSYS_DESC "Point cloud GPU segmentation library")
set(SUBSYS_DEPS common gpu_containers gpu_utils gpu_octree)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common pcl_gpu_octree pcl_gpu_utils pcl_gpu_containers)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
#pragma once
#include <pcl/common/copy_point.h>
#include <pcl/gpu/segmentation/gpu_extract_clusters.h>
+#include <pcl/pcl_exports.h>
namespace pcl {
namespace detail {
//// Downloads only the necessary cluster indices from the device to the host.
-void
+PCL_EXPORTS void
economical_download(const pcl::gpu::NeighborIndices& source_indices,
const pcl::Indices& buffer_indices,
std::size_t buffer_size,
set(SUBSYS_DESC "Surface algorithms with GPU")
set(SUBSYS_DEPS common gpu_containers gpu_utils geometry)
-set(build FALSE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
list(APPEND srcs ${utils} ${cuda})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries("${LIB_NAME}" pcl_gpu_containers)
+target_link_libraries(${LIB_NAME} pcl_gpu_containers)
+target_include_directories(${LIB_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
namespace pcl
{
namespace device
- {
+ {
template<bool use_max>
struct IndOp
{
__device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& e1, const thrust::tuple<float, int>& e2) const
- {
+ {
thrust::tuple<float, int> res;
-
+
if (use_max)
- res.get<0>() = fmax(e1.get<0>(), e2.get<0>());
+ thrust::get<0>(res) = fmax(thrust::get<0>(e1), thrust::get<0>(e2));
else
- res.get<0>() = fmin(e1.get<0>(), e2.get<0>());
+ thrust::get<0>(res) = fmin(thrust::get<0>(e1), thrust::get<0>(e2));
- res.get<1>() = (res.get<0>() == e1.get<0>()) ? e1.get<1>() : e2.get<1>();
- return res;
- }
+ thrust::get<1>(res) = (thrust::get<0>(res) == thrust::get<0>(e1)) ? thrust::get<1>(e1) : thrust::get<1>(e2);
+ return res;
+ }
};
struct X
- {
- __device__ __forceinline__
- thrust::tuple<float, int>
+ {
+ __device__ __forceinline__
+ thrust::tuple<float, int>
operator()(const thrust::tuple<PointType, int>& in) const
{
- return thrust::tuple<float, int>(in.get<0>().x, in.get<1>());
+ return thrust::tuple<float, int>(thrust::get<0>(in).x, thrust::get<1>(in));
}
};
struct Y
- {
+ {
__device__ __forceinline__ float operator()(const PointType& in) const { return in.y; }
};
struct Z
- {
+ {
__device__ __forceinline__ float operator()(const PointType& in) const { return in.z; }
};
-
+
struct LineDist
{
float3 x1, x2;
LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {}
-
+
__device__ __forceinline__
thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const
- {
- float3 x0 = tr(in.get<0>());
+ {
+ float3 x0 = tr(thrust::get<0>(in));
- float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2);
- return thrust::tuple<float, int>(dist, in.get<1>());
- }
+ float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2);
+ return thrust::tuple<float, int>(dist, thrust::get<1>(in));
+ }
};
struct PlaneDist
- {
+ {
float3 x1, n;
PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1))
{
float3 x2 = tr(p2), x3 = tr(p3);
n = normalized(cross(x2 - x1, x3 - x1));
}
-
+
__device__ __forceinline__
thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const
{
- float3 x0 = tr(in.get<0>());
+ float3 x0 = tr(thrust::get<0>(in));
float dist = std::abs(dot(n, x0 - x1));
- return thrust::tuple<float, int>(dist, in.get<1>());
+ return thrust::tuple<float, int>(dist, thrust::get<1>(in));
}
};
-
+
template<typename It, typename Unary, typename Init, typename Binary>
int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary)
{
thrust::counting_iterator<int> cbeg(0);
thrust::counting_iterator<int> cend = cbeg + thrust::distance(beg, end);
-
- thrust::tuple<float, int> t = transform_reduce(
- make_zip_iterator(thrust::make_tuple(beg, cbeg)),
- make_zip_iterator(thrust::make_tuple(end, cend)),
+
+ thrust::tuple<float, int> t = transform_reduce(
+ make_zip_iterator(thrust::make_tuple(beg, cbeg)),
+ make_zip_iterator(thrust::make_tuple(end, cend)),
unop, init, binary);
-
- return t.get<1>();
+
+ return thrust::get<1>(t);
}
template<typename It, typename Unary>
{
thrust::tuple<float, int> max_tuple(std::numeric_limits<float>::min(), 0);
return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>());
- }
+ }
}
}
pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_)
-{
+{
cloud_size = cloud.size();
facets_dists.create(cloud_size);
perm.create(cloud_size);
- thrust::device_ptr<int> pbeg(perm.ptr());
+ thrust::device_ptr<int> pbeg(perm.ptr());
thrust::sequence(pbeg, pbeg + cloud_size);
}
void pcl::device::PointStream::computeInitalSimplex()
{
- thrust::device_ptr<const PointType> beg(cloud.ptr());
+ thrust::device_ptr<const PointType> beg(cloud.ptr());
thrust::device_ptr<const PointType> end = beg + cloud_size;
-
+
int minx = transform_reduce_min_index(beg, end, X());
int maxx = transform_reduce_max_index(beg, end, X());
PointType p1 = *(beg + minx);
PointType p2 = *(beg + maxx);
-
+
int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2));
PointType p3 = *(beg + maxl);
-
+
int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3));
PointType p4 = *(beg + maxp);
simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4);
simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp;
- float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), thrust::maximum<float>());
- float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), thrust::minimum<float>());
+ float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), thrust::maximum<float>());
+ float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), thrust::minimum<float>());
+
+ float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), thrust::maximum<float>());
+ float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), thrust::minimum<float>());
- float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), thrust::maximum<float>());
- float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), thrust::minimum<float>());
-
float dx = (p2.x - p1.x);
float dy = (maxy - miny);
float dz = (maxz - minz);
simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1);
simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2);
simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3);
- simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4);
+ simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4);
}
namespace pcl
namespace device
{
__global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds)
- {
+ {
*(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1);
*(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2);
*(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3);
}
void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s)
-{
- init_fs<<<1, 1>>>(s.i1, s.i2, s.i3, s.i4, verts_inds);
+{
+ init_fs<<<1, 1>>>(s.i1, s.i2, s.i3, s.i4, verts_inds);
cudaSafeCall( cudaGetLastError() );
- cudaSafeCall( cudaDeviceSynchronize() );
+ cudaSafeCall( cudaDeviceSynchronize() );
facet_count = 4;
}
{
float diag;
float4 pl1, pl2, pl3, pl4;
-
- InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal)
+
+ InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal)
: diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4)
- {
+ {
pl1 *= compue_inv_normal_norm(pl1);
pl2 *= compue_inv_normal_norm(pl2);
pl3 *= compue_inv_normal_norm(pl3);
pl4 *= compue_inv_normal_norm(pl4);
}
-
+
__device__ __forceinline__
- std::uint64_t
+ std::uint64_t
operator()(const PointType& p) const
- {
+ {
float4 x = p;
x.w = 1;
float dists[] = { d0, d1, d2, d3 };
int negs_inds[4];
int neg_count = 0;
-
+
int idx = std::numeric_limits<int>::max();
float dist = 0;
{
int i1 = negs_inds[1];
int i2 = negs_inds[2];
-
+
int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1;
negs_inds[1] = ir;
--neg_count;
{
int i1 = negs_inds[0];
int i2 = negs_inds[1];
-
+
int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1;
negs_inds[0] = ir;
- --neg_count;
+ --neg_count;
}
if (neg_count == 1)
std::uint64_t res = idx;
res <<= 32;
return res + *reinterpret_cast<unsigned int*>(&dist);
- }
- };
+ }
+ };
- __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, std::uint64_t* output)
- {
+ __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, std::uint64_t* output)
+ {
int index = threadIdx.x + blockIdx.x * blockDim.x;
- if (index < cloud_size)
- output[index] = ic(points[index]);
+ if (index < cloud_size)
+ output[index] = ic(points[index]);
}
}
}
void pcl::device::PointStream::initalClassify()
-{
+{
//thrust::device_ptr<const PointType> beg(cloud.ptr());
//thrust::device_ptr<const PointType> end = beg + cloud_size;
thrust::device_ptr<std::uint64_t> out(facets_dists.ptr());
-
+
InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag);
//thrust::transform(beg, end, out, ic);
-
+
//printFuncAttrib(initalClassifyKernel);
initalClassifyKernel<<<divUp(cloud_size, 256), 256>>>(ic, cloud, cloud_size, facets_dists);
{
namespace device
{
- __device__ int new_cloud_size;
+ __device__ int new_cloud_size;
struct SearchFacetHeads
- {
+ {
std::uint64_t *facets_dists;
int cloud_size;
int facet_count;
mutable int* head_points;
//bool logger;
-
+
__device__ __forceinline__
void operator()(int facet) const
- {
+ {
const std::uint64_t* b = facets_dists;
const std::uint64_t* e = b + cloud_size;
bool last_thread = facet == facet_count;
- int search_value = !last_thread ? facet : std::numeric_limits<int>::max();
- int index = lower_bound(b, e, search_value, LessThanByFacet()) - b;
-
+ int search_value = !last_thread ? facet : std::numeric_limits<int>::max();
+ int index = lower_bound(b, e, search_value, LessThanByFacet()) - b;
+
if (last_thread)
new_cloud_size = index;
else
{
bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32));
- head_points[facet] = not_found ? -1 : perm[index];
+ head_points[facet] = not_found ? -1 : perm[index];
}
}
};
sfh.facet_count = (int)facet_count;
sfh.perm = perm;
sfh.points = cloud.ptr();
- sfh.head_points = head_points;
-
+ sfh.head_points = head_points;
+
//thrust::counting_iterator<int> b(0);
- //thrust::counting_iterator<int> e = b + facet_count + 1;
+ //thrust::counting_iterator<int> e = b + facet_count + 1;
//thrust::for_each(b, e, sfh);
searchFacetHeadsKernel<<<divUp(facet_count+1, 256), 256>>>(sfh);
cudaSafeCall( cudaGetLastError() );
- cudaSafeCall( cudaDeviceSynchronize() );
+ cudaSafeCall( cudaDeviceSynchronize() );
int new_size;
- cudaSafeCall( cudaMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) );
+ cudaSafeCall( cudaMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) );
return new_size;
}
struct Compaction
{
- enum
+ enum
{
CTA_SIZE = 256,
int* head_points_in;
PtrStep<int> verts_inds_in;
-
+
int *scan_buffer;
int facet_count;
mutable int* head_points_out;
mutable PtrStep<int> verts_inds_out;
-
+
mutable PtrStep<int> empty_facets;
mutable int *empty_count;
-
+
__device__ __forceinline__
void operator()() const
{
int offset = scan_buffer[idx];
head_points_out[offset] = head_idx;
-
+
verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx];
verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx];
verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx];
-
-
+
+
}
- else
- empty = 1;
+ else
+ empty = 1;
}
int total = __popc (__ballot_sync (__activemask (), empty));
if (laneid == 0)
{
int old = atomicAdd(empty_count, total);
- wapr_buffer[warpid] = old;
+ wapr_buffer[warpid] = old;
}
int old = wapr_buffer[warpid];
{
empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx];
empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx];
- empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx];
+ empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx];
int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx];
}
- }
+ }
}
};
void pcl::device::FacetStream::compactFacets()
{
- int old_empty_count;
- empty_count.download(&old_empty_count);
+ int old_empty_count;
+ empty_count.download(&old_empty_count);
thrust::device_ptr<int> b(head_points.ptr());
thrust::device_ptr<int> e = b + facet_count;
thrust::device_ptr<int> o(scan_buffer.ptr());
-
- thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>());
-
+
+ thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>());
+
Compaction c;
c.verts_inds_in = verts_inds;
- c.head_points_in = head_points;
+ c.head_points_in = head_points;
c.scan_buffer = scan_buffer;
c.facet_count = facet_count;
c.empty_facets = empty_facets;
c.empty_count = empty_count;
-
+
int block = Compaction::CTA_SIZE;
int grid = divUp(facet_count, block);
- compactionKernel<<<grid, block>>>(c);
+ compactionKernel<<<grid, block>>>(c);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
-
+
verts_inds.swap(verts_inds2);
head_points.swap(head_points2);
- int new_empty_count;
- empty_count.download(&new_empty_count);
-
+ int new_empty_count;
+ empty_count.download(&new_empty_count);
+
facet_count -= new_empty_count - old_empty_count;
}
int facet_count;
- __device__ __forceinline__
+ __device__ __forceinline__
void operator()(int point_idx) const
{
int perm_index = perm[point_idx];
-
+
int facet = facets_dists[point_idx] >> 32;
facet = scan_buffer[facet];
if (hi == perm_index)
{
std::uint64_t res = std::numeric_limits<int>::max();
- res <<= 32;
+ res <<= 32;
facets_dists[point_idx] = res;
}
- else
+ else
{
int i1 = verts_inds.ptr(0)[facet];
float3 v1 = tr( points[ i1 ] );
float3 v2 = tr( points[ i2 ] );
float3 v3 = tr( points[ i3 ] );
-
+
float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j
float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j
- float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2
+ float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2
p0 *= compue_inv_normal_norm(p0);
p1 *= compue_inv_normal_norm(p1);
p2 *= compue_inv_normal_norm(p2);
-
+
float4 p = points[perm_index];
p.w = 1;
for(int i = 0; i < 3; ++i)
if (dists[i] < 0)
negs_inds[neg_count++] = i;
-
+
if (neg_count == 3)
{
int i1 = negs_inds[1];
int i2 = negs_inds[2];
-
+
int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1;
negs_inds[1] = ir;
--neg_count;
{
int i1 = negs_inds[0];
int i2 = negs_inds[1];
-
+
int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1;
negs_inds[0] = ir;
- --neg_count;
+ --neg_count;
}
if (neg_count == 1)
// if (neg_count == 0)
// new_idx = std::numeric_limits<int>::max() ==>> internal point
-
+
std::uint64_t res = new_idx;
res <<= 32;
res += *reinterpret_cast<unsigned int*>(&dist);
facets_dists[point_idx] = res;
- } /* if (hi == perm_index) */
+ } /* if (hi == perm_index) */
}
- };
+ };
__global__ void classifyKernel(const Classify c, int cloud_size)
{
}
void pcl::device::PointStream::classify(FacetStream& fs)
-{
+{
Classify c;
c.facets_dists = facets_dists;
c.diag = cloud_diag;
c.facet_count = fs.facet_count;
- //thrust::counting_iterator<int> b(0);
+ //thrust::counting_iterator<int> b(0);
//thrust::for_each(b, b + cloud_size, c);
classifyKernel<<<divUp(cloud_size, 256), 256>>>(c, cloud_size);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
-
+
thrust::device_ptr<std::uint64_t> beg(facets_dists.ptr());
thrust::device_ptr<std::uint64_t> end = beg + cloud_size;
-
+
thrust::device_ptr<int> pbeg(perm.ptr());
thrust::sort_by_key(beg, end, pbeg);
}
mutable PtrStep<int> verts_inds;
- __device__ __forceinline__
+ __device__ __forceinline__
void operator()(int facet) const
{
int hi = head_points[facet];
int i1 = verts_inds.ptr(0)[facet];
int i2 = verts_inds.ptr(1)[facet];
int i3 = verts_inds.ptr(2)[facet];
-
+
make_facet(hi, i1, i2, facet);
make_facet(hi, i2, i3, facet + facet_count);
make_facet(hi, i3, i1, facet + facet_count * 2);
{
int facet = threadIdx.x + blockIdx.x * blockDim.x;
- if (facet < sf.facet_count)
- sf(facet);
+ if (facet < sf.facet_count)
+ sf(facet);
}
}
}
sf.head_points = head_points;
sf.verts_inds = verts_inds;
sf.facet_count = facet_count;
-
- //thrust::counting_iterator<int> b(0);
+
+ //thrust::counting_iterator<int> b(0);
//thrust::for_each(b, b + facet_count, sf);
splitFacetsKernel<<<divUp(facet_count, 256), 256>>>(sf);
thrust::device_ptr<int> beg(indeces.ptr());
thrust::device_ptr<int> end = beg + indeces.size();
- thrust::sort(beg, end);
- return (std::size_t)(thrust::unique(beg, end) - beg);
+ thrust::sort(beg, end);
+ return (std::size_t)(thrust::unique(beg, end) - beg);
}
{
output.create(indeces.size());
- //thrust::device_ptr<const PointType> in(points.ptr());
-
+ //thrust::device_ptr<const PointType> in(points.ptr());
+
//thrust::device_ptr<const int> mb(indeces.ptr());
//thrust::device_ptr<const int> me = mb + indeces.size();
- //thrust::device_ptr<PointType> out(output.ptr());
+ //thrust::device_ptr<PointType> out(output.ptr());
//thrust::gather(mb, me, in, out);
-
+
gatherKernel<<<divUp(indeces.size(), 256), 256>>>(indeces, points, output);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
set(the_test_target test_gpu_surface)
-get_filename_component(DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
-get_filename_component(INC_SURF "${DIR}/../../surface/include" REALPATH)
-get_filename_component(INC_IO "${DIR}/../../../io/include" REALPATH)
-get_filename_component(INC_VIZ "${DIR}/../../../visualization/include" REALPATH)
-get_filename_component(INC_GEO "${DIR}/../../../geometry/include" REALPATH)
-get_filename_component(INC_SURF_CPU "${DIR}/../../../surface/include" REALPATH)
-get_filename_component(INC_SEA "${DIR}/../../../search/include" REALPATH)
-get_filename_component(INC_KD "${DIR}/../../../kdtree/include" REALPATH)
-get_filename_component(INC_OCT "${DIR}/../../../octree/include" REALPATH)
-include_directories("${INC_SURF}" "${INC_IO}" "${INC_VIZ}" "${INC_GEO}" "${INC_SURF_CPU}" "${INC_SEA}" "${INC_KD}" "${INC_OCT}")
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
-
file(GLOB test_src *.cpp *.hpp)
#PCL_ADD_TEST(a_gpu_surface_test ${the_test_target} FILES ${test_src} LINK_WITH pcl_io pcl_gpu_containers pcl_gpu_surface pcl_visualization pcl_surface pcl_octree pcl_kdtree pcl_search)
#add_dependencies(${the_test_target} pcl_io pcl_gpu_containes pcl_gpu_surface pcl_visualization)
set(SUBSYS_DESC "Tracking with GPU")
set(SUBSYS_DEPS common gpu_containers gpu_utils gpu_octree tracking search kdtree filters octree)
-set(build FALSE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
list(APPEND srcs ${utils} ${cuda})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
-target_link_libraries("${LIB_NAME}" pcl_gpu_containers)
+target_link_libraries(${LIB_NAME} pcl_gpu_containers)
+target_include_directories(${LIB_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
set(SUBSYS_NAME gpu_utils)
set(SUBSYS_PATH gpu/utils)
set(SUBSYS_DESC "Device layer functions.")
-set(SUBSYS_DEPS common gpu_containers)
+set(SUBSYS_DEPS common)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_SET_SUBSYS_INCLUDE_DIR("${SUBSYS_NAME}" "${SUBSYS_PATH}")
source_group("Source Files" FILES ${srcs})
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_CUDA_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${dev_incs} ${incs} ${srcs})
-target_link_libraries("${LIB_NAME}" pcl_gpu_containers)
+target_link_libraries("${LIB_NAME}" pcl_common)
set(EXT_DEPS "")
#set(EXT_DEPS CUDA)
{
// Function Objects
- using thrust::unary_function;
using thrust::binary_function;
// Arithmetic Operations
using thrust::bit_or;
using thrust::bit_xor;
- template <typename T> struct bit_not : unary_function<T, T>
+ template <typename T> struct bit_not
{
+ typedef T argument_type;
+ typedef T result_type;
__forceinline__ __device__ T operator ()(const T& v) const {return ~v;}
};
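For context: newer Thrust releases removed thrust::unary_function, which is why the functor above now declares argument_type/result_type itself instead of inheriting them. A minimal usage sketch, assuming the bit_not functor above is visible in the enclosing namespace (the helper name flip_bits is hypothetical, not part of this changeset):

#include <thrust/device_vector.h>
#include <thrust/transform.h>

// Flip every bit of each element in place. thrust::transform only needs a
// callable operator(), so no unary_function base class is required.
inline void flip_bits(thrust::device_vector<unsigned int>& v)
{
  thrust::transform(v.begin(), v.end(), v.begin(), bit_not<unsigned int>());
}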
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-PCL_DEPRECATED_HEADER(1, 15, "pcl::device::Static will be removed at PCL release 1.15");
-
-#ifndef PCL_GPU_DEVICE_STATIC_CHECK_HPP_
-#define PCL_GPU_DEVICE_STATIC_CHECK_HPP_
-
-#if defined(__CUDACC__)
-#define __PCL_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
-#else
-#define __PCL_GPU_HOST_DEVICE__
-#endif
-
-namespace pcl {
-namespace device {
-template <bool expr>
-struct Static {};
-
-template <>
-struct [[deprecated("This class will be replaced at PCL release 1.15 by "
- "c++11's static_assert instead")]] Static<true>
-{
- __PCL_GPU_HOST_DEVICE__ static void check(){};
-};
-} // namespace device
-
-namespace gpu {
-using pcl::device::Static;
-}
-} // namespace pcl
-
-#undef __PCL_GPU_HOST_DEVICE__
-
-#endif /* PCL_GPU_DEVICE_STATIC_CHECK_HPP_ */
////////////////////////////////
- // tempalted operations vectors
+  // templated operations on vectors
template<typename T> __device__ __host__ __forceinline__ float norm(const T& val)
{
#ifndef __PCL_GPU_UTILS_REPACKS_HPP__
#define __PCL_GPU_UTILS_REPACKS_HPP__
-#include <pcl/pcl_macros.h>
-#include <pcl/point_types.h>
+PCL_DEPRECATED_HEADER(1, 17, "This has been deprecated, please include it from pcl/gpu/containers.")
-#include <pcl/gpu/containers/device_array.h>
-
-namespace pcl
-{
- namespace gpu
- {
- ///////////////////////////////////////
- /// This is an experimental code ///
- ///////////////////////////////////////
-
- const int NoCP = 0xFFFFFFFF;
-
- /** \brief Returns field copy operation code. */
- inline int cp(int from, int to)
- {
- return ((to & 0xF) << 4) + (from & 0xF);
- }
-
- /* Combines several field copy operations to one int (called rule) */
- inline int rule(int cp1, int cp2 = NoCP, int cp3 = NoCP, int cp4 = NoCP)
- {
- return (cp1 & 0xFF) + ((cp2 & 0xFF) << 8) + ((cp3 & 0xFF) << 16) + ((cp4 & 0xFF) << 24);
- }
-
- /* Combines performs all field copy operations in given rule array (can be 0, 1, or 16 copies) */
- void copyFieldsImpl(int in_size, int out_size, int rules[4], int size, const void* input, void* output);
-
- template<typename PointIn, typename PointOut>
- void copyFieldsEx(const DeviceArray<PointIn>& src, DeviceArray<PointOut>& dst, int rule1, int rule2 = NoCP, int rule3 = NoCP, int rule4 = NoCP)
- {
- int rules[4] = { rule1, rule2, rule3, rule4 };
- dst.create(src.size());
- copyFieldsImpl(sizeof(PointIn)/sizeof(int), sizeof(PointOut)/sizeof(int), rules, (int)src.size(), src.ptr(), dst.ptr());
- }
-
- void copyFields(const DeviceArray<PointXYZ>& src, DeviceArray<PointNormal>& dst)
- {
- //PointXYZ.x (0) -> PointNormal.x (0)
- //PointXYZ.y (1) -> PointNormal.y (1)
- //PointXYZ.z (2) -> PointNormal.z (2)
- copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
- };
-
- void copyFields(const DeviceArray<Normal>& src, DeviceArray<PointNormal>& dst)
- {
- //PointXYZ.normal_x (0) -> PointNormal.normal_x (4)
- //PointXYZ.normal_y (1) -> PointNormal.normal_y (5)
- //PointXYZ.normal_z (2) -> PointNormal.normal_z (6)
- //PointXYZ.curvature (4) -> PointNormal.curvature (8)
- copyFieldsEx(src, dst, rule(cp(0, 4), cp(1, 5), cp(2, 6), cp(4,8)));
- };
-
- void copyFields(const DeviceArray<PointXYZRGBL>& src, DeviceArray<PointXYZ>& dst)
- {
- //PointXYZRGBL.x (0) -> PointXYZ.x (0)
- //PointXYZRGBL.y (1) -> PointXYZ.y (1)
- //PointXYZRGBL.z (2) -> PointXYZ.z (2)
- copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
- };
-
- void copyFields(const DeviceArray<PointXYZRGB>& src, DeviceArray<PointXYZ>& dst)
- {
- //PointXYZRGB.x (0) -> PointXYZ.x (0)
- //PointXYZRGB.y (1) -> PointXYZ.y (1)
- //PointXYZRGB.z (2) -> PointXYZ.z (2)
- copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
- };
-
- void copyFields(const DeviceArray<PointXYZRGBA>& src, DeviceArray<PointXYZ>& dst)
- {
- //PointXYZRGBA.x (0) -> PointXYZ.x (0)
- //PointXYZRGBA.y (1) -> PointXYZ.y (1)
- //PointXYZRGBA.z (2) -> PointXYZ.z (2)
- copyFieldsEx(src, dst, rule(cp(0, 0), cp(1, 1), cp(2, 2)));
- };
-
- void copyFieldsZ(const DeviceArray<PointXYZ>& src, DeviceArray<float>& dst)
- {
- //PointXYZRGBL.z (2) -> float (1)
- copyFieldsEx(src, dst, rule(cp(2, 0)));
- };
-
- void copyFieldsZ(const DeviceArray<PointXYZRGB>& src, DeviceArray<float>& dst)
- {
- //PointXYZRGBL.z (2) -> float (1)
- copyFieldsEx(src, dst, rule(cp(2, 0)));
- };
- }
-}
#endif /* __PCL_GPU_UTILS_REPACKS_HPP__ */
#define __PCL_CUDA_SAFE_CALL_HPP__
#include <cuda_runtime_api.h>
-#include <pcl/gpu/containers/initialization.h>
+
+#include <cstdlib> // for exit, EXIT_FAILURE
+#include <iostream>
#if defined(__GNUC__)
- #define cudaSafeCall(expr) pcl::gpu::___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
+#define cudaSafeCall(expr) pcl::gpu::___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
- #define cudaSafeCall(expr) pcl::gpu::___cudaSafeCall(expr, __FILE__, __LINE__)
+#define cudaSafeCall(expr) pcl::gpu::___cudaSafeCall(expr, __FILE__, __LINE__)
#endif
-namespace pcl
-{
- namespace gpu
- {
- static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
- {
- if (cudaSuccess != err)
- error(cudaGetErrorString(err), file, line, func);
- }
+namespace pcl {
+namespace gpu {
- static inline int divUp(int total, int grain) { return (total + grain - 1) / grain; }
- }
+static inline void
+___cudaSafeCall(cudaError_t err,
+ const char* file,
+ const int line,
+ const char* func = "")
+{
+ if (cudaSuccess != err) {
+ std::cout << "Error: " << cudaGetErrorString(err) << "\t" << file << ":" << line
+ << ":" << func << std::endl;
+ exit(EXIT_FAILURE);
+ }
+}
- namespace device
- {
- using pcl::gpu::divUp;
- }
+static inline int
+divUp(int total, int grain)
+{
+ return (total + grain - 1) / grain;
}
+} // namespace gpu
+namespace device {
+using pcl::gpu::divUp;
+}
+} // namespace pcl
#endif /* __PCL_CUDA_SAFE_CALL_HPP__ */
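For context: divUp is the standard ceiling division used to size CUDA launch grids, and cudaSafeCall checks runtime-API return codes. A minimal sketch of how the two are typically combined, assuming the safe_call.hpp header above is included (the kernel and host helper names are hypothetical):

__global__ void scaleKernel(float* data, int n, float factor)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    data[i] *= factor;
}

void scaleOnDevice(float* d_data, int n, float factor)
{
  const int block = 256;
  const int grid = pcl::gpu::divUp(n, block); // ceil(n / block)
  scaleKernel<<<grid, block>>>(d_data, n, factor);
  cudaSafeCall(cudaGetLastError());
  cudaSafeCall(cudaDeviceSynchronize());
}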
#ifndef PCL_GPU_UTILS_TEXTURE_BINDER_HPP_
#define PCL_GPU_UTILS_TEXTURE_BINDER_HPP_
-#include <pcl/gpu/utils/safe_call.hpp>
-#include <pcl/gpu/containers/device_array.h>
+PCL_DEPRECATED_HEADER(1, 17, "This has been deprecated, please include it from pcl/gpu/containers.")
-namespace pcl
-{
- namespace gpu
- {
- class TextureBinder
- {
- public:
- template<class T, enum cudaTextureReadMode readMode>
- TextureBinder(const DeviceArray2D<T>& arr, const struct texture<T, 2, readMode>& tex) : texref(&tex)
- {
- cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
- cudaSafeCall( cudaBindTexture2D(0, tex, arr.ptr(), desc, arr.cols(), arr.rows(), arr.step()) );
- }
-
- template<class T, enum cudaTextureReadMode readMode>
- TextureBinder(const DeviceArray<T>& arr, const struct texture<T, 1, readMode> &tex) : texref(&tex)
- {
- cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
- cudaSafeCall( cudaBindTexture(0, tex, arr.ptr(), desc, arr.sizeBytes()) );
- }
-
- template<class T, enum cudaTextureReadMode readMode>
- TextureBinder(const PtrStepSz<T>& arr, const struct texture<T, 2, readMode>& tex) : texref(&tex)
- {
- cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
- cudaSafeCall( cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step) );
- }
-
- template<class T, enum cudaTextureReadMode readMode>
- TextureBinder(const PtrSz<T>& arr, const struct texture<T, 1, readMode> &tex) : texref(&tex)
- {
- cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
- cudaSafeCall( cudaBindTexture(0, tex, arr.data, desc, arr.size * arr.elemSize()) );
- }
-
- ~TextureBinder()
- {
- cudaSafeCall( cudaUnbindTexture(texref) );
- }
- private:
- const struct textureReference *texref;
- };
- }
-
- namespace device
- {
- using pcl::gpu::TextureBinder;
- }
-}
-
-#endif /* PCL_GPU_UTILS_TEXTURE_BINDER_HPP_*/
\ No newline at end of file
+#endif /* PCL_GPU_UTILS_TEXTURE_BINDER_HPP_*/
--- /dev/null
+// Added empty file for CMake to determine the link language.
#ifndef PCL_GPU_UTILS_INTERNAL_HPP_
#define PCL_GPU_UTILS_INTERNAL_HPP_
+#include <pcl/pcl_macros.h>
+
+PCL_DEPRECATED_HEADER(1, 17, "This has been deprecated and will be removed.")
+
+
namespace pcl
{
namespace device
}
}
-#endif
\ No newline at end of file
+#endif
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#include <cassert>
-
-#include <pcl/gpu/utils/repacks.hpp>
-#include "internal.hpp"
-
+++ /dev/null
-#include "internal.hpp"
-#include <algorithm>
-#include "pcl/gpu/utils/safe_call.hpp"
-#include "pcl/pcl_exports.h"
-
-namespace pcl
-{
- namespace device
- {
- struct Info
- {
- enum { SIZE = 4 };
- int data[SIZE];
- };
-
- template<int n>
- struct Point
- {
- int data[n];
- };
-
- template<int in, int out, typename Info>
- __global__ void deviceCopyFields4B(const Info info, const int size, const void* input, void* output)
- {
- int idx = blockIdx.x * blockDim.x + threadIdx.x;
-
- if (idx < size)
- {
- Point<in> point_in = reinterpret_cast<const Point<in>* >( input)[idx];
- Point<out> point_out = reinterpret_cast< Point<out>* >(output)[idx];
-
- for(int i = 0; i < Info::SIZE; ++i)
- {
- int byte;
- int code = info.data[i];
-
- byte = ((code >> 0) & 0xFF);
-
- if (byte == 0xFF)
- break;
- else
- point_out.data[byte >> 4] = point_in.data[byte & 0xF];
-
- byte = ((code >> 8) & 0xFF);
-
- if (byte == 0xFF)
- break;
- else
- point_out.data[byte >> 4] = point_in.data[byte & 0xF];
-
- byte = ((code >> 16) & 0xFF);
-
- if (byte == 0xFF)
- break;
- else
- point_out.data[byte >> 4] = point_in.data[byte & 0xF];
-
- byte = ((code >> 24) & 0xFF);
-
- if (byte == 0xFF)
- break;
- else
- point_out.data[byte >> 4] = point_in.data[byte & 0xF];
- }
-
- reinterpret_cast< Point<out>* >(output)[idx] = point_out;
- }
- };
-
- template<int in_size, int out_size>
- void cf(int info[4], int size, const void* input, void* output)
- {
- Info i;
- std::copy(info, info + 4, i.data);
-
- dim3 block(256);
- dim3 grid(divUp(size, block.x));
-
- deviceCopyFields4B<in_size, out_size><<<grid, block>>>(i, size, input, output);
- cudaSafeCall ( cudaGetLastError () );
- cudaSafeCall (cudaDeviceSynchronize ());
- }
-
- using copy_fields_t = void (*)(int info[4], int size, const void* input, void* output);
- }
-}
-
-namespace pcl
-{
- namespace gpu
- {
- using namespace pcl::device;
-
- PCL_EXPORTS void copyFieldsImpl(int in_size, int out_size, int rules[4], int size, const void* input, void* output)
- {
- pcl::device::copy_fields_t funcs[16][16] =
- {
- { /**/ cf<1,1>, cf<1, 2>, cf<1, 3>, cf<1, 4>, /**/ cf<1, 5>, cf<1, 6>, cf<1, 7>, cf<1, 8>, /**/ cf<1, 9>, cf<1,10>, cf<1, 11>, cf<1, 12>, /**/ cf<1, 13>, cf<1, 14>, cf<1, 15>, cf<1,16> },
- { /**/ cf<2,1>, cf<2, 2>, cf<2, 3>, cf<2, 4>, /**/ cf<2, 5>, cf<2, 6>, cf<2, 7>, cf<2, 8>, /**/ cf<2, 9>, cf<1,10>, cf<2, 11>, cf<2, 12>, /**/ cf<2, 13>, cf<2, 14>, cf<2, 15>, cf<2,16> },
- { /**/ cf<3,1>, cf<3, 2>, cf<3, 3>, cf<3, 4>, /**/ cf<3, 5>, cf<3, 6>, cf<3, 7>, cf<3, 8>, /**/ cf<3, 9>, cf<1,10>, cf<3, 11>, cf<3, 12>, /**/ cf<3, 13>, cf<3, 14>, cf<3, 15>, cf<3,16> },
- { /**/ cf<4,1>, cf<4, 2>, cf<4, 3>, cf<4, 4>, /**/ cf<4, 5>, cf<4, 6>, cf<4, 7>, cf<4, 8>, /**/ cf<4, 9>, cf<1,10>, cf<4, 11>, cf<4, 12>, /**/ cf<4, 13>, cf<4, 14>, cf<4, 15>, cf<4,16> },
- { /**/ cf<5,1>, cf<5, 2>, cf<5, 3>, cf<5, 4>, /**/ cf<5, 5>, cf<5, 6>, cf<5, 7>, cf<5, 8>, /**/ cf<5, 9>, cf<1,10>, cf<5, 11>, cf<5, 12>, /**/ cf<5, 13>, cf<5, 14>, cf<5, 15>, cf<5,16> },
- { /**/ cf<6,1>, cf<6, 2>, cf<6, 3>, cf<6, 4>, /**/ cf<6, 5>, cf<6, 6>, cf<6, 7>, cf<6, 8>, /**/ cf<6, 9>, cf<1,10>, cf<6, 11>, cf<6, 12>, /**/ cf<6, 13>, cf<6, 14>, cf<6, 15>, cf<6,16> },
- { /**/ cf<7,1>, cf<7, 2>, cf<7, 3>, cf<7, 4>, /**/ cf<7, 5>, cf<7, 6>, cf<7, 7>, cf<7, 8>, /**/ cf<7, 9>, cf<1,10>, cf<7, 11>, cf<7, 12>, /**/ cf<7, 13>, cf<7, 14>, cf<7, 15>, cf<7,16> },
- { /**/ cf<8,1>, cf<8, 2>, cf<8, 3>, cf<8, 4>, /**/ cf<8, 5>, cf<8, 6>, cf<8, 7>, cf<8, 8>, /**/ cf<8, 9>, cf<1,10>, cf<8, 11>, cf<8, 12>, /**/ cf<8, 13>, cf<8, 14>, cf<8, 15>, cf<8,16> },
- { /**/ cf<9,1>, cf<9, 2>, cf<9, 3>, cf<9, 4>, /**/ cf<9, 5>, cf<9, 6>, cf<9, 7>, cf<9, 8>, /**/ cf<9, 9>, cf<1,10>, cf<9, 11>, cf<9, 12>, /**/ cf<9, 13>, cf<9, 14>, cf<9, 15>, cf<9,16> },
- { /**/ cf<10,1>, cf<10,2>, cf<10,3>, cf<10,4>, /**/ cf<10,5>, cf<10,6>, cf<10,7>, cf<10,8>, /**/ cf<10,9>, cf<1,10>, cf<10,11>, cf<10,12>, /**/ cf<10,13>, cf<10,14>, cf<10,15>, cf<10,16> },
- { /**/ cf<11,1>, cf<11,2>, cf<11,3>, cf<11,4>, /**/ cf<11,5>, cf<11,6>, cf<11,7>, cf<11,8>, /**/ cf<11,9>, cf<1,10>, cf<11,11>, cf<11,12>, /**/ cf<11,13>, cf<11,14>, cf<11,15>, cf<11,16> },
- { /**/ cf<12,1>, cf<12,2>, cf<12,3>, cf<12,4>, /**/ cf<12,5>, cf<12,6>, cf<12,7>, cf<12,8>, /**/ cf<12,9>, cf<1,10>, cf<12,11>, cf<12,12>, /**/ cf<12,13>, cf<12,14>, cf<12,15>, cf<12,16> },
- { /**/ cf<13,1>, cf<13,2>, cf<13,3>, cf<13,4>, /**/ cf<13,5>, cf<13,6>, cf<13,7>, cf<13,8>, /**/ cf<13,9>, cf<1,10>, cf<13,11>, cf<13,12>, /**/ cf<13,13>, cf<13,14>, cf<13,15>, cf<13,16> },
- { /**/ cf<14,1>, cf<14,2>, cf<14,3>, cf<14,4>, /**/ cf<14,5>, cf<14,6>, cf<14,7>, cf<14,8>, /**/ cf<14,9>, cf<1,10>, cf<14,11>, cf<14,12>, /**/ cf<14,13>, cf<14,14>, cf<14,15>, cf<14,16> },
- { /**/ cf<15,1>, cf<15,2>, cf<15,3>, cf<15,4>, /**/ cf<15,5>, cf<15,6>, cf<15,7>, cf<15,8>, /**/ cf<15,9>, cf<1,10>, cf<15,11>, cf<15,12>, /**/ cf<15,13>, cf<15,14>, cf<15,15>, cf<15,16> },
- { /**/ cf<16,1>, cf<16,2>, cf<16,3>, cf<16,4>, /**/ cf<16,5>, cf<16,6>, cf<16,7>, cf<16,8>, /**/ cf<16,9>, cf<1,10>, cf<16,11>, cf<16,12>, /**/ cf<16,13>, cf<16,14>, cf<16,15>, cf<16,16> }
- };
-
- funcs[in_size-1][out_size-1](rules, size, input, output);
- }
- }
-}
-
set(SUBSYS_DEPS common octree)
set(SUBSYS_EXT_DEPS boost eigen3)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
if(WIN32)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS openni openni2 ensenso davidSDK dssdk rssdk rssdk2 pcap png vtk OpenMP EXT_DEPS ${SUBSYS_EXT_DEPS})
target_link_libraries(pcl_io_ply ws2_32)
endif()
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/ply" ${PLY_INCLUDES})
-target_include_directories(pcl_io_ply PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
target_link_libraries(pcl_io_ply pcl_common Boost::boost)
+PCL_MAKE_PKGCONFIG(pcl_io_ply COMPONENT ${SUBSYS_NAME} DESC "${SUBSYS_DESC}, PLY" PCL_DEPS common)
+
set(srcs
src/debayer.cpp
src/pcd_grabber.cpp
endif()
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/debayer.h"
"include/pcl/${SUBSYS_NAME}/fotonic_grabber.h"
"include/pcl/${SUBSYS_NAME}/file_io.h"
"include/pcl/${SUBSYS_NAME}/low_level_io.h"
"include/pcl/${SUBSYS_NAME}/lzf.h"
"include/pcl/${SUBSYS_NAME}/lzf_image_io.h"
- "include/pcl/${SUBSYS_NAME}/io.h"
"include/pcl/${SUBSYS_NAME}/grabber.h"
"include/pcl/${SUBSYS_NAME}/file_grabber.h"
"include/pcl/${SUBSYS_NAME}/timestamp.h"
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${compression_incs} ${impl_incs} ${OPENNI_INCLUDES} ${OPENNI2_INCLUDES})
-target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
-
-target_link_libraries("${LIB_NAME}" Boost::boost Boost::filesystem Boost::iostreams pcl_common pcl_io_ply)
+target_link_libraries("${LIB_NAME}" Boost::boost Boost::iostreams pcl_common pcl_io_ply pcl_octree)
+if(TARGET Boost::filesystem)
+ target_link_libraries("${LIB_NAME}" Boost::filesystem)
+endif()
if(VTK_FOUND)
if(${VTK_VERSION} VERSION_GREATER_EQUAL 9.0)
endif()
if(PNG_FOUND)
+ target_include_directories("${LIB_NAME}" SYSTEM PRIVATE ${PNG_INCLUDE_DIRS})
target_link_libraries("${LIB_NAME}" ${PNG_LIBRARIES})
endif()
if(WITH_ENSENSO)
list(APPEND EXT_DEPS ensenso)
endif()
+list(APPEND EXT_DEPS pcl_io_ply)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS} EXT_DEPS ${EXT_DEPS})
namespace io
{
/** \brief @b Octree pointcloud compression class
- * \note This class enables compression and decompression of point cloud data based on octree data structures.
+ * \note This class enables compression and decompression of point cloud data based on octree data structures. It is a lossy compression. See also `PCDWriter` for another way to compress point cloud data.
* \note
* \note typename: PointT: type of point used in pointcloud
* \author Julius Kammerl (julius@kammerl.de)
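The note above marks octree compression as lossy; a minimal round-trip sketch, assuming PCL's compression API as shipped in <pcl/compression/octree_pointcloud_compression.h> (the cloud contents and chosen profile are placeholders, not taken from this changeset):

#include <pcl/compression/octree_pointcloud_compression.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

#include <sstream>

int main()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
  cloud->emplace_back(1.0f, 2.0f, 3.0f);

  // Encode into an in-memory stream, then decode into a second cloud.
  pcl::io::OctreePointCloudCompression<pcl::PointXYZ> codec(
      pcl::io::MED_RES_ONLINE_COMPRESSION_WITHOUT_COLOR);
  std::stringstream compressed;
  codec.encodePointCloud(cloud, compressed);

  pcl::PointCloud<pcl::PointXYZ>::Ptr decoded(new pcl::PointCloud<pcl::PointXYZ>);
  codec.decodePointCloud(compressed, decoded);
  // Coordinates in 'decoded' may differ slightly from the input: the codec is lossy.
  return 0;
}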
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2011-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-//https://bugreports.qt-project.org/browse/QTBUG-22829
-#ifndef Q_MOC_RUN
-#ifndef __CUDACC__
-#include <boost/version.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-#include <boost/filesystem.hpp>
-#include <boost/mpl/fold.hpp>
-#include <boost/mpl/inherit.hpp>
-#include <boost/mpl/inherit_linearly.hpp>
-#include <boost/mpl/joint_view.hpp>
-#include <boost/mpl/transform.hpp>
-#include <boost/mpl/vector.hpp>
-#include <boost/tokenizer.hpp>
-#include <boost/foreach.hpp>
-#include <boost/shared_array.hpp>
-#include <boost/interprocess/permissions.hpp>
-#include <boost/iostreams/device/mapped_file.hpp>
-#define BOOST_PARAMETER_MAX_ARITY 7
-#include <boost/signals2.hpp>
-#include <boost/signals2/slot.hpp>
-#endif
-#include <boost/algorithm/string.hpp>
-#include <boost/interprocess/sync/file_lock.hpp>
-#endif
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2011-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-
-#include <Eigen/Core>
boost::signals2::connection ret = signal->connect (callback);
connections_[typeid (T).name ()].push_back (ret);
- shared_connections_[typeid (T).name ()].push_back (boost::signals2::shared_connection_block (connections_[typeid (T).name ()].back (), false));
+ shared_connections_[typeid (T).name ()].emplace_back(connections_[typeid (T).name ()].back (), false);
signalsChanged ();
return (ret);
}
boost::asio::ip::udp::endpoint udp_listener_endpoint_;
boost::asio::ip::address source_address_filter_;
std::uint16_t source_port_filter_;
- boost::asio::io_service hdl_read_socket_service_;
+ boost::asio::io_context hdl_read_socket_service_;
boost::asio::ip::udp::socket *hdl_read_socket_;
std::string pcap_file_name_;
std::thread *queue_consumer_thread_;
#ifndef PCL_IO_AUTO_IO_IMPL_H_
#define PCL_IO_AUTO_IO_IMPL_H_
+#include <pcl/common/pcl_filesystem.h>
+
#include <pcl/io/obj_io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/ifs_io.h>
-#include <boost/filesystem.hpp> // for path
namespace pcl
{
template<typename PointT> int
load (const std::string& file_name, pcl::PointCloud<PointT>& cloud)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".pcd")
template<typename PointT> int
save (const std::string& file_name, const pcl::PointCloud<PointT>& cloud)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".pcd")
}
}
-#endif //PCL_IO_AUTO_IO_IMPL_H_
\ No newline at end of file
+#endif //PCL_IO_AUTO_IO_IMPL_H_
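A minimal sketch of how the pcl_fs alias introduced above is meant to be consumed, assuming pcl/common/pcl_filesystem.h maps pcl_fs to either std::filesystem or boost::filesystem (selected via PCL_USING_STD_FILESYSTEM); the helper name is illustrative, but the dispatch-by-extension pattern matches the load/save code above:

    #include <pcl/common/pcl_filesystem.h>
    #include <cctype>
    #include <string>

    // Hypothetical helper: extract and normalise the extension via pcl_fs,
    // mirroring the extension-based dispatch used by pcl::io::load()/save().
    inline std::string
    file_extension (const std::string& file_name)
    {
      pcl_fs::path p (file_name);
      std::string ext = p.extension ().string ();  // e.g. ".pcd", ".ply", ""
      for (auto& c : ext)
        c = static_cast<char> (std::tolower (static_cast<unsigned char> (c)));
      return ext;
    }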
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-#include <pcl/pcl_macros.h> // for PCL_DEPRECATED_HEADER
-PCL_DEPRECATED_HEADER(1, 15, "Please include pcl/common/io.h directly.")
-#include <pcl/common/io.h>
// All other errors are passed up.
if (errno != EINVAL)
return -1;
+# elif defined(__OpenBSD__)
+ // OpenBSD has neither posix_fallocate nor fallocate
+ if (::ftruncate(fd, length) == 0)
+ return 0;
+ if (errno != EINVAL)
+ return -1;
# else
// Conforming POSIX systems have posix_fallocate.
const int res = ::posix_fallocate(fd, 0, length);
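For reference, a self-contained sketch of the preallocation fallback chain the hunk above extends; the function name and the exact platform guards are illustrative, the real logic lives in the low-level I/O header:

    #include <cerrno>
    #include <fcntl.h>    // posix_fallocate (POSIX)
    #include <unistd.h>   // ftruncate

    // Grow a file to `length` bytes, preferring real preallocation where the
    // platform offers it and falling back to ftruncate() elsewhere.
    inline int
    preallocate_file (int fd, off_t length)
    {
    #if defined(__OpenBSD__)
      // OpenBSD has neither posix_fallocate nor fallocate.
      return ::ftruncate (fd, length) == 0 ? 0 : -1;
    #else
      // Conforming POSIX systems provide posix_fallocate (returns an errno value).
      const int res = ::posix_fallocate (fd, 0, length);
      if (res == 0)
        return 0;
      // EINVAL can mean the filesystem does not support preallocation; fall back.
      return (res == EINVAL && ::ftruncate (fd, length) == 0) ? 0 : -1;
    #endif
    }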
void
setDepthRegistration (bool on_off);
- /** \return whether the depth stream is registered to the RGB camera fram or not. */
+ /** \return whether the depth stream is registered to the RGB camera frame or not. */
bool
isDepthRegistered () const noexcept;
#include <string>
#include <tuple>
#include <vector>
+#include <functional>
#include <boost/lexical_cast.hpp> // for lexical_cast
#include <boost/mpl/fold.hpp> // for fold
#include <boost/mpl/inherit.hpp> // for inherit
const Eigen::Quaternionf &orientation = Eigen::Quaternionf::Identity (),
int precision = 8,
bool use_camera = true);
-
+ /** \brief Save point cloud data to a std::ostream containing n-D points, in BINARY format
+ * \param[in] os the output buffer
+ * \param[in] cloud the point cloud data message
+ * \param[in] origin the sensor data acquisition origin (translation)
+ * \param[in] orientation the sensor data acquisition origin (rotation)
+ * \param[in] use_camera if set to true then PLY file will use element camera,
+ * else element range_grid will be used
+ */
+ int
+ writeBinary(std::ostream& os, const pcl::PCLPointCloud2& cloud,
+ const Eigen::Vector4f& origin = Eigen::Vector4f::Zero (), const Eigen::Quaternionf& orientation= Eigen::Quaternionf::Identity (),
+ bool use_camera=true);
+
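A short usage sketch for the std::ostream overload declared above; the in-memory buffer and the helper name are illustrative:

    #include <pcl/io/ply_io.h>
    #include <pcl/PCLPointCloud2.h>
    #include <sstream>
    #include <string>

    // Serialize a PCLPointCloud2 to an in-memory binary PLY buffer instead of a file.
    inline std::string
    cloud_to_ply_buffer (const pcl::PCLPointCloud2& cloud)
    {
      std::ostringstream buffer (std::ios::binary);
      pcl::PLYWriter writer;
      if (writer.writeBinary (buffer, cloud) != 0)
        return {};           // writing failed, e.g. empty cloud
      return buffer.str ();  // raw PLY bytes, ready to send or store
    }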
/** \brief Save point cloud data to a PLY file containing n-D points, in BINARY format
* \param[in] file_name the output file name
* \param[in] cloud the point cloud data message
boost::asio::ip::address sensor_address_;
boost::asio::ip::udp::endpoint sender_endpoint_;
- boost::asio::io_service io_service_;
+ boost::asio::io_context io_service_;
std::shared_ptr<boost::asio::ip::udp::socket> socket_;
std::shared_ptr<std::thread> socket_thread_;
std::shared_ptr<std::thread> consumer_thread_;
std::vector<float> distances_;
boost::asio::ip::tcp::endpoint tcp_endpoint_;
- boost::asio::io_service tim_io_service_;
+ boost::asio::io_context tim_io_service_;
boost::asio::ip::tcp::socket tim_socket_;
//// wait time for receiving data (on the order of milliseconds)
unsigned int wait_time_milliseconds_ = 0;
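A minimal sketch of the boost::asio pattern these members now follow after the io_service to io_context rename; the struct and member names are illustrative, not the grabbers' real ones:

    #include <boost/asio.hpp>

    // io_context replaces the deprecated io_service; restart() replaces reset()
    // when the same context is reused for another run() cycle.
    struct UdpListenerSketch {
      boost::asio::io_context io_context_;
      boost::asio::ip::udp::socket socket_{io_context_};

      void run_once ()
      {
        io_context_.restart ();  // clear the "stopped" flag from a previous run
        io_context_.run ();      // dispatch pending asynchronous handlers
      }
    };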
/** \brief Load a \ref PolygonMesh object given an input file name, based on the file extension
* \param[in] file_name the name of the file containing the polygon data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
/** \brief Load a VTK file into a \ref PolygonMesh object
* \param[in] file_name the name of the file that contains the data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
/** \brief Load a PLY file into a \ref PolygonMesh object
* \param[in] file_name the name of the file that contains the data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
/** \brief Load an OBJ file into a \ref PolygonMesh object
* \param[in] file_name the name of the file that contains the data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
* load the material information.
* \param[in] file_name the name of the file that contains the data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
/** \brief Load an STL file into a \ref PolygonMesh object
* \param[in] file_name the name of the file that contains the data
* \param[out] mesh the object that we want to load the data in
+ * \return Number of points in the point cloud of the mesh.
* \ingroup io
*/
PCL_EXPORTS int
*/
#include <pcl/common/io.h> // for getFieldSize
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/io/ascii_io.h>
#include <istream>
#include <fstream>
-#include <boost/filesystem.hpp>
+
#include <boost/lexical_cast.hpp> // for lexical_cast
#include <boost/algorithm/string.hpp> // for split
#include <cstdint>
{
pcl::utils::ignore(offset); //offset is not used for ascii file implementation
- boost::filesystem::path fpath = file_name;
+ pcl_fs::path fpath = file_name;
- if (!boost::filesystem::exists (fpath))
+ if (!pcl_fs::exists (fpath))
{
PCL_ERROR ("[%s] File %s does not exist.\n", name_.c_str (), file_name.c_str ());
return (-1);
#define ASSIGN_TOKEN(CASE_LABEL) \
case CASE_LABEL: { \
*(reinterpret_cast<pcl::traits::asType_t<CASE_LABEL>*>(data_target)) = \
- boost::lexical_cast<pcl::traits::asType_t<CASE_LABEL>>(token); \
+ boost::lexical_cast<pcl::traits::asType_t<(CASE_LABEL)>>(token); \
return sizeof(pcl::traits::asType_t<CASE_LABEL>); \
}
switch (field.datatype)
int
pcl::io::load (const std::string& file_name, pcl::PCLPointCloud2& blob)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".pcd")
int
pcl::io::load (const std::string& file_name, pcl::PolygonMesh& mesh)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".ply")
int
pcl::io::load (const std::string& file_name, pcl::TextureMesh& mesh)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".obj")
int
pcl::io::save (const std::string& file_name, const pcl::PCLPointCloud2& blob, unsigned precision)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".pcd")
int
pcl::io::save (const std::string &file_name, const pcl::TextureMesh &tex_mesh, unsigned precision)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".obj")
int
pcl::io::save (const std::string &file_name, const pcl::PolygonMesh &poly_mesh, unsigned precision)
{
- boost::filesystem::path p (file_name.c_str ());
+ pcl_fs::path p (file_name.c_str ());
std::string extension = p.extension ().string ();
int result = -1;
if (extension == ".ply")
boost::asio::ip::address
pcl::HDLGrabber::getDefaultNetworkAddress ()
{
- return (boost::asio::ip::address::from_string ("192.168.3.255"));
+ return (boost::asio::ip::make_address ("192.168.3.255"));
}
/////////////////////////////////////////////////////////////////////////////
#include <cstring>
#include <cerrno>
-#include <boost/filesystem.hpp> // for exists
#include <boost/iostreams/device/mapped_file.hpp> // for mapped_file_source
///////////////////////////////////////////////////////////////////////////////////////////
//cloud.is_dense = true;
std::uint32_t nr_points = 0;
+
+ if (file_name.empty ())
+ {
+ PCL_ERROR ("[pcl::IFSReader::readHeader] No file name given!\n");
+ return (-1);
+ }
+
std::ifstream fs;
+ fs.open (file_name.c_str (), std::ios::binary);
- if (file_name.empty() || !boost::filesystem::exists (file_name))
+ if (!fs.good ())
{
PCL_ERROR ("[pcl::IFSReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
return (-1);
}
- fs.open (file_name.c_str (), std::ios::binary);
if (!fs.is_open () || fs.fail ())
{
PCL_ERROR ("[pcl::IFSReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror(errno));
*
*/
// Looking for PCL_BUILT_WITH_VTK
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/for_each_type.h>
#include <pcl/io/timestamp.h>
#include <pcl/io/image_grabber.h>
#include <pcl/memory.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <boost/filesystem.hpp> // for exists, basename, is_directory, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
#ifdef PCL_BUILT_WITH_VTK
void
pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles (const std::string &dir)
{
- if (!boost::filesystem::exists (dir) || !boost::filesystem::is_directory (dir))
+ if (!pcl_fs::exists (dir) || !pcl_fs::is_directory (dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
" is not a directory: %s\n", dir.c_str ());
std::string pathname;
std::string extension;
std::string basename;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (dir); itr != end_itr; ++itr)
{
extension = boost::algorithm::to_upper_copy (itr->path ().extension ().string ());
pathname = itr->path ().string ();
basename = itr->path ().stem ().string ();
- if (!boost::filesystem::is_directory (itr->status ())
+ if (!pcl_fs::is_directory (itr->status ())
&& isValidExtension (extension))
{
if (basename.find ("rgb") < std::string::npos)
void
pcl::ImageGrabberBase::ImageGrabberImpl::loadDepthAndRGBFiles (const std::string &depth_dir, const std::string &rgb_dir)
{
- if (!boost::filesystem::exists (depth_dir) || !boost::filesystem::is_directory (depth_dir))
+ if (!pcl_fs::exists (depth_dir) || !pcl_fs::is_directory (depth_dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
" is not a directory: %s\n", depth_dir.c_str ());
return;
}
- if (!boost::filesystem::exists (rgb_dir) || !boost::filesystem::is_directory (rgb_dir))
+ if (!pcl_fs::exists (rgb_dir) || !pcl_fs::is_directory (rgb_dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadDepthAndRGBFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
" is not a directory: %s\n", rgb_dir.c_str ());
std::string pathname;
std::string extension;
std::string basename;
- boost::filesystem::directory_iterator end_itr;
+ pcl_fs::directory_iterator end_itr;
// First iterate over depth images
- for (boost::filesystem::directory_iterator itr (depth_dir); itr != end_itr; ++itr)
+ for (pcl_fs::directory_iterator itr (depth_dir); itr != end_itr; ++itr)
{
extension = boost::algorithm::to_upper_copy (itr->path ().extension ().string ());
pathname = itr->path ().string ();
basename = itr->path ().stem ().string ();
- if (!boost::filesystem::is_directory (itr->status ())
+ if (!pcl_fs::is_directory (itr->status ())
&& isValidExtension (extension))
{
if (basename.find ("depth") < std::string::npos)
}
}
// Then iterate over RGB images
- for (boost::filesystem::directory_iterator itr (rgb_dir); itr != end_itr; ++itr)
+ for (pcl_fs::directory_iterator itr (rgb_dir); itr != end_itr; ++itr)
{
extension = boost::algorithm::to_upper_copy (itr->path ().extension ().string ());
pathname = itr->path ().string ();
basename = itr->path ().stem ().string ();
- if (!boost::filesystem::is_directory (itr->status ())
+ if (!pcl_fs::is_directory (itr->status ())
&& isValidExtension (extension))
{
if (basename.find ("rgb") < std::string::npos)
void
pcl::ImageGrabberBase::ImageGrabberImpl::loadPCLZFFiles (const std::string &dir)
{
- if (!boost::filesystem::exists (dir) || !boost::filesystem::is_directory (dir))
+ if (!pcl_fs::exists (dir) || !pcl_fs::is_directory (dir))
{
PCL_ERROR ("[pcl::ImageGrabber::loadPCLZFFiles] Error: attempted to instantiate a pcl::ImageGrabber from a path which"
" is not a directory: %s\n", dir.c_str ());
std::string pathname;
std::string extension;
std::string basename;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (dir); itr != end_itr; ++itr)
{
extension = boost::algorithm::to_upper_copy (itr->path ().extension ().string ());
pathname = itr->path ().string ();
basename = itr->path ().stem ().string ();
- if (!boost::filesystem::is_directory (itr->status ())
+ if (!pcl_fs::is_directory (itr->status ())
&& isValidExtension (extension))
{
if (basename.find ("rgb") < std::string::npos)
{
// For now, we assume the file is of the form frame_[22-char POSIX timestamp]_*
char timestamp_str[256];
- int result = std::sscanf (boost::filesystem::path (filepath).stem ().string ().c_str (),
+ int result = std::sscanf (pcl_fs::path (filepath).stem ().string ().c_str (),
"frame_%22s_%*s",
timestamp_str);
if (result > 0)
double &cx,
double &cy) const
{
- if (idx > depth_pclzf_files_.size ())
+ if (idx >= depth_pclzf_files_.size ())
{
return (false);
}
pathname = impl_->depth_pclzf_files_[impl_->cur_frame_];
else
pathname = impl_->depth_image_files_[impl_->cur_frame_];
- std::string basename = boost::filesystem::path (pathname).stem ().string ();
+ std::string basename = pcl_fs::path (pathname).stem ().string ();
return (basename);
}
//////////////////////////////////////////////////////////////////////////////////////////
pathname = impl_->depth_pclzf_files_[impl_->cur_frame_-1];
else
pathname = impl_->depth_image_files_[impl_->cur_frame_-1];
- std::string basename = boost::filesystem::path (pathname).stem ().string ();
+ std::string basename = pcl_fs::path (pathname).stem ().string ();
return (basename);
}
pathname = impl_->depth_pclzf_files_[idx];
else
pathname = impl_->depth_image_files_[idx];
- std::string basename = boost::filesystem::path (pathname).stem ().string ();
+ std::string basename = pcl_fs::path (pathname).stem ().string ();
return (basename);
}
// IDX works because it is very similar to a multiplicative hash, e.g.
// ((h * 57321 >> (3*8 - HLOG)) & ((1 << (HLOG)) - 1))
-#define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & ((1 << (HLOG)) - 1))
+#define IDX(h) ((( (h) >> (3*8 - HLOG)) - (h) ) & ((1 << (HLOG)) - 1))
///////////////////////////////////////////////////////////////////////////////////////////
//
#include <pcl/io/low_level_io.h>
#include <pcl/io/lzf_image_io.h>
#include <pcl/io/lzf.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <fcntl.h>
#include <cstring>
-#include <boost/filesystem.hpp>
+
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
std::vector<char> &data,
std::uint32_t &uncompressed_size)
{
- if (filename.empty() || !boost::filesystem::exists (filename))
+ if (filename.empty() || !pcl_fs::exists (filename))
{
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Could not find file '%s'.\n", filename.c_str ());
return (false);
#include <pcl/io/obj_io.h>
#include <fstream>
#include <pcl/common/io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/time.h>
#include <pcl/io/split.h>
#include <boost/lexical_cast.hpp> // for lexical_cast
-#include <boost/filesystem.hpp> // for exists
#include <boost/algorithm/string.hpp> // for trim
pcl::MTLReader::MTLReader ()
pcl::MTLReader::read (const std::string& obj_file_name,
const std::string& mtl_file_name)
{
- if (obj_file_name.empty() || !boost::filesystem::exists (obj_file_name))
+ if (obj_file_name.empty ())
+ {
+ PCL_ERROR ("[pcl::MTLReader::read] No OBJ file name given!\n");
+ return (-1);
+ }
+
+ if (!pcl_fs::exists (obj_file_name))
{
PCL_ERROR ("[pcl::MTLReader::read] Could not find file '%s'!\n",
obj_file_name.c_str ());
return (-1);
}
- if (mtl_file_name.empty())
+ if (mtl_file_name.empty ())
{
PCL_ERROR ("[pcl::MTLReader::read] MTL file name is empty!\n");
return (-1);
}
- boost::filesystem::path obj_file_path (obj_file_name.c_str ());
- boost::filesystem::path mtl_file_path = obj_file_path.parent_path ();
+ pcl_fs::path obj_file_path (obj_file_name.c_str ());
+ pcl_fs::path mtl_file_path = obj_file_path.parent_path ();
mtl_file_path /= mtl_file_name;
return (read (mtl_file_path.string ()));
}
int
pcl::MTLReader::read (const std::string& mtl_file_path)
{
- if (mtl_file_path.empty() || !boost::filesystem::exists (mtl_file_path))
+ if (mtl_file_path.empty ())
{
- PCL_ERROR ("[pcl::MTLReader::read] Could not find file '%s'.\n", mtl_file_path.c_str ());
+ PCL_ERROR ("[pcl::MTLReader::read] No file name given!\n");
return (-1);
}
+ // Open file in binary mode to avoid problem of
+ // std::getline() corrupting the result of ifstream::tellg()
std::ifstream mtl_file;
mtl_file.open (mtl_file_path.c_str (), std::ios::binary);
+
+ if (!mtl_file.good ())
+ {
+ PCL_ERROR ("[pcl::MTLReader::read] Could not find file '%s'.\n", mtl_file_path.c_str ());
+ return (-1);
+ }
+
if (!mtl_file.is_open () || mtl_file.fail ())
{
PCL_ERROR ("[pcl::MTLReader::read] Could not open file '%s'! Error : %s\n",
std::string line;
std::vector<std::string> st;
- boost::filesystem::path parent_path = mtl_file_path.c_str ();
+ pcl_fs::path parent_path = mtl_file_path.c_str ();
parent_path = parent_path.parent_path ();
try
// Tokenize the line
pcl::split (st, line, "\t\r ");
- // Ignore comments
- if (st[0] == "#")
+ // Ignore comments and lines with only whitespace
+ if (st.empty() || st[0] == "#")
continue;
if (st[0] == "newmtl")
if (st[0] == "map_Kd")
{
- boost::filesystem::path full_path = parent_path;
+ pcl_fs::path full_path = parent_path;
full_path/= st.back ().c_str ();
materials_.back ().tex_file = full_path.string ();
continue;
data_type = 0;
data_idx = offset;
- std::ifstream fs;
std::string line;
- if (file_name.empty() || !boost::filesystem::exists (file_name))
+ if (file_name.empty ())
{
- PCL_ERROR ("[pcl::OBJReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
+ PCL_ERROR ("[pcl::OBJReader::readHeader] No file name given!\n");
return (-1);
}
// Open file in binary mode to avoid problem of
// std::getline() corrupting the result of ifstream::tellg()
+ std::ifstream fs;
fs.open (file_name.c_str (), std::ios::binary);
+
+ if (!fs.good ())
+ {
+ PCL_ERROR ("[pcl::OBJReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
+ return (-1);
+ }
+
if (!fs.is_open () || fs.fail ())
{
PCL_ERROR ("[pcl::OBJReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror(errno));
// Get normal_x and rgba fields indices
int normal_x_field = -1;
+ std::vector<Eigen::Vector3f> normals;
+
+ // indexed by vertex: accumulated normals {x, y, z} from the faces referencing that vertex
+ std::vector<Eigen::Vector3f> normal_mapping;
+ bool normal_mapping_used = false;
+
// std::size_t rgba_field = 0;
for (std::size_t i = 0; i < cloud.fields.size (); ++i)
if (cloud.fields[i].name == "normal_x")
{
+ normals.reserve(cloud.width);
+ normal_mapping.resize(cloud.width, Eigen::Vector3f::Zero());
normal_x_field = i;
break;
}
// Tokenize the line
pcl::split (st, line, "\t\r ");
- // Ignore comments
- if (st[0] == "#")
+ // Ignore comments and lines with only whitespace
+ if (st.empty() || st[0] == "#")
continue;
// Vertex
// Vertex normal
if (st[0] == "vn")
{
- if (normal_idx >= cloud.width)
- {
- if (normal_idx == cloud.width)
- PCL_WARN ("[pcl:OBJReader] Too many vertex normals (expected %d), skipping remaining normals.\n", cloud.width, normal_idx + 1);
- ++normal_idx;
- continue;
- }
try
{
- for (int i = 1, f = normal_x_field; i < 4; ++i, ++f)
- {
- float value = boost::lexical_cast<float> (st[i]);
- memcpy (&cloud.data[normal_idx * cloud.point_step + cloud.fields[f].offset],
- &value,
- sizeof (float));
- }
+ normals.emplace_back(
+ boost::lexical_cast<float> (st[1]),
+ boost::lexical_cast<float> (st[2]),
+ boost::lexical_cast<float> (st[3])
+ );
++normal_idx;
}
catch (const boost::bad_lexical_cast&)
}
continue;
}
+
+ // Face
+ if (st[0] == "f")
+ {
+ std::vector<std::string> f_st;
+ std::string n_st;
+
+ pcl::Vertices face_vertices; face_vertices.vertices.resize(st.size() - 1);
+ for (std::size_t i = 1; i < st.size (); ++i)
+ {
+ if (st[i].find("//") != std::string::npos)
+ {
+ //covers format v//vn
+ pcl::split(f_st, st[i], "//");
+ n_st = f_st[1];
+ }
+ else if (st[i].find('/') != std::string::npos)
+ {
+ //covers format v/vt/vn and v/vt
+ pcl::split(f_st, st[i], "/");
+ if (f_st.size() > 2)
+ n_st = f_st[2];
+ }
+ else
+ f_st = { st[i] };
+
+ int v = std::stoi(f_st[0]);
+ v = (v < 0) ? point_idx + v : v - 1;
+ face_vertices.vertices[i - 1] = v;
+
+ //handle normals
+ if (!n_st.empty())
+ {
+ int n = std::stoi(n_st);
+ n = (n < 0) ? normal_idx + n : n - 1;
+
+ normal_mapping[v] += normals[n];
+ normal_mapping_used = true;
+ }
+ }
+ continue;
+ }
}
}
catch (const char *exception)
return (-1);
}
+ if (normal_mapping_used && !normal_mapping.empty())
+ {
+ for (uindex_t i = 0, main_offset = 0; i < cloud.width; ++i, main_offset += cloud.point_step)
+ {
+ normal_mapping[i].normalize();
+
+ for (int j = 0, f = normal_x_field; j < 3; ++j, ++f)
+ memcpy(&cloud.data[main_offset + cloud.fields[f].offset], &normal_mapping[i][j], sizeof(float));
+ }
+ }
+ else if (cloud.width == normals.size())
+ {
+ // if obj file contains vertex normals (same number as vertices), but does not define faces,
+ // then associate vertices and vertex normals one-to-one
+ for (uindex_t i = 0, main_offset = 0; i < cloud.width; ++i, main_offset += cloud.point_step)
+ {
+ for (int j = 0, f = normal_x_field; j < 3; ++j, ++f)
+ memcpy(&cloud.data[main_offset + cloud.fields[f].offset], &normals[i][j], sizeof(float));
+ }
+ }
+
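The face-parsing branch above handles the three OBJ index layouts; a condensed sketch of the same rules (indices are 1-based, negative indices count backwards from the elements read so far), with a hypothetical helper name:

    #include <string>

    // Resolve one OBJ index token into a 0-based index.
    inline int
    resolve_obj_index (const std::string& token, int elements_read_so_far)
    {
      const int idx = std::stoi (token);
      return (idx < 0) ? elements_read_so_far + idx : idx - 1;
    }

    // Token layouts handled above:
    //   "v"        -> vertex only
    //   "v/vt"     -> vertex + texture coordinate
    //   "v/vt/vn"  -> vertex + texture coordinate + normal
    //   "v//vn"    -> vertex + normal, no texture coordinate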
double total_time = tt.toc ();
PCL_DEBUG ("[pcl::OBJReader::read] Loaded %s as a dense cloud in %g ms with %d points. Available dimensions: %s.\n",
file_name.c_str (), total_time,
// Get normal_x and rgba fields indices
int normal_x_field = -1;
+ std::vector<Eigen::Vector3f> normals;
+
+ // indexed by vertex: accumulated normals {x, y, z} from the faces referencing that vertex
+ std::vector<Eigen::Vector3f> normal_mapping;
+
// std::size_t rgba_field = 0;
for (std::size_t i = 0; i < mesh.cloud.fields.size (); ++i)
if (mesh.cloud.fields[i].name == "normal_x")
{
+ normals.reserve(mesh.cloud.width);
+ normal_mapping.resize(mesh.cloud.width, Eigen::Vector3f::Zero());
normal_x_field = i;
break;
}
std::size_t v_idx = 0;
std::size_t f_idx = 0;
+ std::size_t vt_idx = 0;
std::string line;
std::vector<std::string> st;
std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> > coordinates;
// Tokenize the line
pcl::split (st, line, "\t\r ");
- // Ignore comments
- if (st[0] == "#")
+ // Ignore comments and lines with only whitespace
+ if (st.empty() || st[0] == "#")
continue;
// Vertex
if (st[0] == "v")
{
try
{
- for (int i = 1, f = normal_x_field; i < 4; ++i, ++f)
- {
- float value = boost::lexical_cast<float> (st[i]);
- memcpy (&mesh.cloud.data[vn_idx * mesh.cloud.point_step + mesh.cloud.fields[f].offset],
- &value,
- sizeof (float));
- }
+ normals.emplace_back(
+ boost::lexical_cast<float> (st[1]),
+ boost::lexical_cast<float> (st[2]),
+ boost::lexical_cast<float> (st[3])
+ );
++vn_idx;
}
catch (const boost::bad_lexical_cast&)
coordinates.emplace_back(c[0], c[1]);
else
coordinates.emplace_back(c[0]/c[2], c[1]/c[2]);
+ ++vt_idx;
}
catch (const boost::bad_lexical_cast&)
{
// Face
if (st[0] == "f")
{
- // TODO read in normal indices properly
- pcl::Vertices face_v; face_v.vertices.resize (st.size () - 1);
+ std::vector<std::string> f_st;
+ std::string n_st;
+ std::string vt_st;
+
+ pcl::Vertices face_vertices; face_vertices.vertices.resize (st.size () - 1);
pcl::Vertices tex_indices; tex_indices.vertices.reserve (st.size () - 1);
for (std::size_t i = 1; i < st.size (); ++i)
{
- char* str_end;
- int v = std::strtol(st[i].c_str(), &str_end, 10);
+ if (st[i].find("//") != std::string::npos)
+ {
+ //covers format v//vn
+ pcl::split(f_st, st[i], "//");
+ n_st = f_st[1];
+ }
+ else if (st[i].find('/') != std::string::npos)
+ {
+ //covers format v/vt/vn and v/vt
+ pcl::split(f_st, st[i], "/");
+ if (f_st.size() > 1)
+ vt_st = f_st[1];
+
+ if (f_st.size() > 2)
+ n_st = f_st[2];
+ }
+ else
+ f_st = { st[i] };
+
+ int v = std::stoi(f_st[0]);
v = (v < 0) ? v_idx + v : v - 1;
- face_v.vertices[i-1] = v;
- if (str_end[0] == '/' && str_end[1] != '/' && str_end[1] != '\0')
+ face_vertices.vertices[i - 1] = v;
+
+ //handle normals
+ if (!n_st.empty())
{
- // texture coordinate indices are optional
- int tex_index = std::strtol(str_end+1, &str_end, 10);
- tex_indices.vertices.push_back (tex_index - 1);
+ int n = std::stoi(n_st);
+ n = (n < 0) ? vn_idx + n : n - 1;
+
+ normal_mapping[v] += normals[n];
+ }
+
+ if (!vt_st.empty())
+ {
+ int vt = std::stoi(vt_st);
+ vt = (vt < 0) ? vt_idx + vt : vt - 1;
+
+ tex_indices.vertices.push_back(vt);
}
}
- mesh.tex_polygons.back ().push_back (face_v);
+ mesh.tex_polygons.back ().push_back (face_vertices);
mesh.tex_coord_indices.back ().push_back (tex_indices);
++f_idx;
continue;
return (-1);
}
+ if (!normal_mapping.empty())
+ {
+ for (uindex_t i = 0, main_offset = 0; i < mesh.cloud.width; ++i, main_offset += mesh.cloud.point_step)
+ {
+ normal_mapping[i].normalize();
+
+ for (int j = 0, f = normal_x_field; j < 3; ++j, ++f)
+ memcpy(&mesh.cloud.data[main_offset + mesh.cloud.fields[f].offset], &normal_mapping[i][j], sizeof(float));
+ }
+ }
+
double total_time = tt.toc ();
PCL_DEBUG ("[pcl::OBJReader::read] Loaded %s as a TextureMesh in %g ms with %zu points, %zu texture materials, %zu polygons.\n",
file_name.c_str (), total_time,
// Get normal_x and rgba fields indices
int normal_x_field = -1;
+ std::vector<Eigen::Vector3f> normals;
+
+ // indexed by vertex: accumulated normals {x, y, z} from the faces referencing that vertex
+ std::vector<Eigen::Vector3f> normal_mapping;
+ bool normal_mapping_used = false;
+
// std::size_t rgba_field = 0;
for (std::size_t i = 0; i < mesh.cloud.fields.size (); ++i)
if (mesh.cloud.fields[i].name == "normal_x")
{
+ normals.reserve(mesh.cloud.width);
+ normal_mapping.resize(mesh.cloud.width, Eigen::Vector3f::Zero());
normal_x_field = i;
break;
}
// Tokenize the line
pcl::split (st, line, "\t\r ");
- // Ignore comments
- if (st[0] == "#")
+ // Ignore comments and lines with only whitespace
+ if (st.empty() || st[0] == "#")
continue;
// Vertex
{
try
{
- for (int i = 1, f = normal_x_field; i < 4; ++i, ++f)
- {
- float value = boost::lexical_cast<float> (st[i]);
- memcpy (&mesh.cloud.data[vn_idx * mesh.cloud.point_step + mesh.cloud.fields[f].offset],
- &value,
- sizeof (float));
- }
+ normals.emplace_back(
+ boost::lexical_cast<float> (st[1]),
+ boost::lexical_cast<float> (st[2]),
+ boost::lexical_cast<float> (st[3])
+ );
++vn_idx;
}
catch (const boost::bad_lexical_cast&)
// Face
if (st[0] == "f")
{
+ std::vector<std::string> f_st;
+ std::string n_st;
+
pcl::Vertices face_vertices; face_vertices.vertices.resize (st.size () - 1);
for (std::size_t i = 1; i < st.size (); ++i)
{
- int v;
- sscanf (st[i].c_str (), "%d", &v);
+ if (st[i].find("//") != std::string::npos)
+ {
+ //covers format v//vn
+ pcl::split(f_st, st[i], "//");
+ n_st = f_st[1];
+ }
+ else if (st[i].find('/') != std::string::npos)
+ {
+ //covers format v/vt/vn and v/vt
+ pcl::split(f_st, st[i], "/");
+ if (f_st.size() > 2)
+ n_st = f_st[2];
+ }
+ else
+ f_st = { st[i] };
+
+ int v = std::stoi(f_st[0]);
v = (v < 0) ? v_idx + v : v - 1;
face_vertices.vertices[i - 1] = v;
+
+ //handle normals
+ if (!n_st.empty())
+ {
+ int n = std::stoi(n_st);
+ n = (n < 0) ? vn_idx + n : n - 1;
+
+ normal_mapping[v] += normals[n];
+ normal_mapping_used = true;
+ }
}
mesh.polygons.push_back (face_vertices);
continue;
return (-1);
}
+ if (normal_mapping_used && !normal_mapping.empty())
+ {
+ for (uindex_t i = 0, main_offset = 0; i < mesh.cloud.width; ++i, main_offset += mesh.cloud.point_step)
+ {
+ normal_mapping[i].normalize();
+
+ for (int j = 0, f = normal_x_field; j < 3; ++j, ++f)
+ memcpy(&mesh.cloud.data[main_offset + mesh.cloud.fields[f].offset], &normal_mapping[i][j], sizeof(float));
+ }
+ }
+ else if (mesh.cloud.width == normals.size())
+ {
+ // if obj file contains vertex normals (same number as vertices), but does not define faces,
+ // then associate vertices and vertex normals one-to-one
+ for (uindex_t i = 0, main_offset = 0; i < mesh.cloud.width; ++i, main_offset += mesh.cloud.point_step)
+ {
+ for (int j = 0, f = normal_x_field; j < 3; ++j, ++f)
+ memcpy(&mesh.cloud.data[main_offset + mesh.cloud.fields[f].offset], &normals[i][j], sizeof(float));
+ }
+ }
+
double total_time = tt.toc ();
PCL_DEBUG ("[pcl::OBJReader::read] Loaded %s as a PolygonMesh in %g ms with %zu points and %zu polygons.\n",
file_name.c_str (), total_time,
openni_device_.reset (new openni::Device);
- if (device_URI.length () > 0)
+ if (!device_URI.empty())
status = openni_device_->open (device_URI.c_str ());
else
status = openni_device_->open (openni::ANY_DEVICE);
bool supported = false;
- std::vector<OpenNI2VideoMode>::const_iterator it = ir_video_modes_.begin ();
- std::vector<OpenNI2VideoMode>::const_iterator it_end = ir_video_modes_.end ();
+ auto it = ir_video_modes_.cbegin ();
+ auto it_end = ir_video_modes_.cend ();
while (it != it_end && !supported)
{
bool supported = false;
- std::vector<OpenNI2VideoMode>::const_iterator it = color_video_modes_.begin ();
- std::vector<OpenNI2VideoMode>::const_iterator it_end = color_video_modes_.end ();
+ auto it = color_video_modes_.cbegin ();
+ auto it_end = color_video_modes_.cend ();
while (it != it_end && !supported)
{
bool supported = false;
- std::vector<OpenNI2VideoMode>::const_iterator it = depth_video_modes_.begin ();
- std::vector<OpenNI2VideoMode>::const_iterator it_end = depth_video_modes_.end ();
+ auto it = depth_video_modes_.cbegin ();
+ auto it_end = depth_video_modes_.cend ();
while (it != it_end && !supported)
{
{
double sum = 0;
- std::deque<double>::const_iterator it = buffer_.begin ();
- std::deque<double>::const_iterator it_end = buffer_.end ();
+ auto it = buffer_.cbegin ();
+ auto it_end = buffer_.cend ();
while (it != it_end)
{
#include <pcl/io/openni2/openni2_device.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/time.h>
#include <pcl/console/print.h>
#include <pcl/exceptions.h>
#include <iostream>
-#include <boost/filesystem.hpp> // for exists
using namespace pcl::io::openni2;
try
{
- if (boost::filesystem::exists (device_id))
+ if (pcl_fs::exists (device_id))
{
device_ = deviceManager->getFileDevice (device_id); // Treat as file path
}
{
libusb_device* device = devices[devIdx];
std::uint8_t busId = libusb_get_bus_number (device);
- std::map<unsigned char, std::map<unsigned char, unsigned> >::const_iterator busIt = bus_map_.find (busId);
+ auto busIt = bus_map_.find (busId);
if (busIt == bus_map_.end ())
continue;
#include <pcl/io/openni_grabber.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/common/time.h>
#include <pcl/console/print.h>
#include <pcl/exceptions.h>
#include <iostream>
#include <thread>
-#include <boost/filesystem.hpp> // for exists
using namespace std::chrono_literals;
try
{
- if (boost::filesystem::exists (device_id))
+ if (pcl_fs::exists (device_id))
{
device_ = driver.createVirtualDevice (device_id, true, true);
}
#include <cstdlib>
#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/common/io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/io/low_level_io.h>
#include <pcl/io/lzf.h>
#include <pcl/io/pcd_io.h>
#include <cstring>
#include <cerrno>
-#include <boost/filesystem.hpp> // for permissions
///////////////////////////////////////////////////////////////////////////////////////////
void
else
PCL_DEBUG ("[pcl::PCDWriter::setLockingPermissions] File %s could not be locked!\n", file_name.c_str ());
- namespace fs = boost::filesystem;
try
{
- fs::permissions (fs::path (file_name), fs::add_perms | fs::set_gid_on_exe);
+#ifdef PCL_USING_STD_FILESYSTEM
+ pcl_fs::permissions (pcl_fs::path (file_name), pcl_fs::perms::set_gid, pcl_fs::perm_options::add);
+#else
+ pcl_fs::permissions (pcl_fs::path (file_name), pcl_fs::add_perms | pcl_fs::set_gid_on_exe);
+#endif
}
catch (const std::exception &e)
{
pcl::utils::ignore(file_name, lock);
#ifndef _WIN32
#ifndef NO_MANDATORY_LOCKING
- namespace fs = boost::filesystem;
try
{
- fs::permissions (fs::path (file_name), fs::remove_perms | fs::set_gid_on_exe);
+#ifdef PCL_USING_STD_FILESYSTEM
+ pcl_fs::permissions (pcl_fs::path (file_name), pcl_fs::perms::set_gid, pcl_fs::perm_options::remove);
+#else
+ pcl_fs::permissions (pcl_fs::path (file_name), pcl_fs::remove_perms | pcl_fs::set_gid_on_exe);
+#endif
}
catch (const std::exception &e)
{
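For reference, a sketch of the two permission APIs the guard above switches between, assuming PCL_USING_STD_FILESYSTEM is defined when pcl_fs wraps std::filesystem; the helper name is illustrative:

    #include <pcl/common/pcl_filesystem.h>
    #include <string>

    // Add or remove the set-gid bit on a file with whichever backend pcl_fs wraps.
    inline void
    toggle_setgid (const std::string& file_name, bool add)
    {
    #ifdef PCL_USING_STD_FILESYSTEM
      // std::filesystem: the permission bits and the add/remove action are separate arguments.
      pcl_fs::permissions (pcl_fs::path (file_name), pcl_fs::perms::set_gid,
                           add ? pcl_fs::perm_options::add : pcl_fs::perm_options::remove);
    #else
      // boost::filesystem: the action is encoded in the perms bitmask itself.
      pcl_fs::permissions (pcl_fs::path (file_name),
                           (add ? pcl_fs::add_perms : pcl_fs::remove_perms) | pcl_fs::set_gid_on_exe);
    #endif
    }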
// Read the header + comments line by line until we get to <DATA>
if (line_type.substr (0, 4) == "DATA")
{
- data_idx = static_cast<int> (fs.tellg ());
if (st.at (1).substr (0, 17) == "binary_compressed")
- data_type = 2;
- else
- if (st.at (1).substr (0, 6) == "binary")
- data_type = 1;
- continue;
+ data_type = 2;
+ else if (st.at (1).substr (0, 6) == "binary")
+ data_type = 1;
+ else if (st.at (1).substr (0, 5) == "ascii")
+ data_type = 0;
+ else {
+ PCL_WARN("[pcl::PCDReader::readHeader] Unknown DATA format: %s\n", line.c_str());
+ continue;
+ }
+ data_idx = static_cast<int> (fs.tellg ());
}
- break;
+ break; // DATA is the last header entry, everything after it will be interpreted as point cloud data
}
}
catch (const char *exception)
Eigen::Vector4f &origin, Eigen::Quaternionf &orientation,
int &pcd_version, int &data_type, unsigned int &data_idx, const int offset)
{
- if (file_name.empty() || !boost::filesystem::exists (file_name))
+ if (file_name.empty ())
{
- PCL_ERROR ("[pcl::PCDReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
+ PCL_ERROR ("[pcl::PCDReader::readHeader] No file name given!\n");
return (-1);
}
// std::getline() corrupting the result of ifstream::tellg()
std::ifstream fs;
fs.open (file_name.c_str (), std::ios::binary);
+
+ if (!fs.good ())
+ {
+ PCL_ERROR ("[pcl::PCDReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
+ return (-1);
+ }
+
if (!fs.is_open () || fs.fail ())
{
PCL_ERROR ("[pcl::PCDReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror (errno));
{
#define COPY_STRING(CASE_LABEL) \
case CASE_LABEL: { \
- copyStringValue<pcl::traits::asType_t<CASE_LABEL>>( \
+ copyStringValue<pcl::traits::asType_t<(CASE_LABEL)>>( \
st.at(total + c), cloud, idx, d, c, is); \
break; \
}
{
for (uindex_t c = 0; c < cloud.fields[d].count; ++c)
{
-#define SET_CLOUD_DENSE(CASE_LABEL) \
- case CASE_LABEL: { \
- if (!isValueFinite<pcl::traits::asType_t<CASE_LABEL>>(cloud, i, point_size, d, c)) \
- cloud.is_dense = false; \
- break; \
+#define SET_CLOUD_DENSE(CASE_LABEL) \
+ case CASE_LABEL: { \
+ if (!isValueFinite<pcl::traits::asType_t<(CASE_LABEL)>>(cloud, i, point_size, d, c)) \
+ cloud.is_dense = false; \
+ break; \
}
switch (cloud.fields[d].datatype)
{
pcl::console::TicToc tt;
tt.tic ();
- if (file_name.empty() || !boost::filesystem::exists (file_name))
+ if (file_name.empty ())
+ {
+ PCL_ERROR ("[pcl::PCDReader::read] No file name given!\n");
+ return (-1);
+ }
+
+ if (!pcl_fs::exists (file_name))
{
PCL_ERROR ("[pcl::PCDReader::read] Could not find file '%s'.\n", file_name.c_str ());
return (-1);
{
#define COPY_VALUE(CASE_LABEL) \
case CASE_LABEL: { \
- copyValueString<pcl::traits::asType_t<CASE_LABEL>>( \
+ copyValueString<pcl::traits::asType_t<(CASE_LABEL)>>( \
cloud, i, point_size, d, c, stream); \
break; \
}
#include <pcl/point_types.h>
#include <pcl/common/io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/io/ply_io.h>
#include <algorithm>
#include <string>
#include <tuple>
-// https://www.boost.org/doc/libs/1_70_0/libs/filesystem/doc/index.htm#Coding-guidelines
-#define BOOST_FILESYSTEM_NO_DEPRECATED
-#include <boost/filesystem.hpp>
#include <boost/algorithm/string.hpp> // for split
-namespace fs = boost::filesystem;
-
std::tuple<std::function<void ()>, std::function<void ()> >
pcl::PLYReader::elementDefinitionCallback (const std::string& element_name, std::size_t count)
{
pcl::PLYReader::vertexIntensityCallback (pcl::io::ply::uint8 intensity)
{
pcl::io::ply::float32 intensity_ (intensity);
- cloud_->at<pcl::io::ply::float32>(vertex_count_, vertex_offset_before_) = intensity_;
- vertex_offset_before_ += static_cast<int> (sizeof (pcl::io::ply::float32));
+ try
+ {
+ cloud_->at<pcl::io::ply::float32>(vertex_count_, vertex_offset_before_) = intensity_;
+ vertex_offset_before_ += static_cast<int> (sizeof (pcl::io::ply::float32));
+ }
+ catch(const std::out_of_range&)
+ {
+ PCL_WARN ("[pcl::PLYReader::vertexIntensityCallback] Incorrect data index specified (%lu)!\n", vertex_count_ * cloud_->point_step + vertex_offset_before_);
+ assert(false);
+ }
}
void
void
pcl::PLYReader::rangeGridBeginCallback ()
{
- range_grid_->push_back (std::vector <int> ());
+ range_grid_->emplace_back();
}
void
void
pcl::PLYReader::faceBeginCallback ()
{
- polygons_->push_back (pcl::Vertices ());
+ polygons_->emplace_back();
}
void
int data_type;
unsigned int data_idx;
- if (!fs::exists (file_name))
+ if (!pcl_fs::exists (file_name))
{
PCL_ERROR ("[pcl::PLYReader::read] File (%s) not found!\n",file_name.c_str ());
return (-1);
unsigned int data_idx;
polygons_ = &(mesh.polygons);
- if (!fs::exists (file_name))
+ if (!pcl_fs::exists (file_name))
{
PCL_ERROR ("[pcl::PLYReader::read] File (%s) not found!\n",file_name.c_str ());
return (-1);
////////////////////////////////////////////////////////////////////////////////////////
int
-pcl::PLYWriter::writeBinary (const std::string &file_name,
- const pcl::PCLPointCloud2 &cloud,
- const Eigen::Vector4f &origin,
- const Eigen::Quaternionf &orientation,
+pcl::PLYWriter::writeBinary (const std::string& file_name,
+ const pcl::PCLPointCloud2& cloud,
+ const Eigen::Vector4f& origin,
+ const Eigen::Quaternionf& orientation,
bool use_camera)
{
if (cloud.data.empty ())
}
std::ofstream fs;
- fs.open (file_name.c_str ()); // Open file
+ fs.open (file_name.c_str (),
+ std::ios::out | std::ios::binary |
+ std::ios::trunc); // Open file in binary mode and erase current contents
+ // if any
if (!fs)
{
- PCL_ERROR ("[pcl::PLYWriter::writeBinary] Error during opening (%s)!\n", file_name.c_str ());
+ PCL_ERROR ("[pcl::PLYWriter::writeBinary] Error during opening (%s)!\n",
+ file_name.c_str ());
return (-1);
}
- unsigned int nr_points = cloud.width * cloud.height;
+ if (writeBinary (fs, cloud, origin, orientation, use_camera) != 0)
+ {
+ fs.close();
+ return -1;
+ }
+ fs.close();
+ return 0;
+}
+
+int
+pcl::PLYWriter::writeBinary (std::ostream& os,
+ const pcl::PCLPointCloud2& cloud,
+ const Eigen::Vector4f& origin,
+ const Eigen::Quaternionf& orientation,
+ bool use_camera)
+{
+ os.imbue(std::locale::classic());
+
+ unsigned int nr_points = cloud.width * cloud.height;
// Compute the range_grid, if necessary, and then write out the PLY header
bool doRangeGrid = !use_camera && cloud.height > 1;
// If no x-coordinate field exists, then assume all points are valid
if (xfield < 0)
{
- for (unsigned int i=0; i < nr_points; ++i)
+ for (unsigned int i = 0; i < nr_points; ++i)
rangegrid[i] = i;
valid_points = nr_points;
}
// Otherwise, look at their x-coordinates to determine if points are valid
else
{
- for (std::size_t i=0; i < nr_points; ++i)
+ for (std::size_t i = 0; i < nr_points; ++i)
{
- const float& value = cloud.at<float>(i, cloud.fields[xfield].offset);
- if (std::isfinite(value))
+ const float& value = cloud.at<float> (i, cloud.fields[xfield].offset);
+ if (std::isfinite (value))
{
rangegrid[i] = valid_points;
++valid_points;
rangegrid[i] = -1;
}
}
- fs << generateHeader (cloud, origin, orientation, true, use_camera, valid_points);
+ os << generateHeader (cloud, origin, orientation, true, use_camera, valid_points);
}
else
{
- fs << generateHeader (cloud, origin, orientation, true, use_camera, nr_points);
- }
-
- // Close the file
- fs.close ();
- // Open file in binary appendable
- std::ofstream fpout (file_name.c_str (), std::ios::app | std::ios::binary);
- if (!fpout)
- {
- PCL_ERROR ("[pcl::PLYWriter::writeBinary] Error during reopening (%s)!\n", file_name.c_str ());
- return (-1);
+ os << generateHeader (cloud, origin, orientation, true, use_camera, nr_points);
}
// Iterate through the points
{
int count = cloud.fields[d].count;
if (count == 0)
- count = 1; //workaround
+ count = 1; // workaround
if (count > 1)
{
static unsigned int ucount (count);
- fpout.write (reinterpret_cast<const char*> (&ucount), sizeof (unsigned int));
+ os.write (reinterpret_cast<const char*> (&ucount), sizeof (unsigned int));
}
// Ignore invalid padded dimensions that are inherited from binary data
if (cloud.fields[d].name == "_")
{
- total += cloud.fields[d].count; // jump over this many elements in the string token
+ total +=
+ cloud.fields[d].count; // jump over this many elements in the string token
continue;
}
{
switch (cloud.fields[d].datatype)
{
- case pcl::PCLPointField::INT8:
- {
- fpout.write (&cloud.at<char>(i, cloud.fields[d].offset + (total + c) * sizeof (char)), sizeof (char));
- break;
- }
- case pcl::PCLPointField::UINT8:
- {
- fpout.write (reinterpret_cast<const char*> (&cloud.at<unsigned char>(i, cloud.fields[d].offset + (total + c) * sizeof (unsigned char))), sizeof (unsigned char));
- break;
- }
- case pcl::PCLPointField::INT16:
- {
- fpout.write (reinterpret_cast<const char*> (&cloud.at<short>(i, cloud.fields[d].offset + (total + c) * sizeof (short))), sizeof (short));
- break;
- }
- case pcl::PCLPointField::UINT16:
- {
- fpout.write (reinterpret_cast<const char*> (&cloud.at<unsigned short>(i, cloud.fields[d].offset + (total + c) * sizeof (unsigned short))), sizeof (unsigned short));
- break;
- }
- case pcl::PCLPointField::INT32:
+ case pcl::PCLPointField::INT8:
+ {
+ os.write (
+ &cloud.at<char> (i, cloud.fields[d].offset + (total + c) * sizeof (char)),
+ sizeof (char));
+ break;
+ }
+ case pcl::PCLPointField::UINT8:
+ {
+ os.write (
+ reinterpret_cast<const char*> (&cloud.at<unsigned char> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (unsigned char))),
+ sizeof (unsigned char));
+ break;
+ }
+ case pcl::PCLPointField::INT16:
+ {
+ os.write (reinterpret_cast<const char*> (&cloud.at<short> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (short))),
+ sizeof (short));
+ break;
+ }
+ case pcl::PCLPointField::UINT16:
+ {
+ os.write (
+ reinterpret_cast<const char*> (&cloud.at<unsigned short> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (unsigned short))),
+ sizeof (unsigned short));
+ break;
+ }
+ case pcl::PCLPointField::INT32:
+ {
+ os.write (reinterpret_cast<const char*> (&cloud.at<int> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (int))),
+ sizeof (int));
+ break;
+ }
+ case pcl::PCLPointField::UINT32:
+ {
+ if (cloud.fields[d].name.find ("rgba") == std::string::npos)
{
- fpout.write (reinterpret_cast<const char*> (&cloud.at<int>(i, cloud.fields[d].offset + (total + c) * sizeof (int))), sizeof (int));
- break;
+ os.write (
+ reinterpret_cast<const char*> (&cloud.at<unsigned int> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (unsigned int))),
+ sizeof (unsigned int));
}
- case pcl::PCLPointField::UINT32:
+ else
{
- if (cloud.fields[d].name.find ("rgba") == std::string::npos)
- {
- fpout.write (reinterpret_cast<const char*> (&cloud.at<unsigned int>(i, cloud.fields[d].offset + (total + c) * sizeof (unsigned int))), sizeof (unsigned int));
- }
- else
- {
- const auto& color = cloud.at<pcl::RGB>(i, cloud.fields[d].offset + (total + c) * sizeof (pcl::RGB));
- fpout.write (reinterpret_cast<const char*> (&color.r), sizeof (unsigned char));
- fpout.write (reinterpret_cast<const char*> (&color.g), sizeof (unsigned char));
- fpout.write (reinterpret_cast<const char*> (&color.b), sizeof (unsigned char));
- fpout.write (reinterpret_cast<const char*> (&color.a), sizeof (unsigned char));
- }
- break;
+ const auto& color = cloud.at<pcl::RGB> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (pcl::RGB));
+ os.write (reinterpret_cast<const char*> (&color.r), sizeof (unsigned char));
+ os.write (reinterpret_cast<const char*> (&color.g), sizeof (unsigned char));
+ os.write (reinterpret_cast<const char*> (&color.b), sizeof (unsigned char));
+ os.write (reinterpret_cast<const char*> (&color.a), sizeof (unsigned char));
}
- case pcl::PCLPointField::FLOAT32:
+ break;
+ }
+ case pcl::PCLPointField::FLOAT32:
+ {
+ if (cloud.fields[d].name.find ("rgb") == std::string::npos)
{
- if (cloud.fields[d].name.find ("rgb") == std::string::npos)
- {
- fpout.write (reinterpret_cast<const char*> (&cloud.at<float>(i, cloud.fields[d].offset + (total + c) * sizeof (float))), sizeof (float));
- }
- else
- {
- const auto& color = cloud.at<pcl::RGB>(i, cloud.fields[d].offset + (total + c) * sizeof (pcl::RGB));
- fpout.write (reinterpret_cast<const char*> (&color.r), sizeof (unsigned char));
- fpout.write (reinterpret_cast<const char*> (&color.g), sizeof (unsigned char));
- fpout.write (reinterpret_cast<const char*> (&color.b), sizeof (unsigned char));
- }
- break;
+ os.write (reinterpret_cast<const char*> (&cloud.at<float> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (float))),
+ sizeof (float));
}
- case pcl::PCLPointField::FLOAT64:
+ else
{
- fpout.write (reinterpret_cast<const char*> (&cloud.at<double>(i, cloud.fields[d].offset + (total + c) * sizeof (double))), sizeof (double));
- break;
+ const auto& color = cloud.at<pcl::RGB> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (pcl::RGB));
+ os.write (reinterpret_cast<const char*> (&color.r), sizeof (unsigned char));
+ os.write (reinterpret_cast<const char*> (&color.g), sizeof (unsigned char));
+ os.write (reinterpret_cast<const char*> (&color.b), sizeof (unsigned char));
}
- default:
- PCL_WARN ("[pcl::PLYWriter::writeBinary] Incorrect field data type specified (%d)!\n", cloud.fields[d].datatype);
- break;
+ break;
+ }
+ case pcl::PCLPointField::FLOAT64:
+ {
+ os.write (reinterpret_cast<const char*> (&cloud.at<double> (
+ i, cloud.fields[d].offset + (total + c) * sizeof (double))),
+ sizeof (double));
+ break;
+ }
+ default:
+ PCL_WARN ("[pcl::PLYWriter::writeBinary] Incorrect field data type specified "
+ "(%d)!\n",
+ cloud.fields[d].datatype);
+ break;
}
}
}
for (int i = 0; i < 3; ++i)
{
if (origin[3] != 0)
- t = origin[i]/origin[3];
+ t = origin[i] / origin[3];
else
t = origin[i];
- fpout.write (reinterpret_cast<const char*> (&t), sizeof (float));
+ os.write (reinterpret_cast<const char*> (&t), sizeof (float));
}
Eigen::Matrix3f R = orientation.toRotationMatrix ();
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
- {
- fpout.write (reinterpret_cast<const char*> (&R (i, j)),sizeof (float));
- }
+ {
+ os.write (reinterpret_cast<const char*> (&R (i, j)), sizeof (float));
+ }
/////////////////////////////////////////////////////
// Append those properties directly. //
const float zerof = 0;
for (int i = 0; i < 5; ++i)
- fpout.write (reinterpret_cast<const char*> (&zerof), sizeof (float));
+ os.write (reinterpret_cast<const char*> (&zerof), sizeof (float));
// width and height
int width = cloud.width;
- fpout.write (reinterpret_cast<const char*> (&width), sizeof (int));
+ os.write (reinterpret_cast<const char*> (&width), sizeof (int));
int height = cloud.height;
- fpout.write (reinterpret_cast<const char*> (&height), sizeof (int));
+ os.write (reinterpret_cast<const char*> (&height), sizeof (int));
for (int i = 0; i < 2; ++i)
- fpout.write (reinterpret_cast<const char*> (&zerof), sizeof (float));
+ os.write (reinterpret_cast<const char*> (&zerof), sizeof (float));
}
else if (doRangeGrid)
{
// Write out range_grid
- for (std::size_t i=0; i < nr_points; ++i)
+ for (std::size_t i = 0; i < nr_points; ++i)
{
pcl::io::ply::uint8 listlen;
if (rangegrid[i] >= 0)
{
listlen = 1;
- fpout.write (reinterpret_cast<const char*> (&listlen), sizeof (pcl::io::ply::uint8));
- fpout.write (reinterpret_cast<const char*> (&rangegrid[i]), sizeof (pcl::io::ply::int32));
+ os.write (reinterpret_cast<const char*> (&listlen),
+ sizeof (pcl::io::ply::uint8));
+ os.write (reinterpret_cast<const char*> (&rangegrid[i]),
+ sizeof (pcl::io::ply::int32));
}
else
{
listlen = 0;
- fpout.write (reinterpret_cast<const char*> (&listlen), sizeof (pcl::io::ply::uint8));
+ os.write (reinterpret_cast<const char*> (&listlen),
+ sizeof (pcl::io::ply::uint8));
}
}
}
// Close file
- fpout.close ();
+
return (0);
}
signal_PointXYZRGBA->num_slots () == 0)
return;
+ // cache stream options
+ const bool color_requested = signal_PointXYZRGB->num_slots () > 0 || signal_PointXYZRGBA->num_slots () > 0;
+ const bool ir_requested = signal_PointXYZI->num_slots () > 0;
+
running_ = true;
quit_ = false;
{
cfg.enable_device_from_file ( file_name_or_serial_number_, repeat_playback_ );
}
+ // capture from camera
else
{
+ // find by serial number if provided
if (!file_name_or_serial_number_.empty ())
cfg.enable_device ( file_name_or_serial_number_ );
- if (signal_PointXYZRGB->num_slots () > 0 || signal_PointXYZRGBA->num_slots () > 0)
- {
+ // enable camera streams as requested
+ if (color_requested)
cfg.enable_stream ( RS2_STREAM_COLOR, device_width_, device_height_, RS2_FORMAT_RGB8, target_fps_ );
- }
cfg.enable_stream ( RS2_STREAM_DEPTH, device_width_, device_height_, RS2_FORMAT_Z16, target_fps_ );
- if (signal_PointXYZI->num_slots () > 0)
- {
+ if (ir_requested)
cfg.enable_stream ( RS2_STREAM_INFRARED, device_width_, device_height_, RS2_FORMAT_Y8, target_fps_ );
- }
}
rs2::pipeline_profile prof = pipe_.start ( cfg );
- if ( prof.get_stream ( RS2_STREAM_COLOR ).format ( ) != RS2_FORMAT_RGB8 ||
+ // check all requested streams started properly
+ if ( (color_requested && prof.get_stream ( RS2_STREAM_COLOR ).format ( ) != RS2_FORMAT_RGB8) ||
prof.get_stream ( RS2_STREAM_DEPTH ).format ( ) != RS2_FORMAT_Z16 ||
- prof.get_stream ( RS2_STREAM_INFRARED ).format ( ) != RS2_FORMAT_Y8 )
+ (ir_requested && prof.get_stream (RS2_STREAM_INFRARED ).format ( ) != RS2_FORMAT_Y8) )
THROW_IO_EXCEPTION ( "This stream type or format not supported." );
thread_ = std::thread ( &RealSense2Grabber::threadFunction, this );
pcl::RobotEyeGrabber::socketThreadLoop ()
{
asyncSocketReceive();
- io_service_.reset();
+ io_service_.restart();
io_service_.run();
}
try {
boost::asio::ip::tcp::resolver resolver (tim_io_service_);
- tcp_endpoint_ = *resolver.resolve (tcp_endpoint_);
- tim_socket_.connect (tcp_endpoint_);
+ boost::asio::ip::tcp::resolver::results_type endpoints = resolver.resolve (tcp_endpoint_);
+ boost::asio::connect(tim_socket_, endpoints);
}
catch (std::exception& e)
{
boost::asio::ip::address
pcl::VLPGrabber::getDefaultNetworkAddress ()
{
- return (boost::asio::ip::address::from_string ("255.255.255.255"));
+ return (boost::asio::ip::make_address ("255.255.255.255"));
}
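A small sketch of the modern boost::asio calls the hunks above migrate to: ip::make_address replaces the deprecated address::from_string, and connecting through the resolver's result set replaces dereferencing a single resolved endpoint. Host and port values are placeholders:

    #include <boost/asio.hpp>

    inline void
    connect_sketch (boost::asio::io_context& io, boost::asio::ip::tcp::socket& socket)
    {
      // Parse a literal address without the deprecated from_string().
      const auto broadcast = boost::asio::ip::make_address ("255.255.255.255");
      (void) broadcast;

      // Resolve and connect: asio tries each endpoint in the result set in turn.
      boost::asio::ip::tcp::resolver resolver (io);
      const auto endpoints = resolver.resolve ("192.168.0.1", "2112");  // placeholder host/port
      boost::asio::connect (socket, endpoints);
    }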
/////////////////////////////////////////////////////////////////////////////
if (poly_data->GetPoints () == nullptr)
{
- PCL_ERROR ("[pcl::io::vtk2mesh] Given vtkPolyData is misformed (contains nullpointer instead of points).\n");
+ PCL_ERROR ("[pcl::io::vtk2mesh] Given vtkPolyData is malformed (contains nullpointer instead of points).\n");
return (0);
}
vtkSmartPointer<vtkPoints> mesh_points = poly_data->GetPoints ();
{
float tex[2];
texture_coords->GetTupleValue (i, tex);
- mesh.tex_coordinates.front ().push_back (Eigen::Vector2f (tex[0], tex[1]));
+ mesh.tex_coordinates.front ().emplace_back(tex[0], tex[1]);
}
}
else
for (int x = 0; x < dims[0]; x++)
{
float* pixel = static_cast<float*>(image->GetScalarPointer(x,y,0));
- pixel[0] = range_image(y,x).range;
+ *pixel = range_image(x,y).range;
}
}
set(SUBSYS_DESC "Point cloud kd-tree library")
set(SUBSYS_DEPS common)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS flann)
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common FLANN::FLANN)
set(EXT_DEPS flann)
return (min_pts_);
}
+ /** \brief Gets whether the results should be sorted (ascending by distance) or not.
+ * If not sorted, the results may be returned in any order.
+ */
+ inline bool
+ getSortedResults () const
+ {
+ return (sorted_);
+ }
+
protected:
/** \brief The input point cloud dataset containing the points we need to use. */
PointCloudConstPtr input_;
} // namespace detail
/**
- * @brief Comaptibility template function to allow use of various types of indices with
+ * @brief Compatibility template function to allow use of various types of indices with
* FLANN
* @details Template is used for all params to not constrain any FLANN side capability
* @param[in,out] index A index searcher, of type ::flann::Index<Dist> or similar, where
const SearchParams& params);
/**
- * @brief Comaptibility template function to allow use of various types of indices with
+ * @brief Compatibility template function to allow use of various types of indices with
* FLANN
* @details Template is used for all params to not constrain any FLANN side capability
* @param[in,out] index A index searcher, of type ::flann::Index<Dist> or similar, where
set(SUBSYS_DESC "Point cloud keypoints library")
set(SUBSYS_DEPS common search kdtree octree features filters)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
"include/pcl/${SUBSYS_NAME}/keypoint.h"
"include/pcl/${SUBSYS_NAME}/narf_keypoint.h"
"include/pcl/${SUBSYS_NAME}/sift_keypoint.h"
- "include/pcl/${SUBSYS_NAME}/uniform_sampling.h"
"include/pcl/${SUBSYS_NAME}/smoothed_surfaces_keypoint.h"
"include/pcl/${SUBSYS_NAME}/agast_2d.h"
"include/pcl/${SUBSYS_NAME}/harris_2d.h"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_features pcl_filters)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-
-PCL_DEPRECATED_HEADER(1, 15, "UniformSampling is not a Keypoint anymore, use <pcl/filters/uniform_sampling.h> instead.")
-
-#include <pcl/filters/uniform_sampling.h>
return (static_cast<float>(coeff6) / 18.0f);
}
- if (!(H_det > 0 && coeff1 < 0))
+ if (H_det <= 0 || coeff1 >= 0)
{
    // The maximum must be at one of the 4 patch corners.
int tmp_max = coeff3 + coeff4 + coeff5;
static_cast<int> (pcl_lrint (std::floor ( (angle+deg2rad (90.0f))/deg2rad (180.0f) * angle_histogram_size))));
float& histogram_value = angle_histogram[histogram_cell];
histogram_value = (std::max) (histogram_value, surface_change_score);
- angle_elements[histogram_cell].push_back (std::make_pair(index2, surface_change_score));
+ angle_elements[histogram_cell].emplace_back(index2, surface_change_score);
}
// Reset was_touched to false
set(SUBSYS_DESC "Point cloud machine learning library")
set(SUBSYS_DEPS common)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
SET_TARGET_PROPERTIES("${LIB_NAME}" PROPERTIES LINKER_LANGUAGE CXX)
target_link_libraries("${LIB_NAME}" pcl_common)
#include <limits> // for numeric_limits
#include <string> // for string
#include <vector>
+// NOLINTNEXTLINE(bugprone-macro-parentheses)
#define Malloc(type, n) static_cast<type*>(malloc((n) * sizeof(type)))
namespace pcl {
#define INF HUGE_VAL
#define TAU 1e-12
+// NOLINTBEGIN(bugprone-macro-parentheses)
#define Malloc(type, n) static_cast<type*>(malloc((n) * sizeof(type)))
#define Realloc(var, type, n) static_cast<type*>(realloc(var, (n) * sizeof(type)))
+// NOLINTEND(bugprone-macro-parentheses)
static void
print_string_stdout(const char* s)
set(SUBSYS_DESC "Point cloud octree library")
set(SUBSYS_DEPS common)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/octree_base.h"
"include/pcl/${SUBSYS_NAME}/octree_container.h"
"include/pcl/${SUBSYS_NAME}/octree_impl.h"
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common)
-target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- *
- */
-
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-// Marking all Boost headers as system headers to remove warnings
-#include <boost/graph/adjacency_list.hpp>
leaf_count_ = 0;
// iterator for binary tree structure vector
- std::vector<char>::const_iterator binary_tree_in_it = binary_tree_in_arg.begin();
- std::vector<char>::const_iterator binary_tree_in_it_end = binary_tree_in_arg.end();
+ auto binary_tree_in_it = binary_tree_in_arg.cbegin();
+ auto binary_tree_in_it_end = binary_tree_in_arg.cend();
deserializeTreeRecursive(root_node_,
depth_mask_,
OctreeKey new_key;
// set data iterator to first element
- typename std::vector<LeafContainerT*>::const_iterator leaf_container_vector_it =
- leaf_container_vector_arg.begin();
+ auto leaf_container_vector_it = leaf_container_vector_arg.cbegin();
// set data iterator to last element
- typename std::vector<LeafContainerT*>::const_iterator leaf_container_vector_it_end =
- leaf_container_vector_arg.end();
+ auto leaf_container_vector_it_end = leaf_container_vector_arg.cend();
// we will rebuild an octree -> reset leafCount
leaf_count_ = 0;
// iterator for binary tree structure vector
- std::vector<char>::const_iterator binary_tree_in_it = binary_tree_in_arg.begin();
- std::vector<char>::const_iterator binary_tree_in_it_end = binary_tree_in_arg.end();
+ auto binary_tree_in_it = binary_tree_in_arg.cbegin();
+ auto binary_tree_in_it_end = binary_tree_in_arg.cend();
deserializeTreeRecursive(root_node_,
depth_mask_,
deleteTree();
// iterator for binary tree structure vector
- std::vector<char>::const_iterator binary_tree_out_it = binary_tree_out_arg.begin();
- std::vector<char>::const_iterator binary_tree_out_it_end = binary_tree_out_arg.end();
+ auto binary_tree_out_it = binary_tree_out_arg.cbegin();
+ auto binary_tree_out_it_end = binary_tree_out_arg.cend();
deserializeTreeRecursive(root_node_,
depth_mask_,
OctreeKey new_key;
// set data iterator to first element
- typename std::vector<LeafContainerT*>::const_iterator leaf_vector_it =
- leaf_container_vector_arg.begin();
+ auto leaf_vector_it = leaf_container_vector_arg.cbegin();
// set data iterator to last element
- typename std::vector<LeafContainerT*>::const_iterator leaf_vector_it_end =
- leaf_container_vector_arg.end();
+ auto leaf_vector_it_end = leaf_container_vector_arg.cend();
// free existing tree before tree rebuild
deleteTree();
// iterator for binary tree structure vector
- std::vector<char>::const_iterator binary_tree_input_it = binary_tree_in_arg.begin();
- std::vector<char>::const_iterator binary_tree_input_it_end = binary_tree_in_arg.end();
+ auto binary_tree_input_it = binary_tree_in_arg.cbegin();
+ auto binary_tree_input_it_end = binary_tree_in_arg.cend();
deserializeTreeRecursive(root_node_,
depth_mask_,
min_pt(2) = static_cast<float>(static_cast<double>(key_arg.z) * voxel_side_len +
this->min_z_);
- max_pt(0) = static_cast<float>(static_cast<double>(key_arg.x + 1) * voxel_side_len +
- this->min_x_);
- max_pt(1) = static_cast<float>(static_cast<double>(key_arg.y + 1) * voxel_side_len +
- this->min_y_);
- max_pt(2) = static_cast<float>(static_cast<double>(key_arg.z + 1) * voxel_side_len +
- this->min_z_);
+ max_pt(0) = min_pt(0) + voxel_side_len;
+ max_pt(1) = min_pt(1) + voxel_side_len;
+ max_pt(2) = min_pt(2) + voxel_side_len;
}
//////////////////////////////////////////////////////////////////////////////////////////////
prioPointQueueEntry point_entry;
std::vector<prioPointQueueEntry> point_candidates;
+ point_candidates.reserve(K);
OctreeKey key;
key.x = key.y = key.z = 0;
// calculate point distance to search point
float squared_dist = pointSquaredDist(candidate_point, point);
- // check if a closer match is found
- if (squared_dist < smallest_squared_dist) {
- prioPointQueueEntry point_entry;
-
- point_entry.point_distance_ = squared_dist;
- point_entry.point_idx_ = point_index;
- point_candidates.push_back(point_entry);
+ const auto insert_into_queue = [&] {
+ point_candidates.emplace(
+ std::upper_bound(point_candidates.begin(),
+ point_candidates.end(),
+ squared_dist,
+ [](float dist, const prioPointQueueEntry& ent) {
+ return dist < ent.point_distance_;
+ }),
+ point_index,
+ squared_dist);
+ };
+ if (point_candidates.size() < K) {
+ insert_into_queue();
+ }
+ else if (point_candidates.back().point_distance_ > squared_dist) {
+ point_candidates.pop_back();
+ insert_into_queue();
}
}
- std::sort(point_candidates.begin(), point_candidates.end());
-
- if (point_candidates.size() > K)
- point_candidates.resize(K);
-
if (point_candidates.size() == K)
smallest_squared_dist = point_candidates.back().point_distance_;
}
std::vector<float>& k_sqr_distances,
uindex_t max_nn) const
{
- // get spatial voxel information
- double voxel_squared_diameter = this->getVoxelSquaredDiameter(tree_depth);
-
// iterate over all children
for (unsigned char child_idx = 0; child_idx < 8; child_idx++) {
if (!this->branchHasChild(*node, child_idx))
child_node = this->getBranchChildPtr(*node, child_idx);
OctreeKey new_key;
- PointT voxel_center;
float squared_dist;
// generate new key for current branch voxel
new_key.y = (key.y << 1) + (!!(child_idx & (1 << 1)));
new_key.z = (key.z << 1) + (!!(child_idx & (1 << 0)));
- // generate voxel center point for voxel at key
- this->genVoxelCenterFromOctreeKey(new_key, tree_depth, voxel_center);
-
- // calculate distance to search point
- squared_dist = pointSquaredDist(static_cast<const PointT&>(voxel_center), point);
-
- // if distance is smaller than search radius
- if (squared_dist + this->epsilon_ <=
- voxel_squared_diameter / 4.0 + radiusSquared +
- sqrt(voxel_squared_diameter * radiusSquared)) {
-
+ // compute min distance between query point and any point in this child node, to
+ // decide whether we can skip it
+ Eigen::Vector3f min_pt, max_pt;
+ this->genVoxelBoundsFromOctreeKey(new_key, tree_depth, min_pt, max_pt);
+ squared_dist = 0.0f;
+ if (point.x < min_pt.x())
+ squared_dist += std::pow(point.x - min_pt.x(), 2);
+ else if (point.x > max_pt.x())
+ squared_dist += std::pow(point.x - max_pt.x(), 2);
+ if (point.y < min_pt.y())
+ squared_dist += std::pow(point.y - min_pt.y(), 2);
+ else if (point.y > max_pt.y())
+ squared_dist += std::pow(point.y - max_pt.y(), 2);
+ if (point.z < min_pt.z())
+ squared_dist += std::pow(point.z - min_pt.z(), 2);
+ else if (point.z > max_pt.z())
+ squared_dist += std::pow(point.z - max_pt.z(), 2);
+ if (squared_dist < (radiusSquared + this->epsilon_)) {
if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
getNeighborsWithinRadiusRecursive(point,
    // test if search region overlaps with voxel space
- if (!((lower_voxel_corner(0) > max_pt(0)) || (min_pt(0) > upper_voxel_corner(0)) ||
- (lower_voxel_corner(1) > max_pt(1)) || (min_pt(1) > upper_voxel_corner(1)) ||
- (lower_voxel_corner(2) > max_pt(2)) || (min_pt(2) > upper_voxel_corner(2)))) {
+ if ((lower_voxel_corner(0) <= max_pt(0)) && (min_pt(0) <= upper_voxel_corner(0)) &&
+ (lower_voxel_corner(1) <= max_pt(1)) && (min_pt(1) <= upper_voxel_corner(1)) &&
+ (lower_voxel_corner(2) <= max_pt(2)) && (min_pt(2) <= upper_voxel_corner(2))) {
if (child_node->getNodeType() == BRANCH_NODE) {
// we have not reached maximum tree depth
* \param[in] query_index the index in \a cloud representing the query point
* \param[out] result_index the resultant index of the neighbor point
* \param[out] sqr_distance the resultant squared distance to the neighboring point
- * \return number of neighbors found
*/
inline void
approxNearestSearch(const PointCloud& cloud,
* position in the indices vector.
* \param[out] result_index the resultant index of the neighbor point
* \param[out] sqr_distance the resultant squared distance to the neighboring point
- * \return number of neighbors found
*/
void
approxNearestSearch(uindex_t query_index, index_t& result_index, float& sqr_distance);
such as "Neighbors within Voxel Search", "K Nearest Neighbor Search" and
"Neighbors within Radius Search". It automatically adjusts its dimension to the point
data set. A set of leaf node classes provide additional functionality, such as
-spacial "occupancy" and "point density per voxel" checks. Functions for serialization
+spatial "occupancy" and "point density per voxel" checks. Functions for serialization
and deserialization allow the octree structure to be efficiently encoded into a binary format.
Furthermore, a memory pool implementation reduces expensive memory allocation and
deallocation operations in scenarios where octrees need to be created at a high rate.
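To make the searches named above concrete, here is a brief, hedged usage sketch of pcl::octree::OctreePointCloudSearch; the synthetic cloud and the numeric values are illustrative only:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/octree/octree_search.h>
#include <vector>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  for (float x = 0.0f; x < 1.0f; x += 0.05f)                        // synthetic data
    cloud->push_back (pcl::PointXYZ (x, 0.5f * x, 0.0f));

  pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree (0.1);  // voxel resolution
  octree.setInputCloud (cloud);
  octree.addPointsFromInputCloud ();

  const pcl::PointXYZ query (0.5f, 0.25f, 0.0f);
  pcl::Indices indices;
  std::vector<float> sqr_distances;
  octree.nearestKSearch (query, 5, indices, sqr_distances);   // K nearest neighbor search
  octree.radiusSearch (query, 0.2, indices, sqr_distances);   // neighbors within radius search
  octree.voxelSearch (query, indices);                        // neighbors within the query's voxel

  std::vector<char> tree_binary;
  octree.serializeTree (tree_binary);                         // binary serialization of the octree structure
  return 0;
}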
set(SUBSYS_DESC "Point cloud outofcore library")
set(SUBSYS_DEPS common io filters octree visualization)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
+if(NOT TARGET Boost::filesystem)
+ set(DEFAULT FALSE)
+ set(REASON "Boost filesystem is not available.")
+else()
+ set(DEFAULT TRUE)
+endif()
+
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_ADD_DOC("${SUBSYS_NAME}")
endif()
set(srcs
- src/cJSON.cpp
src/outofcore_node_data.cpp
src/outofcore_base_data.cpp
)
"include/pcl/${SUBSYS_NAME}/outofcore_breadth_first_iterator.h"
"include/pcl/${SUBSYS_NAME}/outofcore_depth_first_iterator.h"
"include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/cJSON.h"
"include/pcl/${SUBSYS_NAME}/octree_base.h"
"include/pcl/${SUBSYS_NAME}/octree_base_node.h"
"include/pcl/${SUBSYS_NAME}/octree_abstract_node_container.h"
"include/pcl/${SUBSYS_NAME}/visualization/viewport.h"
)
+if(NOT HAVE_CJSON)
+ list(APPEND srcs src/cJSON.cpp)
+ list(APPEND incs "include/pcl/${SUBSYS_NAME}/cJSON.h")
+endif()
+
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs} ${visualization_incs})
#PCL_ADD_SSE_FLAGS("${LIB_NAME}")
target_link_libraries("${LIB_NAME}" pcl_common pcl_visualization ${Boost_SYSTEM_LIBRARY})
+if(HAVE_CJSON)
+ target_link_libraries("${LIB_NAME}" ${CJSON_LIBRARIES})
+endif()
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
+++ /dev/null
-/*
- Copyright (c) 2009 Dave Gamble
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-*/
-
-#pragma once
-
-#include <pcl/pcl_macros.h>
-
-//make interop with c++ string easier
-#include <string>
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* cJSON Types: */
-#define cJSON_False 0
-#define cJSON_True 1
-#define cJSON_NULL 2
-#define cJSON_Number 3
-#define cJSON_String 4
-#define cJSON_Array 5
-#define cJSON_Object 6
-
-#define cJSON_IsReference 256
-
-/* The cJSON structure: */
-typedef struct cJSON { // NOLINT
- struct cJSON *next,*prev; /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
- struct cJSON *child; /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
-
- int type; /* The type of the item, as above. */
-
- char *valuestring; /* The item's string, if type==cJSON_String */
- int valueint; /* The item's number, if type==cJSON_Number */
- double valuedouble; /* The item's number, if type==cJSON_Number */
-
- char *string; /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
-} cJSON;
-
-typedef struct cJSON_Hooks { // NOLINT
- void *(*malloc_fn)(std::size_t sz);
- void (*free_fn)(void *ptr);
-} cJSON_Hooks;
-
-/* Supply malloc, realloc and free functions to cJSON */
-PCLAPI(void) cJSON_InitHooks(cJSON_Hooks* hooks);
-
-
-/* Supply a block of JSON, and this returns a cJSON object you can interrogate. Call cJSON_Delete when finished. */
-PCLAPI(cJSON *) cJSON_Parse(const char *value);
-/* Render a cJSON entity to text for transfer/storage. Free the char* when finished. */
-PCLAPI(char *) cJSON_Print(cJSON *item);
-/* Render a cJSON entity to text for transfer/storage without any formatting. Free the char* when finished. */
-PCLAPI(char *) cJSON_PrintUnformatted(cJSON *item);
-/* Delete a cJSON entity and all subentities. */
-PCLAPI(void) cJSON_Delete(cJSON *c);
-/* Render a cJSON entity to text for transfer/storage. */
-PCLAPI(void) cJSON_PrintStr(cJSON *item, std::string& s);
-/* Render a cJSON entity to text for transfer/storage without any formatting. */
-PCLAPI(void) cJSON_PrintUnformattedStr(cJSON *item, std::string& s);
-
-/* Returns the number of items in an array (or object). */
-PCLAPI(int) cJSON_GetArraySize(cJSON *array);
-/* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. */
-PCLAPI(cJSON *) cJSON_GetArrayItem(cJSON *array,int item);
-/* Get item "string" from object. Case insensitive. */
-PCLAPI(cJSON *) cJSON_GetObjectItem(cJSON *object,const char *string);
-
-/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
-PCLAPI(const char *) cJSON_GetErrorPtr();
-
-/* These calls create a cJSON item of the appropriate type. */
-PCLAPI(cJSON *) cJSON_CreateNull();
-PCLAPI(cJSON *) cJSON_CreateTrue();
-PCLAPI(cJSON *) cJSON_CreateFalse();
-PCLAPI(cJSON *) cJSON_CreateBool(int b);
-PCLAPI(cJSON *) cJSON_CreateNumber(double num);
-PCLAPI(cJSON *) cJSON_CreateString(const char *string);
-PCLAPI(cJSON *) cJSON_CreateArray();
-PCLAPI(cJSON *) cJSON_CreateObject();
-
-/* These utilities create an Array of count items. */
-PCLAPI(cJSON *) cJSON_CreateIntArray(int *numbers,int count);
-PCLAPI(cJSON *) cJSON_CreateFloatArray(float *numbers,int count);
-PCLAPI(cJSON *) cJSON_CreateDoubleArray(double *numbers,int count);
-PCLAPI(cJSON *) cJSON_CreateStringArray(const char **strings,int count);
-
-/* Append item to the specified array/object. */
-PCLAPI(void) cJSON_AddItemToArray(cJSON *array, cJSON *item);
-PCLAPI(void) cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item);
-/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
-PCLAPI(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
-PCLAPI(void) cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item);
-
-/* Remove/Detach items from Arrays/Objects. */
-PCLAPI(cJSON *) cJSON_DetachItemFromArray(cJSON *array,int which);
-PCLAPI(void) cJSON_DeleteItemFromArray(cJSON *array,int which);
-PCLAPI(cJSON *) cJSON_DetachItemFromObject(cJSON *object,const char *string);
-PCLAPI(void) cJSON_DeleteItemFromObject(cJSON *object,const char *string);
-
-/* Update array items. */
-PCLAPI(void) cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem);
-PCLAPI(void) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
-
-#define cJSON_AddNullToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateNull())
-#define cJSON_AddTrueToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue())
-#define cJSON_AddFalseToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse())
-#define cJSON_AddNumberToObject(object,name,n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n))
-#define cJSON_AddStringToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s))
-
-#ifdef __cplusplus
-}
-#endif
#include <pcl/outofcore/octree_base.h>
// JSON
+#include <pcl/pcl_config.h> // for HAVE_CJSON
+#if defined(HAVE_CJSON)
+#include <cjson/cJSON.h>
+#else
#include <pcl/outofcore/cJSON.h>
+#endif
#include <pcl/filters/random_sample.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/extract_indices.h>
// JSON
+#include <pcl/pcl_config.h> // for HAVE_CJSON
+#if defined(HAVE_CJSON)
+#include <cjson/cJSON.h>
+#else
#include <pcl/outofcore/cJSON.h>
+#endif
namespace pcl
{
// Boost
#include <boost/random/bernoulli_distribution.hpp>
#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp> // for boost::variate_generator
#include <boost/uuid/uuid_io.hpp>
// PCL
if (!stack_.empty ())
{
std::pair<OutofcoreOctreeBaseNode<ContainerT, PointT>*, unsigned char>& stackEntry = stack_.back ();
- stack_.pop_back ();
this->currentNode_ = stackEntry.first;
currentChildIdx_ = stackEntry.second;
+ stack_.pop_back (); // stackEntry is a reference, so pop_back after accessing it!
//don't do anything with the keys here...
this->currentOctreeDepth_--;
#include <pcl/PCLPointCloud2.h>
#include <shared_mutex>
+#include <list>
namespace pcl
{
 * recursively in this state. This class provides the interface
* for:
* -# Point/Region insertion methods
- * -# Frustrum/box/region queries
+ * -# Frustum/box/region queries
* -# Parameterization of resolution, container type, etc...
*
* For lower-level node access, there is a Depth-First iterator
std::uint64_t
addDataToLeaf_and_genLOD (AlignedPointTVector &p);
- // Frustrum/Box/Region REQUESTS/QUERIES: DB Accessors
+ // Frustum/Box/Region REQUESTS/QUERIES: DB Accessors
// -----------------------------------------------------------------------
void
queryFrustum (const double *planes, std::list<std::string>& file_names) const;
/** \brief Returns a random subsample of points within the given bounding box at \c query_depth.
*
- * \param[in] min The minimum corner of the boudning box to query.
+ * \param[in] min The minimum corner of the bounding box to query.
 * \param[in] max The maximum corner of the bounding box to query.
* \param[in] query_depth The depth in the tree at which to look for the points. Only returns points within the given bounding box at the specified \c query_depth.
 * \param[in] percent The percentage of points within the bounding box to keep in the random subsample.
#include <memory>
#include <mutex>
#include <random>
+#include <list>
#include <pcl/common/io.h>
#include <pcl/PCLPointCloud2.h>
#include <string>
// Boost
+#include <boost/random/mersenne_twister.hpp> // for boost::mt19937
#include <boost/uuid/random_generator.hpp>
#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/pcl_config.h> // for HAVE_CJSON
+#if defined(HAVE_CJSON)
+#include <cjson/cJSON.h>
+#else
#include <pcl/outofcore/cJSON.h>
+#endif
#include <pcl/outofcore/metadata.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/pcl_config.h> // for HAVE_CJSON
+#if defined(HAVE_CJSON)
+#include <cjson/cJSON.h>
+#else
#include <pcl/outofcore/cJSON.h>
+#endif
#include <pcl/common/eigen.h>
+++ /dev/null
-/*
- Copyright (c) 2009 Dave Gamble
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-*/
-
-/* cJSON */
-/* JSON parser in C. */
-
-#include <pcl/outofcore/cJSON.h>
-#include <pcl/pcl_macros.h>
-
-#include <algorithm>
-#include <cstdio>
-#include <cmath>
-#include <cstdlib>
-#include <cstring>
-#include <cctype>
-#include <limits>
-
-static const char *ep;
-
-const char *cJSON_GetErrorPtr() {return ep;}
-
-static int cJSON_strcasecmp(const char *s1,const char *s2)
-{
- if (!s1) return (s1==s2)?0:1;
- if (!s2) return 1;
- for(; tolower(*s1) == tolower(*s2); ++s1, ++s2) if(*s1 == 0) return 0;
- return tolower(* reinterpret_cast<const unsigned char *> (s1) ) - tolower(* reinterpret_cast<const unsigned char *> (s2) );
-}
-
-static void *(*cJSON_malloc)(std::size_t sz) = malloc;
-static void (*cJSON_free)(void *ptr) = free;
-
-static char* cJSON_strdup(const char* str)
-{
- std::size_t len;
- char* copy;
-
- len = strlen(str) + 1;
- if (!(copy = static_cast<char*> ( cJSON_malloc(len))))
- {
- return (nullptr);
- }
-
- std::copy(str, str + len, copy);
- return (copy);
-}
-
-void cJSON_InitHooks(cJSON_Hooks* hooks)
-{
- if (!hooks) { /* Reset hooks */
- cJSON_malloc = malloc;
- cJSON_free = free;
- return;
- }
-
- cJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc;
- cJSON_free = (hooks->free_fn)?hooks->free_fn:free;
-}
-
-/* Internal constructor. */
-static cJSON *cJSON_New_Item()
-{
- cJSON* node = static_cast<cJSON*> (cJSON_malloc(sizeof(cJSON)));
- if (node) {
- *node = cJSON{};
- }
- return node;
-}
-
-/* Delete a cJSON structure. */
-void cJSON_Delete(cJSON *c)
-{
- cJSON *next;
- while (c)
- {
- next=c->next;
- if (!(c->type&cJSON_IsReference) && c->child) cJSON_Delete(c->child);
- if (!(c->type&cJSON_IsReference) && c->valuestring) cJSON_free(c->valuestring);
- if (c->string) cJSON_free(c->string);
- cJSON_free(c);
- c=next;
- }
-}
-
-/* Parse the input text to generate a number, and populate the result into item. */
-static const char *parse_number(cJSON *item,const char *num)
-{
- double n=0,sign=1,scale=0;int subscale=0,signsubscale=1;
-
- /* Could use sscanf for this? */
- if (*num=='-') sign=-1,num++; /* Has sign? */
- if (*num=='0') num++; /* is zero */
- if (*num>='1' && *num<='9') do n=(n*10.0)+(*num++ -'0'); while (*num>='0' && *num<='9'); /* Number? */
- if (*num=='.') {num++; do n=(n*10.0)+(*num++ -'0'),scale--; while (*num>='0' && *num<='9');} /* Fractional part? */
- if (*num=='e' || *num=='E') /* Exponent? */
- { num++;if (*num=='+') num++; else if (*num=='-') signsubscale=-1,num++; /* With sign? */
- while (*num>='0' && *num<='9') subscale=(subscale*10)+(*num++ - '0'); /* Number? */
- }
-
- n=sign*n*pow(10.0,(scale+subscale*signsubscale)); /* number = +/- number.fraction * 10^+/- exponent */
-
- item->valuedouble=n;
- item->valueint=static_cast<int> (n);
- item->type=cJSON_Number;
- return num;
-}
-
-/* Render the number nicely from the given item into a string. */
-static char *print_number(cJSON *item)
-{
- char *str;
- double d=item->valuedouble;
- if (std::abs((static_cast<double>(item->valueint)-d))<=std::numeric_limits<double>::epsilon() &&
- d <= std::numeric_limits<int>::max() && d >= std::numeric_limits<int>::min())
- {
- str=static_cast<char*>(cJSON_malloc(21)); /* 2^64+1 can be represented in 21 chars. */
- if (str) sprintf(str,"%d",item->valueint);
- }
- else
- {
- str=static_cast<char*>(cJSON_malloc(64)); /* This is a nice tradeoff. */
- if (str)
- {
- if (std::abs(std::floor(d)-d)<=std::numeric_limits<double>::epsilon()) sprintf(str,"%.0f",d);
- else sprintf(str,"%.16g",d);
- }
- }
- return str;
-}
-
-/* Parse the input text into an unescaped cstring, and populate item. */
-static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
-static const char *parse_string(cJSON *item,const char *str)
-{
- const char *ptr=str+1; char *ptr2; char *out; int len=0; unsigned uc;
- if (*str!='\"') {ep=str;return nullptr;} /* not a string! */
-
- while (*ptr!='\"' && *ptr && ++len) if (*ptr++ == '\\') ptr++; /* Skip escaped quotes. */
-
- out=static_cast<char*> (cJSON_malloc(len+1)); /* This is how long we need for the string, roughly. */
- if (!out) return nullptr;
-
- ptr=str+1;ptr2=out;
- while (*ptr!='\"' && *ptr)
- {
- if (*ptr!='\\') *ptr2++=*ptr++;
- else
- {
- ptr++;
- switch (*ptr)
- {
- case 'b': *ptr2++='\b'; break;
- case 'f': *ptr2++='\f'; break;
- case 'n': *ptr2++='\n'; break;
- case 'r': *ptr2++='\r'; break;
- case 't': *ptr2++='\t'; break;
- case 'u': /* transcode utf16 to utf8. DOES NOT SUPPORT SURROGATE PAIRS CORRECTLY. */
- sscanf(ptr+1,"%4x",&uc); /* get the unicode char. */
- len=3;if (uc<0x80) len=1;else if (uc<0x800) len=2;ptr2+=len;
-
- switch (len) {
- case 3: *--ptr2 = static_cast<char>(( (uc) | 0x80) & 0xBF );
- uc >>= 6;
- PCL_FALLTHROUGH
- case 2: *--ptr2 = static_cast<char>(( (uc) | 0x80) & 0xBF );
- uc >>= 6;
- PCL_FALLTHROUGH
- case 1: *--ptr2 = static_cast<char>( (uc) | firstByteMark[len] );
- }
- ptr2+=len;ptr+=4;
- break;
- default: *ptr2++=*ptr; break;
- }
- ptr++;
- }
- }
- *ptr2=0;
- if (*ptr=='\"') ptr++;
- item->valuestring=out;
- item->type=cJSON_String;
- return ptr;
-}
-
-/* Render the cstring provided to an escaped version that can be printed. */
-static char *print_string_ptr(const char *str)
-{
- const char *ptr;char *ptr2,*out;int len=0;unsigned char token;
-
- if (!str) return cJSON_strdup("");
- ptr=str;while ((token=*ptr) && ++len) {if (strchr("\"\\\b\f\n\r\t",token)) len++; else if (token<32) len+=5;ptr++;}
-
- out=static_cast<char*>(cJSON_malloc(len+3));
- if (!out) return nullptr;
-
- ptr2=out;ptr=str;
- *ptr2++='\"';
- while (*ptr)
- {
- if (static_cast<unsigned char>(*ptr)>31 && *ptr!='\"' && *ptr!='\\') *ptr2++=*ptr++;
- else
- {
- *ptr2++='\\';
- switch (token=*ptr++)
- {
- case '\\': *ptr2++='\\'; break;
- case '\"': *ptr2++='\"'; break;
- case '\b': *ptr2++='b'; break;
- case '\f': *ptr2++='f'; break;
- case '\n': *ptr2++='n'; break;
- case '\r': *ptr2++='r'; break;
- case '\t': *ptr2++='t'; break;
- default: sprintf(ptr2,"u%04x",token);ptr2+=5; break; /* escape and print */
- }
- }
- }
- *ptr2++='\"';*ptr2++=0;
- return out;
-}
-/* Invote print_string_ptr (which is useful) on an item. */
-static char *print_string(cJSON *item) {return print_string_ptr(item->valuestring);}
-
-/* Predeclare these prototypes. */
-static const char *parse_value(cJSON *item,const char *value);
-static char *print_value(cJSON *item,int depth,int fmt);
-static const char *parse_array(cJSON *item,const char *value);
-static char *print_array(cJSON *item,int depth,int fmt);
-static const char *parse_object(cJSON *item,const char *value);
-static char *print_object(cJSON *item,int depth,int fmt);
-
-/* Utility to jump whitespace and cr/lf */
-static const char *skip(const char *in) {while (in && *in && static_cast<unsigned char>(*in)<=32) in++; return in;}
-
-/* Parse an object - create a new root, and populate. */
-cJSON *cJSON_Parse(const char *value)
-{
- cJSON *c=cJSON_New_Item();
- ep=nullptr;
- if (!c) return nullptr; /* memory fail */
-
- if (!parse_value(c,skip(value))) {cJSON_Delete(c);return nullptr;}
- return c;
-}
-
-/* Render a cJSON item/entity/structure to text. */
-char *cJSON_Print(cJSON *item) {return print_value(item,0,1);}
-char *cJSON_PrintUnformatted(cJSON *item) {return print_value(item,0,0);}
-
-void cJSON_PrintStr(cJSON *item, std::string& s)
-{
- char* c = cJSON_Print(item);
- s.assign(c);
- free(c);
-}
-
-void cJSON_PrintUnformattedStr(cJSON *item, std::string& s)
-{
- char* c = cJSON_PrintUnformatted(item);
- s.assign(c);
- free(c);
-}
-
-/* Parser core - when encountering text, process appropriately. */
-static const char *parse_value(cJSON *item,const char *value)
-{
- if (!value) return nullptr; /* Fail on null. */
- if (!strncmp(value,"null",4)) { item->type=cJSON_NULL; return value+4; }
- if (!strncmp(value,"false",5)) { item->type=cJSON_False; return value+5; }
- if (!strncmp(value,"true",4)) { item->type=cJSON_True; item->valueint=1; return value+4; }
- if (*value=='\"') { return parse_string(item,value); }
- if (*value=='-' || (*value>='0' && *value<='9')) { return parse_number(item,value); }
- if (*value=='[') { return parse_array(item,value); }
- if (*value=='{') { return parse_object(item,value); }
-
- ep=value;return nullptr; /* failure. */
-}
-
-/* Render a value to text. */
-static char *print_value(cJSON *item,int depth,int fmt)
-{
- char *out=nullptr;
- if (!item) return nullptr;
- switch ((item->type)&255)
- {
- case cJSON_NULL: out=cJSON_strdup("null"); break;
- case cJSON_False: out=cJSON_strdup("false");break;
- case cJSON_True: out=cJSON_strdup("true"); break;
- case cJSON_Number: out=print_number(item);break;
- case cJSON_String: out=print_string(item);break;
- case cJSON_Array: out=print_array(item,depth,fmt);break;
- case cJSON_Object: out=print_object(item,depth,fmt);break;
- }
- return out;
-}
-
-/* Build an array from input text. */
-static const char *parse_array(cJSON *item,const char *value)
-{
- cJSON *child;
- if (*value!='[') {ep=value;return nullptr;} /* not an array! */
-
- item->type=cJSON_Array;
- value=skip(value+1);
- if (*value==']') return value+1; /* empty array. */
-
- item->child=child=cJSON_New_Item();
- if (!item->child) return nullptr; /* memory fail */
- value=skip(parse_value(child,skip(value))); /* skip any spacing, get the value. */
- if (!value) return nullptr;
-
- while (*value==',')
- {
- cJSON *new_item;
- if (!(new_item=cJSON_New_Item())) return nullptr; /* memory fail */
- child->next=new_item;new_item->prev=child;child=new_item;
- value=skip(parse_value(child,skip(value+1)));
- if (!value) return nullptr; /* memory fail */
- }
-
- if (*value==']') return value+1; /* end of array */
- ep=value;return nullptr; /* malformed. */
-}
-
-/* Render an array to text */
-static char *print_array(cJSON *item,int depth,int fmt)
-{
- char **entries;
- char *out=nullptr,*ptr,*ret;std::size_t len=5;
- cJSON *child=item->child;
- int numentries=0,i=0,fail=0;
-
- /* How many entries in the array? */
- while (child) numentries++,child=child->next;
- /* Allocate an array to hold the values for each */
- entries=static_cast<char**>(cJSON_malloc(numentries*sizeof(char*)));
- if (!entries) return nullptr;
- std::fill_n(entries, numentries, nullptr);
- /* Retrieve all the results: */
- child=item->child;
- while (child && !fail)
- {
- ret=print_value(child,depth+1,fmt);
- entries[i++]=ret;
- if (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1;
- child=child->next;
- }
-
- /* If we didn't fail, try to malloc the output string */
- if (!fail) out=static_cast<char*>(cJSON_malloc(len));
- /* If that fails, we fail. */
- if (!out) fail=1;
-
- /* Handle failure. */
- if (fail)
- {
- for (i=0;i<numentries;i++) if (entries[i]) cJSON_free(entries[i]);
- cJSON_free(entries);
- return nullptr;
- }
-
- /* Compose the output array. */
- *out='[';
- ptr=out+1;*ptr=0;
- for (i=0;i<numentries;i++)
- {
- strcpy(ptr,entries[i]);ptr+=strlen(entries[i]);
- if (i!=numentries-1) {*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;}
- cJSON_free(entries[i]);
- }
- cJSON_free(entries);
- *ptr++=']';*ptr++=0;
- return out;
-}
-
-/* Build an object from the text. */
-static const char *parse_object(cJSON *item,const char *value)
-{
- cJSON *child;
- if (*value!='{') {ep=value;return nullptr;} /* not an object! */
-
- item->type=cJSON_Object;
- value=skip(value+1);
- if (*value=='}') return value+1; /* empty array. */
-
- item->child=child=cJSON_New_Item();
- if (!item->child) return nullptr;
- value=skip(parse_string(child,skip(value)));
- if (!value) return nullptr;
- child->string=child->valuestring;child->valuestring=nullptr;
- if (*value!=':') {ep=value;return nullptr;} /* fail! */
- value=skip(parse_value(child,skip(value+1))); /* skip any spacing, get the value. */
- if (!value) return nullptr;
-
- while (*value==',')
- {
- cJSON *new_item;
- if (!(new_item=cJSON_New_Item())) return nullptr; /* memory fail */
- child->next=new_item;new_item->prev=child;child=new_item;
- value=skip(parse_string(child,skip(value+1)));
- if (!value) return nullptr;
- child->string=child->valuestring;child->valuestring=nullptr;
- if (*value!=':') {ep=value;return nullptr;} /* fail! */
- value=skip(parse_value(child,skip(value+1))); /* skip any spacing, get the value. */
- if (!value) return nullptr;
- }
-
- if (*value=='}') return value+1; /* end of array */
- ep=value;return nullptr; /* malformed. */
-}
-
-/* Render an object to text. */
-static char *print_object(cJSON *item,int depth,int fmt)
-{
- char *out=nullptr;
- std::size_t len=7;
- cJSON *child=item->child;
- std::size_t numentries=0,fail=0;
- /* Count the number of entries. */
- while (child) numentries++,child=child->next;
- /* Allocate space for the names and the objects */
- char **entries = static_cast<char**>(cJSON_malloc(numentries*sizeof(char*)));
- if (!entries) return nullptr;
- char **names=static_cast<char**>(cJSON_malloc(numentries*sizeof(char*)));
- if (!names) {cJSON_free(entries);return nullptr;}
- std::fill_n(entries, numentries, nullptr);
- std::fill_n(names, numentries, nullptr);
-
- /* Collect all the results into our arrays: */
- child=item->child;depth++;if (fmt) len+=depth;
- int childId = 0;
- while (child)
- {
- char *str = print_string_ptr(child->string);
- names[childId] = str;
- char *ret = print_value(child,depth,fmt);
- entries[childId++] = ret;
- if (str && ret) len+=strlen(ret)+strlen(str)+2+(fmt?2+depth:0); else fail=1;
- child=child->next;
- }
-
- /* Try to allocate the output string */
- if (!fail) out=static_cast<char*> (cJSON_malloc(len));
- if (!out) fail=1;
-
- /* Handle failure */
- if (fail)
- {
- for (std::size_t i=0;i<numentries;i++) {if (names[i]) cJSON_free(names[i]);if (entries[i]) cJSON_free(entries[i]);}
- cJSON_free(names);cJSON_free(entries);
- return nullptr;
- }
-
- /* Compose the output: */
- *out='{';
- char *ptr = out+1;
- if (fmt)*ptr++='\n';
- *ptr=0;
- for (std::size_t i=0;i<numentries;i++)
- {
- if (fmt) for (int j=0; j < depth; j++) *ptr++='\t';
- strcpy(ptr,names[i]);ptr+=strlen(names[i]);
- *ptr++=':';if (fmt) *ptr++='\t';
- strcpy(ptr,entries[i]);ptr+=strlen(entries[i]);
- if (i!=numentries-1) *ptr++=',';
- if (fmt) *ptr++='\n';
- *ptr=0;
- cJSON_free(names[i]);cJSON_free(entries[i]);
- }
-
- cJSON_free(names);cJSON_free(entries);
- if (fmt) for (int i=0; i < depth-1; i++) *ptr++='\t';
- *ptr++='}';*ptr++=0;
- return out;
-}
-
-/* Get Array size/item / object item. */
-int cJSON_GetArraySize(cJSON *array) {cJSON *c=array->child;int i=0;while(c)i++,c=c->next;return i;}
-cJSON *cJSON_GetArrayItem(cJSON *array,int item) {cJSON *c=array->child; while (c && item>0) item--,c=c->next; return c;}
-cJSON *cJSON_GetObjectItem(cJSON *object,const char *string) {cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;}
-
-/* Utility for array list handling. */
-static void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;}
-/* Utility for handling references. */
-static cJSON*
-create_reference(cJSON* item)
-{
- cJSON* ref = cJSON_New_Item();
- if (!ref) {
- return nullptr;
- }
- *ref = *item;
- ref->string = nullptr;
- ref->type |= cJSON_IsReference;
- ref->next = ref->prev = nullptr;
- return ref;
-}
-
-/* Add item to array/object. */
-void cJSON_AddItemToArray(cJSON *array, cJSON *item) {cJSON *c=array->child;if (!item) return; if (!c) {array->child=item;} else {while (c && c->next) c=c->next; suffix_object(c,item);}}
-void cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item) {if (!item) return; if (item->string) cJSON_free(item->string);item->string=cJSON_strdup(string);cJSON_AddItemToArray(object,item);}
-void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) {cJSON_AddItemToArray(array,create_reference(item));}
-void cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item) {cJSON_AddItemToObject(object,string,create_reference(item));}
-
-cJSON *cJSON_DetachItemFromArray(cJSON *array,int which) {
- cJSON *c=array->child;
- while (c && which>0) c=c->next,which--;
- if (!c) return nullptr;
- if (c->prev) c->prev->next=c->next;
- if (c->next) c->next->prev=c->prev;
- if (c==array->child) array->child=c->next;
- c->prev=c->next=nullptr;
- return c;
-}
-void cJSON_DeleteItemFromArray(cJSON *array,int which) {cJSON_Delete(cJSON_DetachItemFromArray(array,which));}
-cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string) {int i=0;cJSON *c=object->child;while (c && cJSON_strcasecmp(c->string,string)) i++,c=c->next;if (c) return cJSON_DetachItemFromArray(object,i);return nullptr;}
-void cJSON_DeleteItemFromObject(cJSON *object,const char *string) {cJSON_Delete(cJSON_DetachItemFromObject(object,string));}
-
-/* Replace array/object items with new ones. */
-void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return;
- newitem->next=c->next;newitem->prev=c->prev;if (newitem->next) newitem->next->prev=newitem;
- if (c==array->child) array->child=newitem; else newitem->prev->next=newitem;c->next=c->prev=nullptr;cJSON_Delete(c);}
-void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem){int i=0;cJSON *c=object->child;while(c && cJSON_strcasecmp(c->string,string))i++,c=c->next;if(c){newitem->string=cJSON_strdup(string);cJSON_ReplaceItemInArray(object,i,newitem);}}
-
-/* Create basic types: */
-cJSON *cJSON_CreateNull() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_NULL;return item;}
-cJSON *cJSON_CreateTrue() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_True;return item;}
-cJSON *cJSON_CreateFalse() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_False;return item;}
-cJSON *cJSON_CreateBool(int b) {cJSON *item=cJSON_New_Item();if(item)item->type=b?cJSON_True:cJSON_False;return item;}
-cJSON *cJSON_CreateNumber(double num) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_Number;item->valuedouble=num;item->valueint=static_cast<int>(num);}return item;}
-cJSON *cJSON_CreateString(const char *string) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_String;item->valuestring=cJSON_strdup(string);}return item;}
-cJSON *cJSON_CreateArray() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Array;return item;}
-cJSON *cJSON_CreateObject() {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;}
-
-/* Create Arrays: */
-cJSON *cJSON_CreateIntArray(int *numbers,int count) {cJSON *p=nullptr,*a=cJSON_CreateArray();for(int i=0;a && i<count;i++){cJSON *n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateFloatArray(float *numbers,int count) {cJSON *p=nullptr,*a=cJSON_CreateArray();for(int i=0;a && i<count;i++){cJSON *n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateDoubleArray(double *numbers,int count) {cJSON *p=nullptr,*a=cJSON_CreateArray();for(int i=0;a && i<count;i++){cJSON *n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateStringArray(const char **strings,int count) {cJSON *p=nullptr,*a=cJSON_CreateArray();for(int i=0;a && i<count;i++){cJSON *n=cJSON_CreateString(strings[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
else()
set(DEFAULT TRUE)
set(REASON)
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
set(srcs outofcore_viewer.cpp
../src/visualization/camera.cpp
// Ensure the compiler is meeting the minimum C++ standard
// MSVC is not checked via __cplusplus due to
// https://developercommunity.visualstudio.com/content/problem/120156/-cplusplus-macro-still-defined-as-pre-c11-value.html
-#if (!defined(_MSC_VER) && __cplusplus < 201402L) || (defined(_MSC_VER) && _MSC_VER < 1900)
- #error PCL requires C++14 or above
+#if defined(__cplusplus) && ((!defined(_MSC_VER) && __cplusplus < ${PCL__cplusplus}) || (defined(_MSC_VER) && _MSC_VER < ${PCL_REQUIRES_MSC_VER}) || (defined(_MSVC_LANG) && _MSVC_LANG < ${PCL__cplusplus}))
+ #error C++ standard too low (PCL requires ${PCL__cplusplus} or above)
#endif
#define BUILD_@CMAKE_BUILD_TYPE@
+#cmakedefine PCL_SYMBOL_VISIBILITY_HIDDEN
/* PCL version information */
#define PCL_MAJOR_VERSION ${PCL_VERSION_MAJOR}
#define PCL_MINOR_VERSION ${PCL_VERSION_MINOR}
#define PCL_REVISION_VERSION ${PCL_VERSION_PATCH}
#define PCL_DEV_VERSION ${PCL_DEV_VERSION}
#define PCL_VERSION_PRETTY "${PCL_VERSION_PRETTY}"
-#define PCL_VERSION_CALC(MAJ, MIN, PATCH) (MAJ*100000+MIN*100+PATCH)
+#define PCL_VERSION_CALC(MAJ, MIN, PATCH) ((MAJ)*100000+(MIN)*100+(PATCH))
#define PCL_VERSION \
PCL_VERSION_CALC(PCL_MAJOR_VERSION, PCL_MINOR_VERSION, PCL_REVISION_VERSION)
#define PCL_VERSION_COMPARE(OP, MAJ, MIN, PATCH) \
#endif //PCL_MINOR_VERSION
#endif
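As a brief editorial aside on the PCL_VERSION_CALC change above: parenthesizing the macro arguments prevents operator-precedence surprises when an argument is itself an expression. A hypothetical illustration (not from the patch):

#define PCL_VERSION_CALC(MAJ, MIN, PATCH) ((MAJ)*100000+(MIN)*100+(PATCH))
// Without the parentheses, PCL_VERSION_CALC(1 + 1, 0, 0) would expand to
// 1 + 1*100000 + 0*100 + 0 == 100001 instead of the intended 200000.
static_assert (PCL_VERSION_CALC (1 + 1, 0, 0) == 200000, "arguments are parenthesized before use");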
+#define PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC ${PCL_USES_EIGEN_HANDMADE_ALIGNED_MALLOC}
+
#cmakedefine HAVE_OPENNI 1
#cmakedefine HAVE_OPENNI2 1
#cmakedefine HAVE_ZLIB
+#cmakedefine HAVE_CJSON
+
+#cmakedefine PCL_PREFER_BOOST_FILESYSTEM
+
/* Precompile for a minimal set of point types instead of all. */
#cmakedefine PCL_ONLY_CORE_POINT_TYPES
+#define PCL_XYZ_POINT_TYPES @PCL_XYZ_POINT_TYPES@
+
+#define PCL_NORMAL_POINT_TYPES @PCL_NORMAL_POINT_TYPES@
+
/* Do not precompile for any point types at all. */
#cmakedefine PCL_NO_PRECOMPILE
set(SUBSYS_DESC "Point cloud people library")
set(SUBSYS_DEPS common kdtree search sample_consensus filters io visualization geometry segmentation octree)
-if(NOT VTK_FOUND)
- set(DEFAULT FALSE)
- set(REASON "VTK was not found.")
-else()
- set(DEFAULT TRUE)
- set(REASON)
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-endif()
-
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries(${LIB_NAME} pcl_common pcl_filters pcl_kdtree pcl_sample_consensus pcl_segmentation pcl_visualization)
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl" ${impl_incs})
-#SET_TARGET_PROPERTIES("${LIB_NAME}" PROPERTIES LINKER_LANGUAGE CXX)
-
if(WITH_OPENNI)
PCL_ADD_EXECUTABLE(pcl_ground_based_rgbd_people_detector COMPONENT ${SUBSYS_NAME} SOURCES apps/main_ground_based_people_detection.cpp BUNDLE)
target_link_libraries(pcl_ground_based_rgbd_people_detector pcl_common pcl_kdtree pcl_search pcl_sample_consensus pcl_filters pcl_io pcl_visualization pcl_segmentation pcl_people)
{
float min_distance_between_cluster_centers = 0.4; // meters
float normalize_factor = std::pow(sqrt_ground_coeffs_, 2); // sqrt_ground_coeffs ^ 2 (precomputed for speed)
- Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head(3); // ground plane normal (precomputed for speed)
+ Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head<3>(); // ground plane normal (precomputed for speed)
std::vector <std::vector<int> > connected_clusters;
connected_clusters.resize(input_clusters.size());
  std::vector<bool> used_clusters;            // false for clusters still to be processed, true for clusters already used
{
// create new clusters from the current cluster and put corresponding indices into sub_clusters_indices:
float normalize_factor = std::pow(sqrt_ground_coeffs_, 2); // sqrt_ground_coeffs ^ 2 (precomputed for speed)
- Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head(3); // ground plane normal (precomputed for speed)
+ Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head<3>(); // ground plane normal (precomputed for speed)
Eigen::Matrix3Xf maxima_projected(3,maxima_number); // matrix containing the projection of maxima onto the ground plane
Eigen::VectorXi subclusters_number_of_points(maxima_number); // subclusters number of points
std::vector <pcl::Indices> sub_clusters_indices; // vector of vectors with the cluster indices for every maximum
PointT* p_current = &(*cloud_)[maxima_cloud_indices_[i]]; // pointcloud point referring to the current maximum
Eigen::Vector3f p_current_eigen(p_current->x, p_current->y, p_current->z); // conversion to eigen
- float t = p_current_eigen.dot(ground_coeffs_.head(3)) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
- p_current_eigen -= ground_coeffs_.head(3) * t; // projection of the point on the groundplane
+ float t = p_current_eigen.dot(ground_coeffs_.head<3>()) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
+ p_current_eigen -= ground_coeffs_.head<3>() * t; // projection of the point on the groundplane
int j = i-1;
while ((j >= 0) && (good_maximum))
{
PointT* p_previous = &(*cloud_)[maxima_cloud_indices_[j]]; // pointcloud point referring to an already validated maximum
Eigen::Vector3f p_previous_eigen(p_previous->x, p_previous->y, p_previous->z); // conversion to eigen
- float t = p_previous_eigen.dot(ground_coeffs_.head(3)) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
- p_previous_eigen -= ground_coeffs_.head(3) * t; // projection of the point on the groundplane
+ float t = p_previous_eigen.dot(ground_coeffs_.head<3>()) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
+ p_previous_eigen -= ground_coeffs_.head<3>() * t; // projection of the point on the groundplane
// distance of the projection of the points on the groundplane:
float distance = (p_current_eigen-p_previous_eigen).norm();
set(SUBSYS_DESC "Point cloud recognition library")
set(SUBSYS_DEPS common io search kdtree octree features filters registration sample_consensus ml)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
+set(DEFAULT ON)
+
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
PCL_ADD_DOC("${SUBSYS_NAME}")
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/color_gradient_dot_modality.h"
"include/pcl/${SUBSYS_NAME}/color_gradient_modality.h"
"include/pcl/${SUBSYS_NAME}/color_modality.h"
"include/pcl/${SUBSYS_NAME}/dense_quantized_multi_mod_template.h"
"include/pcl/${SUBSYS_NAME}/sparse_quantized_multi_mod_template.h"
"include/pcl/${SUBSYS_NAME}/surface_normal_modality.h"
- "include/pcl/${SUBSYS_NAME}/line_rgbd.h"
"include/pcl/${SUBSYS_NAME}/implicit_shape_model.h"
- "include/pcl/${SUBSYS_NAME}/auxiliary.h"
- "include/pcl/${SUBSYS_NAME}/hypothesis.h"
- "include/pcl/${SUBSYS_NAME}/model_library.h"
- "include/pcl/${SUBSYS_NAME}/rigid_transform_space.h"
- "include/pcl/${SUBSYS_NAME}/obj_rec_ransac.h"
- "include/pcl/${SUBSYS_NAME}/orr_graph.h"
- "include/pcl/${SUBSYS_NAME}/orr_octree_zprojection.h"
- "include/pcl/${SUBSYS_NAME}/trimmed_icp.h"
- "include/pcl/${SUBSYS_NAME}/orr_octree.h"
- "include/pcl/${SUBSYS_NAME}/simple_octree.h"
- "include/pcl/${SUBSYS_NAME}/voxel_structure.h"
- "include/pcl/${SUBSYS_NAME}/bvh.h"
)
set(ransac_based_incs
)
set(impl_incs
- "include/pcl/${SUBSYS_NAME}/impl/line_rgbd.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/simple_octree.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/voxel_structure.hpp"
"include/pcl/${SUBSYS_NAME}/impl/implicit_shape_model.hpp"
)
src/implicit_shape_model.cpp
)
-if(HAVE_METSLIB)
- set(metslib_incs "")
-else()
- set(metslib_incs
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/abstract-search.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/local-search.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/mets.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/metslib_config.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/model.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/observer.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/simulated-annealing.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/tabu-search.hh"
- "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/termination-criteria.hh"
- )
-endif()
+set(metslib_incs
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/abstract-search.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/local-search.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/mets.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/metslib_config.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/model.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/observer.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/simulated-annealing.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/tabu-search.hh"
+ "include/pcl/${SUBSYS_NAME}/3rdparty/metslib/termination-criteria.hh"
+)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs} ${face_detection_incs} ${ransac_based_incs} ${ransac_based_impl_incs} ${hv_incs} ${hv_impl_incs} ${cg_incs} ${cg_impl_incs} ${metslib_incs})
target_link_libraries("${LIB_NAME}" pcl_common pcl_kdtree pcl_octree pcl_search pcl_features pcl_registration pcl_sample_consensus pcl_filters pcl_ml pcl_io)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl/cg" ${cg_impl_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/linemod" ${LINEMOD_INCLUDES})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl/linemod" ${LINEMOD_IMPLS})
-
-if(NOT HAVE_METSLIB)
- PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/3rdparty/metslib" ${metslib_incs})
-endif()
+PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/3rdparty/metslib" ${metslib_incs})
+++ /dev/null
-#include <pcl/recognition/ransac_based/auxiliary.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/auxiliary.h> instead")
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-#include <boost/graph/graph_traits.hpp>
+++ /dev/null
-#include <pcl/recognition/ransac_based/bvh.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/bvh.h> instead")
convolution.setInputCloud (rgb_input_);
convolution.setKernel (gaussian_kernel);
-
+ convolution.setBordersPolicy(pcl::filters::Convolution<pcl::RGB, pcl::RGB>::BORDERS_POLICY_DUPLICATE);
convolution.convolve (*smoothed_input_);
// extract color gradients
/**
* \brief Template matching using the DOTMOD approach.
* \author Stefan Holzer, Stefan Hinterstoisser
+ * \ingroup recognition
*/
class PCL_EXPORTS DOTMOD
{
#pragma once
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/memory.h>
#include <pcl/ml/dt/decision_tree_data_provider.h>
#include <pcl/recognition/face_detection/face_common.h>
#include <boost/algorithm/string.hpp>
-#include <boost/filesystem/operations.hpp>
#include <fstream>
#include <string>
-namespace bf = boost::filesystem;
-
namespace pcl
{
namespace face_detection
int patches_per_image_;
int min_images_per_bin_;
- void getFilesInDirectory(bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths, std::string & ext)
+ void getFilesInDirectory(pcl_fs::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths, std::string & ext)
{
- for (const auto& dir_entry : bf::directory_iterator(dir))
+ for (const auto& dir_entry : pcl_fs::directory_iterator(dir))
{
// check if it's a directory, then get models in it
- if (bf::is_directory (dir_entry))
+ if (pcl_fs::is_directory (dir_entry))
{
std::string so_far = rel_path_so_far + (dir_entry.path ().filename ()).string () + "/";
- bf::path curr_path = dir_entry.path ();
+ pcl_fs::path curr_path = dir_entry.path ();
getFilesInDirectory (curr_path, so_far, relative_paths, ext);
} else
{
/**
* \brief A greedy hypothesis verification method
* \author Aitor Aldoma
+ * \ingroup recognition
*/
template<typename ModelT, typename SceneT>
+++ /dev/null
-#include <pcl/recognition/ransac_based/hypothesis.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/hypothesis.h> instead")
PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point cloud different than normals!\n");
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
Eigen::Vector3f scene_p_normal = (*scene_normals_)[it->first].getNormalVector3fMap ();
Eigen::Vector3f model_p_normal =
(*recog_model->normals_)[it->second->at(closest).first].getNormalVector3fMap();
- float dotp = scene_p_normal.dot (model_p_normal) * 1.f; //[-1,1] from antiparallel trough perpendicular to parallel
+ float dotp = scene_p_normal.dot (model_p_normal) * 1.f; //[-1,1] from antiparallel through perpendicular to parallel
if (dotp < 0.f)
dotp = 0.f;
//using normals to weight clutter points
Eigen::Vector3f scene_p_normal = (*scene_normals_)[neighborhood_index.first].getNormalVector3fMap ();
Eigen::Vector3f model_p_normal = (*scene_normals_)[recog_model->explained_[neighborhood_index.second]].getNormalVector3fMap ();
- float dotp = scene_p_normal.dot (model_p_normal); //[-1,1] from antiparallel trough perpendicular to parallel
+ float dotp = scene_p_normal.dot (model_p_normal); //[-1,1] from antiparallel through perpendicular to parallel
if (dotp < 0)
dotp = 0.f;
* POSSIBILITY OF SUCH DAMAGE.
*
*
- * Implementation of the ISM algorithm described in "Hough Transforms and 3D SURF for robust three dimensional classication"
+ * Implementation of the ISM algorithm described in "Hough Transforms and 3D SURF for robust three dimensional classification"
* by Jan Knopp, Mukta Prasad, Geert Willems, Radu Timofte, and Luc Van Gool
*
* Authors: Roman Shapovalov, Alexander Velizhev, Sergey Ushakov
+++ /dev/null
-#include <pcl/recognition/impl/linemod/line_rgbd.hpp>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/linemod/line_rgbd.hpp> instead")
+++ /dev/null
-#include <pcl/recognition/impl/ransac_based/simple_octree.hpp>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/ransac_based/simple_octree.hpp> instead")
+++ /dev/null
-#include <pcl/recognition/impl/ransac_based/voxel_structure.hpp>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/impl/ransac_based/voxel_structure.hpp> instead")
namespace ism
{
/** \brief This class implements Implicit Shape Model algorithm described in
- * "Hough Transforms and 3D SURF for robust three dimensional classication"
+ * "Hough Transforms and 3D SURF for robust three dimensional classification"
* by Jan Knopp, Mukta Prasad, Geert Willems, Radu Timofte, and Luc Van Gool.
 * It has two main member functions: one for training on data whose class labels are
 * known, and one for investigating a cloud for the presence of the class of interest.
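A minimal sketch of that train-then-detect workflow, wrapped in a hypothetical helper and loosely following the PCL ISM tutorial (the feature radius, sampling size and class labels below are illustrative assumptions, not values from this changeset):

#include <vector>
#include <pcl/features/fpfh.h>
#include <pcl/point_types.h>
#include <pcl/recognition/implicit_shape_model.h>

using ISMEstimation =
    pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal>;

// Trains an ISM on labelled clouds, then searches test_cloud for class_of_interest.
std::vector<pcl::ISMPeak, Eigen::aligned_allocator<pcl::ISMPeak>>
trainAndDetect(const std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>& training_clouds,
               const std::vector<pcl::PointCloud<pcl::Normal>::Ptr>& training_normals,
               const std::vector<unsigned int>& training_classes,
               const pcl::PointCloud<pcl::PointXYZ>::Ptr& test_cloud,
               const pcl::PointCloud<pcl::Normal>::Ptr& test_normals,
               int class_of_interest)
{
  pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153>>::Ptr fpfh(
      new pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153>>);
  fpfh->setRadiusSearch(30.0); // illustrative value

  ISMEstimation ism;
  ism.setFeatureEstimator(fpfh);
  ism.setTrainingClouds(training_clouds);
  ism.setTrainingNormals(training_normals);
  ism.setTrainingClasses(training_classes);
  ism.setSamplingSize(2.0f); // illustrative value

  // first main member function: training
  ISMEstimation::ISMModelPtr model(new pcl::features::ISMModel);
  ism.trainISM(model);

  // second main member function: cast votes for the class of interest,
  // then extract the strongest peaks from the vote list
  auto votes = ism.findObjects(model, test_cloud, test_normals, class_of_interest);
  std::vector<pcl::ISMPeak, Eigen::aligned_allocator<pcl::ISMPeak>> peaks;
  votes->findStrongestPeaks(peaks, class_of_interest,
                            model->sigmas_[class_of_interest] * 10.0,
                            model->sigmas_[class_of_interest]);
  return peaks;
}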
- * Implementation of the ISM algorithm described in "Hough Transforms and 3D SURF for robust three dimensional classication"
+ * Implementation of the ISM algorithm described in "Hough Transforms and 3D SURF for robust three dimensional classification"
* by Jan Knopp, Mukta Prasad, Geert Willems, Radu Timofte, and Luc Van Gool
*
* Authors: Roman Shapovalov, Alexander Velizhev, Sergey Ushakov
+++ /dev/null
-#include <pcl/recognition/linemod/line_rgbd.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/linemod/line_rgbd.h> instead")
/** \brief High-level class for template matching using the LINEMOD approach based on RGB and Depth data.
* \author Stefan Holzer
+ * \ingroup recognition
*/
template <typename PointXYZT, typename PointRGBT=PointXYZT>
class PCL_EXPORTS LineRGBD
+++ /dev/null
-#include <pcl/recognition/ransac_based/model_library.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/model_library.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/obj_rec_ransac.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/orr_graph.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_graph.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/orr_octree.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_octree.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/orr_octree_zprojection.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/orr_octree_zprojection.h> instead")
inline bool
intersect(const float box[6]) const
{
- return !(box[1] < bounds_[0] || box[3] < bounds_[2] || box[5] < bounds_[4] ||
- box[0] > bounds_[1] || box[2] > bounds_[3] || box[4] > bounds_[5]);
+ return (box[1] >= bounds_[0] && box[3] >= bounds_[2] && box[5] >= bounds_[4] &&
+ box[0] <= bounds_[1] && box[2] <= bounds_[3] && box[4] <= bounds_[5]);
}
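The rewritten predicate is the De Morgan complement of the old negated test, with the node bounds laid out as [xmin, xmax, ymin, ymax, zmin, zmax] as elsewhere in these octree/BVH structures; for finite coordinates the two forms agree (with NaNs they can differ). A tiny standalone check of that equivalence:

#include <cassert>

// Old form: "not separated on any axis"; new form: "overlapping on every axis".
static bool intersectOld(const float box[6], const float bounds[6])
{
  return !(box[1] < bounds[0] || box[3] < bounds[2] || box[5] < bounds[4] ||
           box[0] > bounds[1] || box[2] > bounds[3] || box[4] > bounds[5]);
}

static bool intersectNew(const float box[6], const float bounds[6])
{
  return (box[1] >= bounds[0] && box[3] >= bounds[2] && box[5] >= bounds[4] &&
          box[0] <= bounds[1] && box[2] <= bounds[3] && box[4] <= bounds[5]);
}

int main()
{
  const float bounds[6]      = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
  const float overlapping[6] = {0.5f, 1.5f, 0.5f, 1.5f, 0.5f, 1.5f};
  const float disjoint[6]    = {2.f, 3.f, 0.f, 1.f, 0.f, 1.f}; // separated along x
  assert(intersectOld(overlapping, bounds) && intersectNew(overlapping, bounds));
  assert(!intersectOld(disjoint, bounds) && !intersectNew(disjoint, bounds));
  return 0;
}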
/** \brief Computes and returns the volume of the bounding box of this node. */
+++ /dev/null
-#include <pcl/recognition/ransac_based/rigid_transform_space.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/rigid_transform_space.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/simple_octree.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/simple_octree.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/trimmed_icp.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/trimmed_icp.h> instead")
+++ /dev/null
-#include <pcl/recognition/ransac_based/voxel_structure.h>
-PCL_DEPRECATED_HEADER(1, 15, "Please use <pcl/recognition/ransac_based/voxel_structure.h> instead")
{
std::string start;
std::string ext = std::string ("pcd");
- bf::path dir = data_dir;
+ pcl_fs::path dir = data_dir;
std::vector < std::string > files;
getFilesInDirectory (dir, start, files, ext);
if (readMatrixFromFile (pose_file, pose_mat))
{
- Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
+ Eigen::Vector3f ea = pose_mat.topLeftCorner<3, 3> ().eulerAngles (0, 1, 2);
ea *= 57.2957795f; //transform it to degrees to do the binning
int y = static_cast<int>(pcl_round ((ea[0] + static_cast<float>(std::abs (min_yaw))) / res_yaw));
int p = static_cast<int>(pcl_round ((ea[1] + static_cast<float>(std::abs (min_pitch))) / res_pitch));
pose_mat.setIdentity (4, 4);
readMatrixFromFile (pose_file, pose_mat);
- Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
+ Eigen::Vector3f ea = pose_mat.topLeftCorner<3, 3> ().eulerAngles (0, 1, 2);
Eigen::Vector3f trans_vector = Eigen::Vector3f (pose_mat (0, 3), pose_mat (1, 3), pose_mat (2, 3));
pcl::PointXYZ center_point;
Eigen::Matrix4f guess;
guess.setIdentity ();
- guess.block<3, 3> (0, 0) = matrixxx;
+ guess.topLeftCorner<3, 3> () = matrixxx;
guess (0, 3) = head_clusters_centers_[i][0];
guess (1, 3) = head_clusters_centers_[i][1];
guess (2, 3) = head_clusters_centers_[i][2];
head_clusters_centers_[i][1] = icp_trans (1, 3);
head_clusters_centers_[i][2] = icp_trans (2, 3);
- Eigen::Vector3f ea = icp_trans.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
+ Eigen::Vector3f ea = icp_trans.topLeftCorner<3, 3> ().eulerAngles (0, 1, 2);
head_clusters_rotation_[i][0] = ea[0];
head_clusters_rotation_[i][1] = ea[1];
head_clusters_rotation_[i][2] = ea[2];
#include <pcl/recognition/impl/implicit_shape_model.hpp>
// Instantiations of specific point types
-template class pcl::features::ISMVoteList<pcl::PointXYZ>;
+template class PCL_EXPORTS pcl::features::ISMVoteList<pcl::PointXYZ>;
-template class pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal>;
+template class PCL_EXPORTS pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal>;
HashTableCell* cell = hash_table_.getVoxel (key);
// Insert the pair (data1,data2) belonging to 'model'
- (*cell)[model].push_back (std::pair<const ORROctree::Node::Data*, const ORROctree::Node::Data*> (data1, data2));
+ (*cell)[model].emplace_back(data1, data2);
return (true);
}
i = 0;
// Now create the graph connectivity such that each two neighboring rotation spaces are neighbors in the graph
- for ( std::vector<HypothesisOctree::Node*>::const_iterator hypo = hypo_leaves.begin () ; hypo != hypo_leaves.end () ; ++hypo, ++i )
+ for ( auto hypo = hypo_leaves.cbegin () ; hypo != hypo_leaves.cend () ; ++hypo, ++i )
{
// Compute the fitness of the graph node
graph.getNodes ()[i]->setFitness (static_cast<int> ((*hypo)->getData ().explained_pixels_.size ()));
set(SUBSYS_DESC "Point cloud registration library")
set(SUBSYS_DEPS common octree kdtree search sample_consensus features filters)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
endif()
set(incs
- "include/pcl/${SUBSYS_NAME}/eigen.h"
- "include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/boost_graph.h"
"include/pcl/${SUBSYS_NAME}/convergence_criteria.h"
"include/pcl/${SUBSYS_NAME}/default_convergence_criteria.h"
"include/pcl/${SUBSYS_NAME}/pyramid_feature_matching.h"
"include/pcl/${SUBSYS_NAME}/registration.h"
- "include/pcl/${SUBSYS_NAME}/transforms.h"
"include/pcl/${SUBSYS_NAME}/transformation_estimation.h"
"include/pcl/${SUBSYS_NAME}/transformation_estimation_2D.h"
"include/pcl/${SUBSYS_NAME}/transformation_estimation_svd.h"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_normal_shooting.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_backprojection.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_organized_projection.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_distance.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_median_distance.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_surface_normal.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_features.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_one_to_one.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_poly.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_sample_consensus.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_sample_consensus_2d.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_trimmed.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_var_trimmed.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_organized_boundary.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_types.hpp"
"include/pcl/${SUBSYS_NAME}/impl/ia_ransac.hpp"
"include/pcl/${SUBSYS_NAME}/impl/icp.hpp"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_kdtree pcl_search pcl_sample_consensus pcl_features pcl_filters)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-#if defined __GNUC__
-#pragma GCC system_header
-#endif
-
-//#include <boost/graph/adjacency_list.hpp>
-#include <boost/graph/dijkstra_shortest_paths.hpp>
-#include <boost/graph/graph_traits.hpp>
-#include <boost/noncopyable.hpp>
-#include <boost/property_map/property_map.hpp>
return (target_);
}
+ /** \brief Set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value back to
+ * automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads)
+ {
+#ifdef _OPENMP
+ num_threads_ = nr_threads != 0 ? nr_threads : omp_get_num_procs();
+#else
+ if (nr_threads != 1) {
+ PCL_WARN("OpenMP is not available. Keeping number of threads unchanged at 1\n");
+ }
+ num_threads_ = 1;
+#endif
+ }
+
/** \brief See if this rejector requires source normals */
virtual bool
requiresSourceNormals() const
/** \brief A flag which, if set, means the tree operating on the source cloud
* will never be recomputed*/
bool force_no_recompute_reciprocal_{false};
+
+ unsigned int num_threads_{1};
};
-/** \brief @b CorrespondenceEstimation represents the base class for
+/** \brief @b CorrespondenceEstimation represents a simple class for
* determining correspondences between target and query point
- * sets/features.
+ * sets/features, using nearest neighbor search.
*
* Code example:
*
Ptr copy(new CorrespondenceEstimation<PointSource, PointTarget, Scalar>(*this));
return (copy);
}
+
+protected:
+ using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::num_threads_;
};
} // namespace registration
} // namespace pcl
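With the new num_threads_ member, determineCorrespondences() can search in parallel when PCL is built with OpenMP. A minimal usage sketch, wrapped in a hypothetical helper (input clouds assumed to be filled elsewhere; 0.05 is an illustrative maximum correspondence distance):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/correspondence_estimation.h>

void
estimateCorrespondences(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& source,
                        const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& target,
                        pcl::Correspondences& correspondences)
{
  pcl::registration::CorrespondenceEstimation<pcl::PointXYZ, pcl::PointXYZ> est;
  est.setInputSource(source);
  est.setInputTarget(target);
  est.setNumberOfThreads(0); // 0 = automatic; stays single-threaded without OpenMP

  // one nearest neighbor per source point, capped at 0.05 (squared internally)
  est.determineCorrespondences(correspondences, 0.05);
}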
// Check if the representations are valid
if (!feature_representation_->isValid(feat_src) ||
!feature_representation_->isValid(feat_tgt)) {
- PCL_ERROR("[pcl::registration::%s::getCorrespondenceScore] Invalid feature "
- "representation given!\n",
- this->getClassName().c_str());
+ PCL_ERROR(
+ "[pcl::registration::CorrespondenceRejectorFeatures::FeatureContainer::"
+ "getCorrespondenceScore] Invalid feature representation given!\n");
return (std::numeric_limits<double>::max());
}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-#pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-
-#include <Eigen/Core>
-#include <Eigen/Dense>
-#include <Eigen/Geometry>
-#include <unsupported/Eigen/Polynomials>
* The original code uses GSL and ANN while in ours we use FLANN and Newton's method
* for optimization (call `useBFGS` to switch to BFGS optimizer, however Newton
* is usually faster and more accurate).
+ * Basic usage example:
+ * \code
+ * pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> reg;
+ * reg.setInputSource(src);
+ * reg.setInputTarget(tgt);
+ * // use default parameters or set them yourself, for example:
+ * // reg.setMaximumIterations(...);
+ * // reg.setTransformationEpsilon(...);
+ * // reg.setRotationEpsilon(...);
+ * // reg.setCorrespondenceRandomness(...);
+ * pcl::PointCloud<pcl::PointXYZ>::Ptr output(new pcl::PointCloud<pcl::PointXYZ>);
+ * // supply a better guess, if possible:
+ * Eigen::Matrix4f guess = Eigen::Matrix4f::Identity();
+ * reg.align(*output, guess);
+ * std::cout << reg.getFinalTransformation() << std::endl;
+ * \endcode
* \author Nizar Sallem
* \ingroup registration
*/
max_iterations_ = 200;
transformation_epsilon_ = 5e-4;
corr_dist_threshold_ = 5.;
+ setNumberOfThreads(0);
rigid_transformation_estimation_ = [this](const PointCloudSource& cloud_src,
const pcl::Indices& indices_src,
const PointCloudTarget& cloud_tgt,
return rotation_gradient_tolerance_;
}
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value back to
+ * automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads = 0);
+
protected:
/** \brief The number of neighbors used for covariances computation.
* default: 20
Eigen::Matrix3d& ddR_dTheta_dTheta,
Eigen::Matrix3d& ddR_dTheta_dPsi,
Eigen::Matrix3d& ddR_dPsi_dPsi) const;
+
+ /** \brief The number of threads the scheduler should use. */
+ unsigned int threads_;
};
} // namespace pcl
* \param[in] match_indices indices of match M
* \param[out] correspondences resulting correspondences
*/
+ PCL_DEPRECATED(1, 18, "this function has a bug and is generally not needed")
virtual void
linkMatchWithBase(const pcl::Indices& base_indices,
pcl::Indices& match_indices,
*
* \param[in] base_indices indices of base B
* \param[in] match_indices indices of match M
- * \param[in] correspondences corresondences between source and target
+ * \param[in] correspondences correspondences between source and target
* \param[out] transformation resulting transformation matrix
* \return
* * < 0 MSE bigger than max_mse_
* on keypoints as described in: "Markerless point cloud registration with
* keypoint-based 4-points congruent sets", Pascal Theiler, Jan Dirk Wegner, Konrad
* Schindler. ISPRS Annals II-5/W2, 2013. Presented at ISPRS Workshop Laser Scanning,
- * Antalya, Turkey, 2013. \note Method has since been improved and some variations to
- * the paper exist. \author P.W.Theiler \ingroup registration
+ * Antalya, Turkey, 2013.
+ * \note Method has since been improved and some variations to the paper exist.
+ *
+ * The main differences to FPCSInitialAlignment are:
+ * <ol>
+ * <li> KFPCSInitialAlignment stores all solution candidates instead of only the best
+ * one
+ * <li> KFPCSInitialAlignment uses an MSAC approach to score candidates instead of
+ * counting inliers
+ * </ol>
+ * \author P.W.Theiler
+ * \ingroup registration
*/
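A short usage sketch of the keypoint-based variant, wrapped in a hypothetical helper (all parameter values below are illustrative and would normally be tuned to the keypoint density and the expected overlap):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/ia_kfpcs.h>

void
coarseAlign(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source_keypoints,
            const pcl::PointCloud<pcl::PointXYZ>::Ptr& target_keypoints)
{
  pcl::registration::KFPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> kfpcs;
  kfpcs.setInputSource(source_keypoints);
  kfpcs.setInputTarget(target_keypoints);
  kfpcs.setApproxOverlap(0.6f);   // expected overlap between the clouds
  kfpcs.setDelta(0.1f, false);    // accuracy of the keypoint positions, not normalized
  kfpcs.setScoreThreshold(0.05f); // terminate early once a candidate scores below this
  kfpcs.setNumberOfThreads(4);

  pcl::PointCloud<pcl::PointXYZ> source_aligned;
  kfpcs.align(source_aligned);
  // kfpcs.getFinalTransformation() holds the best-scoring candidate
}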
template <typename PointSource,
typename PointTarget,
using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::fitness_score_;
using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
score_threshold_;
- using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::
- linkMatchWithBase;
using FPCSInitialAlignment<PointSource, PointTarget, NormalT, Scalar>::validateMatch;
/** \brief Internal computation initialization. */
return (use_reciprocal_correspondence_);
}
+ /** \brief Set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value back to
+ * automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads)
+ {
+ correspondence_estimation_->setNumberOfThreads(nr_threads);
+ }
+
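Because the new ICP member simply forwards to the underlying correspondence estimation, enabling multi-threaded correspondence search is one extra call. A sketch in a hypothetical helper (clouds assumed to be loaded; the iteration cap is illustrative):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

void
runIcp(const pcl::PointCloud<pcl::PointXYZ>::Ptr& source,
       const pcl::PointCloud<pcl::PointXYZ>::Ptr& target)
{
  pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
  icp.setInputSource(source);
  icp.setInputTarget(target);
  icp.setNumberOfThreads(0); // forwarded to the correspondence estimation, 0 = automatic
  icp.setMaximumIterations(50); // illustrative value

  pcl::PointCloud<pcl::PointXYZ> aligned;
  icp.align(aligned);
  // icp.getFinalTransformation() holds the estimated rigid transformation
}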
protected:
/** \brief Apply a rigid transform to a given dataset. Here we check whether
* the dataset has surface normals in addition to XYZ, and rotate normals as well.
#include <pcl/common/copy_point.h>
#include <pcl/common/io.h>
+#include <pcl/common/point_tests.h> // for isXYZFinite
namespace pcl {
pcl::Indices index(1);
std::vector<float> distance(1);
- pcl::Correspondence corr;
- unsigned int nr_valid_correspondences = 0;
+ std::vector<pcl::Correspondences> per_thread_correspondences(num_threads_);
+ for (auto& corrs : per_thread_correspondences) {
+ corrs.reserve(2 * indices_->size() / num_threads_);
+ }
double max_dist_sqr = max_distance * max_distance;
+#pragma omp parallel for default(none) \
+ shared(max_dist_sqr, per_thread_correspondences) firstprivate(index, distance) \
+ num_threads(num_threads_)
// Iterate over the input set of source indices
- for (const auto& idx : (*indices_)) {
+ for (int i = 0; i < static_cast<int>(indices_->size()); i++) {
+ const auto& idx = (*indices_)[i];
// Check if the template types are the same. If true, avoid a copy.
// Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
// macro!
const auto& pt = detail::pointCopyOrRef<PointTarget, PointSource>(input_, idx);
+ if (!input_->is_dense && !pcl::isXYZFinite(pt))
+ continue;
tree_->nearestKSearch(pt, 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
+ pcl::Correspondence corr;
corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
- correspondences[nr_valid_correspondences++] = corr;
+
+#ifdef _OPENMP
+ const int thread_num = omp_get_thread_num();
+#else
+ const int thread_num = 0;
+#endif
+
+ per_thread_correspondences[thread_num].emplace_back(corr);
}
- correspondences.resize(nr_valid_correspondences);
+ if (num_threads_ == 1) {
+ correspondences = std::move(per_thread_correspondences.front());
+ }
+ else {
+ const unsigned int nr_correspondences = std::accumulate(
+ per_thread_correspondences.begin(),
+ per_thread_correspondences.end(),
+ static_cast<unsigned int>(0),
+ [](const auto sum, const auto& corr) { return sum + corr.size(); });
+ correspondences.resize(nr_correspondences);
+
+ // Merge per-thread correspondences while keeping them ordered
+ auto insert_loc = correspondences.begin();
+ for (const auto& corrs : per_thread_correspondences) {
+ const auto new_insert_loc = std::move(corrs.begin(), corrs.end(), insert_loc);
+ std::inplace_merge(correspondences.begin(),
+ insert_loc,
+ insert_loc + corrs.size(),
+ [](const auto& lhs, const auto& rhs) {
+ return lhs.index_query < rhs.index_query;
+ });
+ insert_loc = new_insert_loc;
+ }
+ }
deinitCompute();
}
std::vector<float> distance(1);
pcl::Indices index_reciprocal(1);
std::vector<float> distance_reciprocal(1);
- pcl::Correspondence corr;
- unsigned int nr_valid_correspondences = 0;
- int target_idx = 0;
+ std::vector<pcl::Correspondences> per_thread_correspondences(num_threads_);
+ for (auto& corrs : per_thread_correspondences) {
+ corrs.reserve(2 * indices_->size() / num_threads_);
+ }
+#pragma omp parallel for default(none) \
+ shared(max_dist_sqr, per_thread_correspondences) \
+ firstprivate(index, distance, index_reciprocal, distance_reciprocal) \
+ num_threads(num_threads_)
// Iterate over the input set of source indices
- for (const auto& idx : (*indices_)) {
+ for (int i = 0; i < static_cast<int>(indices_->size()); i++) {
+ const auto& idx = (*indices_)[i];
// Check if the template types are the same. If true, avoid a copy.
// Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT
// macro!
const auto& pt_src = detail::pointCopyOrRef<PointTarget, PointSource>(input_, idx);
-
+ if (!input_->is_dense && !pcl::isXYZFinite(pt_src))
+ continue;
tree_->nearestKSearch(pt_src, 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
- target_idx = index[0];
+ const auto target_idx = index[0];
const auto& pt_tgt =
detail::pointCopyOrRef<PointSource, PointTarget>(target_, target_idx);
if (distance_reciprocal[0] > max_dist_sqr || idx != index_reciprocal[0])
continue;
+ pcl::Correspondence corr;
corr.index_query = idx;
corr.index_match = index[0];
corr.distance = distance[0];
- correspondences[nr_valid_correspondences++] = corr;
+
+#ifdef _OPENMP
+ const int thread_num = omp_get_thread_num();
+#else
+ const int thread_num = 0;
+#endif
+
+ per_thread_correspondences[thread_num].emplace_back(corr);
}
- correspondences.resize(nr_valid_correspondences);
+
+ if (num_threads_ == 1) {
+ correspondences = std::move(per_thread_correspondences.front());
+ }
+ else {
+ const unsigned int nr_correspondences = std::accumulate(
+ per_thread_correspondences.begin(),
+ per_thread_correspondences.end(),
+ static_cast<unsigned int>(0),
+ [](const auto sum, const auto& corr) { return sum + corr.size(); });
+ correspondences.resize(nr_correspondences);
+
+ // Merge per-thread correspondences while keeping them ordered
+ auto insert_loc = correspondences.begin();
+ for (const auto& corrs : per_thread_correspondences) {
+ const auto new_insert_loc = std::move(corrs.begin(), corrs.end(), insert_loc);
+ std::inplace_merge(correspondences.begin(),
+ insert_loc,
+ insert_loc + corrs.size(),
+ [](const auto& lhs, const auto& rhs) {
+ return lhs.index_query < rhs.index_query;
+ });
+ insert_loc = new_insert_loc;
+ }
+ }
+
deinitCompute();
}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_DISTANCE_HPP_ */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif // PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_MEDIAN_DISTANCE_HPP_
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ONE_TO_ONE_HPP_ */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- * Copyright (c) Alexandru-Eugen Ichim
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_ORGANIZED_BOUNDARY_HPP_ */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SURFACE_NORMAL_HPP_ */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_TRIMMED_HPP_ */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_
-#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_
-PCL_DEPRECATED_HEADER(1, 15, "")
-#endif /* PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_VAR_TRIMMED_HPP_ */
}
correspondences_cur_mse_ = calculateMSE(correspondences_);
- PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Previous / Current MSE "
- "for correspondences distances is: %f / %f.\n",
- correspondences_prev_mse_,
- correspondences_cur_mse_);
+ if (std::numeric_limits<double>::max() == correspondences_prev_mse_) {
+ PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Previous / Current MSE "
+ "for correspondences distances is: INIT / %f.\n",
+ correspondences_cur_mse_);
+ }
+ else {
+ PCL_DEBUG("[pcl::DefaultConvergenceCriteria::hasConverged] Previous / Current MSE "
+ "for correspondences distances is: %f / %f.\n",
+ correspondences_prev_mse_,
+ correspondences_cur_mse_);
+ }
// 3. The relative sum of Euclidean squared errors is smaller than a user defined
// threshold Absolute
PointCloudPtr tmp(new PointCloud);
// Eigen::Vector4f diff = pose_start - pose_end;
- // Eigen::Translation3f translation (diff.head (3));
+ // Eigen::Translation3f translation (diff.head<3> ());
// Eigen::Affine3f trans = translation * Eigen::Quaternionf::Identity ();
// pcl::transformPointCloud (*(*loop_graph_)[loop_end_].cloud, *tmp, trans);
// TODO use pose
// Eigen::Vector4f cend;
// pcl::compute3DCentroid (*((*loop_graph_)[loop_end_].cloud), cend);
- // Eigen::Translation3f tend (cend.head (3));
+ // Eigen::Translation3f tend (cend.head<3> ());
// Eigen::Affine3f aend (tend);
// Eigen::Affine3f aendI = aend.inverse ();
namespace pcl {
+template <typename PointSource, typename PointTarget, typename Scalar>
+void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::setNumberOfThreads(
+ unsigned int nr_threads)
+{
+#ifdef _OPENMP
+ if (nr_threads == 0)
+ threads_ = omp_get_num_procs();
+ else
+ threads_ = nr_threads;
+ PCL_DEBUG("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] Setting "
+ "number of threads to %u.\n",
+ threads_);
+#else
+ threads_ = 1;
+ if (nr_threads != 1)
+ PCL_WARN("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] "
+ "Parallelization is requested, but OpenMP is not available! Continuing "
+ "without parallelization.\n");
+#endif // _OPENMP
+}
+
template <typename PointSource, typename PointTarget, typename Scalar>
template <typename PointT>
void
}
Eigen::Vector3d mean;
+ Eigen::Matrix3d cov;
pcl::Indices nn_indices(k_correspondences_);
std::vector<float> nn_dist_sq(k_correspondences_);
if (cloud_covariances.size() < cloud->size())
cloud_covariances.resize(cloud->size());
- auto matrices_iterator = cloud_covariances.begin();
- for (auto points_iterator = cloud->begin(); points_iterator != cloud->end();
- ++points_iterator, ++matrices_iterator) {
- const PointT& query_point = *points_iterator;
- Eigen::Matrix3d& cov = *matrices_iterator;
+#pragma omp parallel for num_threads(threads_) schedule(dynamic, 32) \
+ shared(cloud, cloud_covariances) firstprivate(mean, cov, nn_indices, nn_dist_sq)
+ for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(cloud->size()); ++i) {
+ const PointT& query_point = (*cloud)[i];
// Zero out the cov and mean
cov.setZero();
mean.setZero();
v = gicp_epsilon_;
cov += v * col * col.transpose();
}
+ cloud_covariances[i] = cov;
}
}
p_trans_src[1] - p_tgt[1],
p_trans_src[2] - p_tgt[2]);
const Eigen::Matrix3d& M = gicp_->mahalanobis(src_idx);
- const Eigen::Vector3d Md(M * d); // Md = M*d
- gradient.head<3>() += Md; // translation gradient
- hessian.block<3, 3>(0, 0) += M; // translation-translation hessian
+ const Eigen::Vector3d Md(M * d); // Md = M*d
+ gradient.head<3>() += Md; // translation gradient
+ hessian.topLeftCorner<3, 3>() += M; // translation-translation hessian
p_trans_src = base_transformation_float * p_src;
const Eigen::Vector3d p_base_src(p_trans_src[0], p_trans_src[1], p_trans_src[2]);
dCost_dR_T.noalias() += p_base_src * Md.transpose();
gradient.head<3>() *= 2.0 / m; // translation gradient
dCost_dR_T *= 2.0 / m;
gicp_->computeRDerivative(x, dCost_dR_T, gradient); // rotation gradient
- hessian.block<3, 3>(0, 0) *= 2.0 / m; // translation-translation hessian
+ hessian.topLeftCorner<3, 3>() *= 2.0 / m; // translation-translation hessian
// translation-rotation hessian
dCost_dR_T1.row(0) = dCost_dR_T1b.col(0);
dCost_dR_T1.row(1) = dCost_dR_T2b.col(0);
#include <pcl/common/distances.h>
#include <pcl/common/time.h>
#include <pcl/common/utils.h>
+#include <pcl/filters/random_sample.h>
#include <pcl/registration/ia_fpcs.h>
#include <pcl/registration/transformation_estimation_3point.h>
#include <pcl/sample_consensus/sac_model_plane.h>
}
// check terminate early (time or fitness_score threshold reached)
- abort = (!candidates.empty() ? candidates[0].fitness_score < score_threshold_
- : abort);
- abort = (abort ? abort : timer.getTimeSeconds() > max_runtime_);
+ if (!candidates.empty() && candidates[0].fitness_score < score_threshold_) {
+ PCL_DEBUG("[%s::computeTransformation] Terminating because fitness score "
+ "(%g) is below threshold (%g).\n",
+ reg_name_.c_str(),
+ candidates[0].fitness_score,
+ score_threshold_);
+ abort = true;
+ }
+ else if (timer.getTimeSeconds() > max_runtime_) {
+ PCL_DEBUG("[%s::computeTransformation] Terminating because time exceeded.\n",
+ reg_name_.c_str());
+ abort = true;
+ }
#pragma omp flush(abort)
}
// if a sample size for the point clouds is given; preferably no sampling of target
// cloud
- if (nr_samples_ != 0) {
- const int ss = static_cast<int>(indices_->size());
- const int sample_fraction_src = std::max(1, static_cast<int>(ss / nr_samples_));
-
+ if (nr_samples_ > 0 && static_cast<std::size_t>(nr_samples_) < indices_->size()) {
source_indices_ = pcl::IndicesPtr(new pcl::Indices);
- for (int i = 0; i < ss; i++)
- if (rand() % sample_fraction_src == 0)
- source_indices_->push_back((*indices_)[i]);
+ pcl::RandomSample<PointSource> random_sampling;
+ random_sampling.setInputCloud(input_);
+ random_sampling.setIndices(indices_);
+ random_sampling.setSample(nr_samples_);
+ random_sampling.setSeed(seed);
+ random_sampling.filter(*source_indices_);
}
else
source_indices_ = indices_;
float max_base_diameter = diameter_ * approx_overlap_ * 2.f;
max_base_diameter_sqr_ = max_base_diameter * max_base_diameter;
+#ifdef _OPENMP
+ if (nr_threads_ < 1) {
+ nr_threads_ = omp_get_num_procs();
+ PCL_DEBUG("[%s::initCompute] Setting number of threads to %i.\n",
+ reg_name_.c_str(),
+ nr_threads_);
+ }
+#endif // _OPENMP
+
// normalize the delta
if (normalize_delta_) {
float mean_dist = getMeanPointDensity<PointTarget>(
static_cast<double>(min_iterations)));
max_iterations_ =
static_cast<int>(first_est / (diameter_fraction * approx_overlap_ * 2.f));
+ PCL_DEBUG("[%s::initCompute] Estimated max iterations as %i\n",
+ reg_name_.c_str(),
+ max_iterations_);
}
// set further parameter
coincidation_limit_ = delta_ * 2.f; // EDITED: originally std::sqrt (delta_ * 2.f)
max_mse_ = powf(delta_ * 2.f, 2.f);
max_inlier_dist_sqr_ = powf(delta_ * 2.f, 2.f);
+ PCL_DEBUG("[%s::initCompute] delta_=%g, max_inlier_dist_sqr_=%g, "
+ "coincidation_limit_=%g, max_edge_diff_=%g, max_pair_diff_=%g\n",
+ reg_name_.c_str(),
+ delta_,
+ max_inlier_dist_sqr_,
+ coincidation_limit_,
+ max_edge_diff_,
+ max_pair_diff_);
// reset fitness_score
fitness_score_ = std::numeric_limits<float>::max();
float d1 = pcl::squaredEuclideanDistance(*pt4, *pt1);
float d2 = pcl::squaredEuclideanDistance(*pt4, *pt2);
float d3 = pcl::squaredEuclideanDistance(*pt4, *pt3);
- float d4 = (pt4->getVector3fMap() - centre_pt.head(3)).squaredNorm();
+ float d4 = (pt4->getVector3fMap() - centre_pt.head<3>()).squaredNorm();
// check distance between points w.r.t minimum sampling distance; EDITED -> 4th
// point now also limited by max base line
continue;
}
- pairs.push_back(pcl::Correspondence(*it_in, *it_out, dist));
- pairs.push_back(pcl::Correspondence(*it_out, *it_in, dist));
+ pairs.emplace_back(*it_in, *it_out, dist);
+ pairs.emplace_back(*it_out, *it_in, dist);
}
}
}
pairs_a[static_cast<int>(std::floor((id / 2.f)))].index_query;
match_indices[2] = pair.index_match;
match_indices[3] = pair.index_query;
+ if (match_indices[0] == match_indices[2] ||
+ match_indices[0] == match_indices[3] ||
+ match_indices[1] == match_indices[2] ||
+ match_indices[1] == match_indices[3])
+ continue;
// EDITED: added coarse check of match based on edge length (due to rigid-body )
if (checkBaseMatch(match_indices, dist_base) < 0)
}
// return unsuccessful if no match was found
- return (!matches.empty() ? 0 : -1);
+ if (matches.empty()) {
+ PCL_DEBUG("[%s::determineBaseMatches] no matches found\n", reg_name_.c_str());
+ return -1;
+ }
+ else {
+ PCL_DEBUG("[%s::determineBaseMatches] found %zu matches\n",
+ reg_name_.c_str(),
+ matches.size());
+ return 0;
+ }
}
///////////////////////////////////////////////////////////////////////////////////////////
for (auto& match : matches) {
Eigen::Matrix4f transformation_temp;
pcl::Correspondences correspondences_temp;
-
- // determine corresondences between base and match according to their distance to
- // centroid
- linkMatchWithBase(base_indices, match, correspondences_temp);
+ correspondences_temp.emplace_back(match[0], base_indices[0], 0.0);
+ correspondences_temp.emplace_back(match[1], base_indices[1], 0.0);
+ correspondences_temp.emplace_back(match[2], base_indices[2], 0.0);
+ correspondences_temp.emplace_back(match[3], base_indices[3], 0.0);
// check match based on residuals of the corresponding points after
if (validateMatch(base_indices, match, correspondences_temp, transformation_temp) <
}
// assign new correspondence and update indices of matched targets
- correspondences.push_back(pcl::Correspondence(best_index, *it_base, best_diff_sqr));
+ correspondences.emplace_back(best_index, *it_base, best_diff_sqr);
*it_match_orig = best_index;
}
}
: static_cast<std::size_t>((1.f - fitness_score) * nr_points);
float inlier_score_temp = 0;
- pcl::Indices ids;
- std::vector<float> dists_sqr;
+ pcl::Indices ids(1);
+ std::vector<float> dists_sqr(1);
auto it = source_transformed.begin();
for (std::size_t i = 0; i < nr_points; it++, i++) {
best_index = i;
}
}
+ PCL_DEBUG(
+ "[%s::finalCompute] best score is %g (iteration %i), out of %zu iterations.\n",
+ reg_name_.c_str(),
+ best_score,
+ best_index,
+ candidates.size());
// check if a valid candidate was available
if (!(best_index < 0)) {
// generate a subset of indices of size ransac_iterations_ on which to evaluate
// candidates
- std::size_t nr_indices = indices_->size();
- if (nr_indices < static_cast<std::size_t>(ransac_iterations_))
+ if (indices_->size() <= static_cast<std::size_t>(ransac_iterations_) ||
+ ransac_iterations_ <= 0)
indices_validation_ = indices_;
- else
- for (int i = 0; i < ransac_iterations_; i++)
- indices_validation_->push_back((*indices_)[rand() % nr_indices]);
+ else {
+ indices_validation_.reset(new pcl::Indices);
+ pcl::RandomSample<PointSource> random_sampling;
+ random_sampling.setInputCloud(input_);
+ random_sampling.setIndices(indices_);
+ random_sampling.setSample(ransac_iterations_);
+ random_sampling.filter(*indices_validation_);
+ }
+ PCL_DEBUG("[%s::initCompute] delta_=%g, max_inlier_dist_sqr_=%g, "
+ "coincidation_limit_=%g, max_edge_diff_=%g, max_pair_diff_=%g\n",
+ reg_name_.c_str(),
+ delta_,
+ max_inlier_dist_sqr_,
+ coincidation_limit_,
+ max_edge_diff_,
+ max_pair_diff_);
return (true);
}
std::numeric_limits<float>::max(); // reset to std::numeric_limits<float>::max()
// to accept all candidates and not only best
- // determine corresondences between base and match according to their distance to
- // centroid
- linkMatchWithBase(base_indices, match, correspondences_temp);
+ correspondences_temp.emplace_back(match[0], base_indices[0], 0.0);
+ correspondences_temp.emplace_back(match[1], base_indices[1], 0.0);
+ correspondences_temp.emplace_back(match[2], base_indices[2], 0.0);
+ correspondences_temp.emplace_back(match[3], base_indices[3], 0.0);
// check match based on residuals of the corresponding points after transformation
if (validateMatch(base_indices, match, correspondences_temp, transformation_temp) <
validateTransformation(transformation_temp, fitness_score);
// store all valid match as well as associated score and transformation
- candidates.push_back(
- MatchingCandidate(fitness_score, correspondences_temp, transformation_temp));
+ candidates.emplace_back(fitness_score, correspondences_temp, transformation_temp);
+ }
+ // make sure that the candidate with the best fitness score is at the front, for the
+ // early-termination check
+ if (!candidates.empty()) {
+ auto best_candidate = candidates.begin();
+ for (auto iter = candidates.begin(); iter < candidates.end(); ++iter)
+ if (iter->fitness_score < best_candidate->fitness_score)
+ best_candidate = iter;
+ if (best_candidate != candidates.begin())
+ std::swap(*best_candidate, *candidates.begin());
}
}
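The explicit best-candidate search above could equally be written with the standard library; a sketch of the same swap-to-front idea (generic over the candidate container, not the code used in the hunk):

#include <algorithm>

// Move the element with the smallest `fitness_score` to the front of the
// container -- same effect as the hand-written loop above.
template <typename CandidateVec>
void
moveBestCandidateToFront (CandidateVec& candidates)
{
  if (candidates.empty ())
    return;
  auto best = std::min_element (
      candidates.begin (), candidates.end (),
      [](const auto& a, const auto& b) { return a.fitness_score < b.fitness_score; });
  if (best != candidates.begin ())
    std::iter_swap (best, candidates.begin ());
}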
float score_a = 0.f, score_b = 0.f;
// residual costs based on mse
- pcl::Indices ids;
- std::vector<float> dists_sqr;
+ pcl::Indices ids(1);
+ std::vector<float> dists_sqr(1);
for (const auto& source : source_transformed) {
// search for nearest point using kd tree search
tree_->nearestKSearch(source, 1, ids, dists_sqr);
// translation score (solutions with small translation are down-voted)
float scale = 1.f;
if (use_trl_score_) {
- float trl = transformation.rightCols<1>().head(3).norm();
+ float trl = transformation.rightCols<1>().head<3>().norm();
float trl_ratio =
(trl - lower_trl_boundary_) / (upper_trl_boundary_ - lower_trl_boundary_);
fitness_score_ = candidates_[0].fitness_score;
final_transformation_ = candidates_[0].transformation;
*correspondences_ = candidates_[0].correspondences;
+ PCL_DEBUG("[%s::finalCompute] best score is %g, out of %zu candidate solutions.\n",
+ reg_name_.c_str(),
+ fitness_score_,
+ candidates_.size());
// here we define convergence if resulting score is above threshold
converged_ = fitness_score_ < score_threshold_;
for (const auto& c2 : candidates) {
Eigen::Matrix4f diff =
candidate.transformation.colPivHouseholderQr().solve(c2.transformation);
- const float angle3d = Eigen::AngleAxisf(diff.block<3, 3>(0, 0)).angle();
+ const float angle3d = Eigen::AngleAxisf(diff.topLeftCorner<3, 3>()).angle();
const float translation3d = diff.block<3, 1>(0, 3).norm();
unique = angle3d > min_angle3d && translation3d > min_translation3d;
if (!unique) {
for (const auto& c2 : candidates) {
Eigen::Matrix4f diff =
candidate.transformation.colPivHouseholderQr().solve(c2.transformation);
- const float angle3d = Eigen::AngleAxisf(diff.block<3, 3>(0, 0)).angle();
+ const float angle3d = Eigen::AngleAxisf(diff.topLeftCorner<3, 3>()).angle();
const float translation3d = diff.block<3, 1>(0, 3).norm();
unique = angle3d > min_angle3d && translation3d > min_translation3d;
if (!unique) {
* $Id$
*
*/
-#ifndef PCL_REGISTRATION_ICP_NL_HPP_
-#define PCL_REGISTRATION_ICP_NL_HPP_
-
-#endif /* PCL_REGISTRATION_ICP_NL_HPP_ */
+PCL_DEPRECATED_HEADER(1, 18, "Do not include icp_nl.hpp, it is empty.")
// Fill in elements of G and B
if (vj > 0)
- G.block(6 * (vi - 1), 6 * (vj - 1), 6, 6) = -(*slam_graph_)[e].cinv_;
- G.block(6 * (vi - 1), 6 * (vi - 1), 6, 6) += (*slam_graph_)[e].cinv_;
+ G.block<6, 6>(6 * (vi - 1), 6 * (vj - 1)) = -(*slam_graph_)[e].cinv_;
+ G.block<6, 6>(6 * (vi - 1), 6 * (vi - 1)) += (*slam_graph_)[e].cinv_;
B.segment(6 * (vi - 1), 6) += (present1 ? 1 : -1) * (*slam_graph_)[e].cinvd_;
}
}
// Update the poses
float sum = 0.0;
for (int vi = 1; vi != n; ++vi) {
- Eigen::Vector6f difference_pose = static_cast<Eigen::Vector6f>(
+ auto difference_pose = static_cast<Eigen::Vector6f>(
-incidenceCorrection(getPose(vi)).inverse() * X.segment(6 * (vi - 1), 6));
sum += difference_pose.norm();
setPose(vi, getPose(vi) + difference_pose);
// the sufficient decrease, Equation 1.1, and curvature condition, Equation 1.2 [More,
// Thuente 1994]
while (!interval_converged && step_iterations < max_step_iterations &&
- !(psi_t <= 0 /*Sufficient Decrease*/ &&
- d_phi_t <= -nu * d_phi_0 /*Curvature Condition*/)) {
+ (psi_t > 0 /*Sufficient Decrease*/ ||
+ d_phi_t > -nu * d_phi_0 /*Curvature Condition*/)) {
// Use auxiliary function if interval I is not closed
if (open_interval) {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
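The rewritten loop guard above is just the old stop test with the negation pushed inside (De Morgan); restated in the More-Thuente notation the surrounding code uses (psi_t, d_phi_t, d_phi_0, nu), the loop keeps iterating while

\[
\psi(\alpha_t) > 0 \;\;\text{(sufficient decrease not yet met)}
\quad\text{or}\quad
\phi'(\alpha_t) > -\nu\,\phi'(0) \;\;\text{(curvature condition not yet met),}
\]

which is exactly \(\lnot\bigl(\psi(\alpha_t) \le 0 \,\wedge\, \phi'(\alpha_t) \le -\nu\,\phi'(0)\bigr)\).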
if (!initializeHistogram())
return;
+ std::vector<float> feature_vector; // put here to reuse memory
for (const auto& point : *input_) {
- std::vector<float> feature_vector;
// NaN is converted to very high number that gives out of bound exception.
if (!pcl::isFinite(point))
continue;
// For each point in the source dataset
int nr = 0;
for (const auto& point : input_transformed) {
+ if (!input_->is_dense && !pcl::isXYZFinite(point))
+ continue;
// Find its nearest neighbor in the target
tree_->nearestKSearch(point, 1, nn_indices, nn_dists);
deinitCompute();
}
-} // namespace pcl
+} // namespace pcl
\ No newline at end of file
// Select a random number
sample_indices[i] = getRandomIndex(static_cast<int>(cloud.size()) - i);
- // Run trough list of numbers, starting at the lowest, to avoid duplicates
+ // Run through list of numbers, starting at the lowest, to avoid duplicates
for (int j = 0; j < i; j++) {
// Move value up if it is higher than previous selections to ensure true
// randomness
// Assemble the correlation matrix H = source * target'
Eigen::Matrix<Scalar, 3, 3> H =
- (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
+ (cloud_src_demean * cloud_tgt_demean.transpose()).template topLeftCorner<3, 3>();
float angle = std::atan2((H(0, 1) - H(1, 0)), (H(0, 0) + H(1, 1)));
R(1, 0) = std::sin(angle);
// Return the correct transformation
- transformation_matrix.topLeftCorner(3, 3).matrix() = R;
- const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.head(3).matrix());
- transformation_matrix.block(0, 3, 3, 1).matrix() = centroid_tgt.head(3) - Rc;
+ transformation_matrix.template topLeftCorner<3, 3>().matrix() = R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.template head<3>().matrix());
+ transformation_matrix.template block<3, 1>(0, 3).matrix() =
+ centroid_tgt.template head<3>() - Rc;
}
} // namespace registration
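A recurring change throughout these estimation hunks is switching Eigen's dynamic-size block accessors to their fixed-size counterparts (e.g. `.block(0, 3, 3, 1)` becomes `.block<3, 1>(0, 3)`), which fixes the block dimensions at compile time; because the matrix type depends on `Scalar`, the `template` keyword is also required before the member call. A small standalone sketch of the idiom (names are illustrative only):

#include <Eigen/Core>

// Compose a homogeneous transform from a rotation R and translation t using
// fixed-size blocks; `template` disambiguates the dependent member templates.
template <typename Scalar>
Eigen::Matrix<Scalar, 4, 4>
composeTransform (const Eigen::Matrix<Scalar, 3, 3>& R,
                  const Eigen::Matrix<Scalar, 3, 1>& t)
{
  Eigen::Matrix<Scalar, 4, 4> T = Eigen::Matrix<Scalar, 4, 4>::Identity ();
  T.template topLeftCorner<3, 3> () = R; // fixed-size: dimensions known at compile time
  T.template block<3, 1> (0, 3) = t;     // dynamic equivalent: T.block (0, 3, 3, 1)
  return T;
}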
target_it.reset();
Eigen::Matrix<Scalar, 3, 1> s1 =
- source_demean.col(1).head(3) - source_demean.col(0).head(3);
+ source_demean.col(1).template head<3>() - source_demean.col(0).template head<3>();
s1.normalize();
Eigen::Matrix<Scalar, 3, 1> s2 =
- source_demean.col(2).head(3) - source_demean.col(0).head(3);
+ source_demean.col(2).template head<3>() - source_demean.col(0).template head<3>();
s2 -= s2.dot(s1) * s1;
s2.normalize();
source_rot.col(2) = s1.cross(s2);
Eigen::Matrix<Scalar, 3, 1> t1 =
- target_demean.col(1).head(3) - target_demean.col(0).head(3);
+ target_demean.col(1).template head<3>() - target_demean.col(0).template head<3>();
t1.normalize();
Eigen::Matrix<Scalar, 3, 1> t2 =
- target_demean.col(2).head(3) - target_demean.col(0).head(3);
+ target_demean.col(2).template head<3>() - target_demean.col(0).template head<3>();
t2 -= t2.dot(t1) * t1;
t2.normalize();
// Eigen::Matrix <Scalar, 3, 3> R = source_rot * target_rot.transpose ();
Eigen::Matrix<Scalar, 3, 3> R = target_rot * source_rot.transpose();
- transformation_matrix.topLeftCorner(3, 3) = R;
- // transformation_matrix.block (0, 3, 3, 1) = source_mean.head (3) - R *
- // target_mean.head (3);
- transformation_matrix.block(0, 3, 3, 1) =
- target_mean.head(3) - R * source_mean.head(3);
+ transformation_matrix.template topLeftCorner<3, 3>() = R;
+ // transformation_matrix.block<3, 1>(0, 3) = source_mean.head<3>() - R *
+ // target_mean.head<3>();
+ transformation_matrix.template block<3, 1>(0, 3) =
+ target_mean.template head<3>() - R * source_mean.template head<3>();
}
//#define PCL_INSTANTIATE_TransformationEstimation3Point(T,U) template class PCL_EXPORTS
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- *
- */
-
-#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_
-#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_
-
-#include <pcl/common/eigen.h>
-PCL_DEPRECATED_HEADER(1,
- 15,
- "TransformationEstimationDQ has been renamed to "
- "TransformationEstimationDualQuaternion.");
-
-namespace pcl {
-
-namespace registration {
-
-template <typename PointSource, typename PointTarget, typename Scalar>
-inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- Matrix4& transformation_matrix) const
-{
- const auto nr_points = cloud_src.size();
- if (cloud_tgt.size() != nr_points) {
- PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
- "or points in source (%zu) differs than target (%zu)!\n",
- static_cast<std::size_t>(nr_points),
- static_cast<std::size_t>(cloud_tgt.size()));
- return;
- }
-
- ConstCloudIterator<PointSource> source_it(cloud_src);
- ConstCloudIterator<PointTarget> target_it(cloud_tgt);
- estimateRigidTransformation(source_it, target_it, transformation_matrix);
-}
-
-template <typename PointSource, typename PointTarget, typename Scalar>
-void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::Indices& indices_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- Matrix4& transformation_matrix) const
-{
- if (indices_src.size() != cloud_tgt.size()) {
- PCL_ERROR("[pcl::TransformationDQ::estimateRigidTransformation] Number or points "
- "in source (%zu) differs than target (%zu)!\n",
- indices_src.size(),
- static_cast<std::size_t>(cloud_tgt.size()));
- return;
- }
-
- ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it(cloud_tgt);
- estimateRigidTransformation(source_it, target_it, transformation_matrix);
-}
-
-template <typename PointSource, typename PointTarget, typename Scalar>
-inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::Indices& indices_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- const pcl::Indices& indices_tgt,
- Matrix4& transformation_matrix) const
-{
- if (indices_src.size() != indices_tgt.size()) {
- PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
- "or points in source (%zu) differs than target (%zu)!\n",
- indices_src.size(),
- indices_tgt.size());
- return;
- }
-
- ConstCloudIterator<PointSource> source_it(cloud_src, indices_src);
- ConstCloudIterator<PointTarget> target_it(cloud_tgt, indices_tgt);
- estimateRigidTransformation(source_it, target_it, transformation_matrix);
-}
-
-template <typename PointSource, typename PointTarget, typename Scalar>
-void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- const pcl::Correspondences& correspondences,
- Matrix4& transformation_matrix) const
-{
- ConstCloudIterator<PointSource> source_it(cloud_src, correspondences, true);
- ConstCloudIterator<PointTarget> target_it(cloud_tgt, correspondences, false);
- estimateRigidTransformation(source_it, target_it, transformation_matrix);
-}
-
-template <typename PointSource, typename PointTarget, typename Scalar>
-inline void
-TransformationEstimationDQ<PointSource, PointTarget, Scalar>::
- estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4& transformation_matrix) const
-{
- const int npts = static_cast<int>(source_it.size());
-
- transformation_matrix.setIdentity();
-
- // dual quaternion optimization
- Eigen::Matrix<Scalar, 4, 4> C1 = Eigen::Matrix<Scalar, 4, 4>::Zero();
- Eigen::Matrix<Scalar, 4, 4> C2 = Eigen::Matrix<Scalar, 4, 4>::Zero();
- Scalar* c1 = C1.data();
- Scalar* c2 = C2.data();
-
- for (int i = 0; i < npts; i++) {
- const PointSource& a = *source_it;
- const PointTarget& b = *target_it;
- const Scalar axbx = a.x * b.x;
- const Scalar ayby = a.y * b.y;
- const Scalar azbz = a.z * b.z;
- const Scalar axby = a.x * b.y;
- const Scalar aybx = a.y * b.x;
- const Scalar axbz = a.x * b.z;
- const Scalar azbx = a.z * b.x;
- const Scalar aybz = a.y * b.z;
- const Scalar azby = a.z * b.y;
- c1[0] += axbx - azbz - ayby;
- c1[5] += ayby - azbz - axbx;
- c1[10] += azbz - axbx - ayby;
- c1[15] += axbx + ayby + azbz;
- c1[1] += axby + aybx;
- c1[2] += axbz + azbx;
- c1[3] += aybz - azby;
- c1[6] += azby + aybz;
- c1[7] += azbx - axbz;
- c1[11] += axby - aybx;
-
- c2[1] += a.z + b.z;
- c2[2] -= a.y + b.y;
- c2[3] += a.x - b.x;
- c2[6] += a.x + b.x;
- c2[7] += a.y - b.y;
- c2[11] += a.z - b.z;
- source_it++;
- target_it++;
- }
-
- c1[4] = c1[1];
- c1[8] = c1[2];
- c1[9] = c1[6];
- c1[12] = c1[3];
- c1[13] = c1[7];
- c1[14] = c1[11];
- c2[4] = -c2[1];
- c2[8] = -c2[2];
- c2[12] = -c2[3];
- c2[9] = -c2[6];
- c2[13] = -c2[7];
- c2[14] = -c2[11];
-
- C1 *= -2.0f;
- C2 *= 2.0f;
-
- const Eigen::Matrix<Scalar, 4, 4> A =
- (0.25f / float(npts)) * C2.transpose() * C2 - C1;
-
- const Eigen::EigenSolver<Eigen::Matrix<Scalar, 4, 4>> es(A);
-
- ptrdiff_t i;
- es.eigenvalues().real().maxCoeff(&i);
- const Eigen::Matrix<Scalar, 4, 1> qmat = es.eigenvectors().col(i).real();
- const Eigen::Matrix<Scalar, 4, 1> smat = -(0.5f / float(npts)) * C2 * qmat;
-
- const Eigen::Quaternion<Scalar> q(qmat(3), qmat(0), qmat(1), qmat(2));
- const Eigen::Quaternion<Scalar> s(smat(3), smat(0), smat(1), smat(2));
-
- const Eigen::Quaternion<Scalar> t = s * q.conjugate();
-
- const Eigen::Matrix<Scalar, 3, 3> R(q.toRotationMatrix());
-
- for (int i = 0; i < 3; ++i)
- for (int j = 0; j < 3; ++j)
- transformation_matrix(i, j) = R(i, j);
-
- transformation_matrix(0, 3) = -t.x();
- transformation_matrix(1, 3) = -t.y();
- transformation_matrix(2, 3) = -t.z();
-}
-
-} // namespace registration
-} // namespace pcl
-
-#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_DQ_HPP_ */
// Assemble the correlation matrix H = source * target'
Eigen::Matrix<Scalar, 3, 3> H =
- (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
+ (cloud_src_demean * cloud_tgt_demean.transpose()).template topLeftCorner<3, 3>();
// Compute the Singular Value Decomposition
Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3>> svd(
Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose();
// Return the correct transformation
- transformation_matrix.topLeftCorner(3, 3) = R;
- const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.head(3));
- transformation_matrix.block(0, 3, 3, 1) = centroid_tgt.head(3) - Rc;
+ transformation_matrix.template topLeftCorner<3, 3>() = R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.template head<3>());
+ transformation_matrix.template block<3, 1>(0, 3) =
+ centroid_tgt.template head<3>() - Rc;
if (pcl::console::isVerbosityLevelEnabled(pcl::console::L_DEBUG)) {
size_t N = cloud_src_demean.cols();
// Assemble the correlation matrix H = source * target'
Eigen::Matrix<Scalar, 3, 3> H =
- (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);
+ (cloud_src_demean * cloud_tgt_demean.transpose()).template topLeftCorner<3, 3>();
// Compute the Singular Value Decomposition
Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3>> svd(
// rotated cloud
Eigen::Matrix<Scalar, 4, 4> R4;
- R4.block(0, 0, 3, 3) = R;
+ R4.template topLeftCorner<3, 3>() = R;
R4(0, 3) = 0;
R4(1, 3) = 0;
R4(2, 3) = 0;
}
float scale = sum_tt / sum_ss;
- transformation_matrix.topLeftCorner(3, 3) = scale * R;
- const Eigen::Matrix<Scalar, 3, 1> Rc(scale * R * centroid_src.head(3));
- transformation_matrix.block(0, 3, 3, 1) = centroid_tgt.head(3) - Rc;
+ transformation_matrix.template topLeftCorner<3, 3>() = scale * R;
+ const Eigen::Matrix<Scalar, 3, 1> Rc(scale * R * centroid_src.template head<3>());
+ transformation_matrix.template block<3, 1>(0, 3) =
+ centroid_tgt.template head<3>() - Rc;
}
} // namespace registration
// Prevents unnecessary voxel initiations
if (resolution_ != resolution) {
resolution_ = resolution;
- if (input_) {
+ if (target_) { // init() needs target_
init();
}
}
* \return outlier ratio
*/
inline double
+ getOutlierRatio() const
+ {
+ return outlier_ratio_;
+ }
+
+ /** \brief Get the point cloud outlier ratio.
+ * \return outlier ratio
+ */
+ PCL_DEPRECATED(1,
+ 18,
+ "The method `getOulierRatio` has been renamed to "
+ "`getOutlierRatio`.")
+ inline double
getOulierRatio() const
{
return outlier_ratio_;
}
+ /** \brief Set/change the point cloud outlier ratio.
+ * \param[in] outlier_ratio outlier ratio
+ */
+ inline void
+ setOutlierRatio(double outlier_ratio)
+ {
+ outlier_ratio_ = outlier_ratio;
+ }
+
+ PCL_DEPRECATED(1,
+ 18,
+ "The method `setOulierRatio` has been renamed to "
+ "`setOutlierRatio`.")
/** \brief Set/change the point cloud outlier ratio.
* \param[in] outlier_ratio outlier ratio
*/
return nr_iterations_;
}
+ /** \brief Get access to the `VoxelGridCovariance` generated from the target cloud,
+ * containing point means and covariances. Set the input target cloud before calling
+ * this. Useful for debugging, e.g.
+ * \code
+ * pcl::PointCloud<PointXYZ> visualize_cloud;
+ * ndt.getTargetCells().getDisplayCloud(visualize_cloud);
+ * \endcode
+ */
+ inline const TargetGrid&
+ getTargetCells() const
+ {
+ return target_cells_;
+ }
+
/** \brief Convert 6 element transformation vector to affine transformation.
* \param[in] x transformation vector of the form [x, y, z, roll, pitch, yaw]
* \param[out] trans affine transform corresponding to given transformation
target_cells_.setInputCloud(target_);
// Initiate voxel structure.
target_cells_.filter(true);
+ PCL_DEBUG("[pcl::%s::init] Computed voxel structure, got %zu voxels with valid "
+ "normal distributions.\n",
+ getClassName().c_str(),
+ target_cells_.getCentroids()->size());
}
/** \brief Compute derivatives of likelihood function w.r.t. the
computeHessian(Eigen::Matrix<double, 6, 6>& hessian,
const PointCloudSource& trans_cloud);
- /** \brief Compute hessian of likelihood function w.r.t. the transformation
- * vector.
- * \note Equation 6.13 [Magnusson 2009].
- * \param[out] hessian the hessian matrix of the likelihood function
- * w.r.t. the transformation vector
- * \param[in] trans_cloud transformed point cloud
- * \param[in] transform the current transform vector
- */
- PCL_DEPRECATED(1, 15, "Parameter `transform` is not required")
- void
- computeHessian(Eigen::Matrix<double, 6, 6>& hessian,
- const PointCloudSource& trans_cloud,
- const Eigen::Matrix<double, 6, 1>& transform)
- {
- pcl::utils::ignore(transform);
- computeHessian(hessian, trans_cloud);
- }
-
/** \brief Compute individual point contributions to hessian of likelihood
* function w.r.t. the transformation vector.
* \note Equation 6.13 [Magnusson 2009].
std::size_t
operator()(const HashKeyStruct& s) const noexcept
{
- const std::size_t h1 = std::hash<int>{}(s.first);
- const std::size_t h2 = std::hash<int>{}(s.second.first);
- const std::size_t h3 = std::hash<int>{}(s.second.second.first);
- const std::size_t h4 = std::hash<int>{}(s.second.second.second);
- return h1 ^ (h2 << 1) ^ (h3 << 2) ^ (h4 << 3);
+ /// RS hash function https://www.partow.net/programming/hashfunctions/index.html
+ std::size_t b_ = 378551;
+ std::size_t a_ = 63689;
+ std::size_t hash = 0;
+ hash = hash * a_ + s.first;
+ a_ = a_ * b_;
+ hash = hash * a_ + s.second.first;
+ a_ = a_ * b_;
+ hash = hash * a_ + s.second.second.first;
+ a_ = a_ * b_;
+ hash = hash * a_ + s.second.second.second;
+ return hash;
}
};
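The hasher above is consumed by the FeatureHashMapType alias that follows; as a generic, standalone sketch of the pattern (the key layout mirrors HashKeyStruct, the value type is just a placeholder), a nested-pair key with a user-supplied hash functor in an unordered_multimap:

#include <cstddef>
#include <unordered_map>
#include <utility>

// Four discretized feature components packed as nested pairs, as in HashKeyStruct.
using QuantizedKey = std::pair<int, std::pair<int, std::pair<int, int>>>;

struct QuantizedKeyHash {
  std::size_t
  operator() (const QuantizedKey& k) const noexcept
  {
    // Same RS-style mixing as above: multiply-accumulate with an evolving factor.
    std::size_t b = 378551, a = 63689, hash = 0;
    hash = hash * a + k.first;                a *= b;
    hash = hash * a + k.second.first;         a *= b;
    hash = hash * a + k.second.second.first;  a *= b;
    hash = hash * a + k.second.second.second;
    return hash;
  }
};

// One quantized key may map to several entries, hence a multimap.
using QuantizedFeatureMap = std::unordered_multimap<QuantizedKey, int, QuantizedKeyHash>;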
using FeatureHashMapType =
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- *
- */
-
-#pragma once
-
-#include <pcl/registration/transformation_estimation.h>
-#include <pcl/cloud_iterator.h>
-PCL_DEPRECATED_HEADER(1,
- 15,
- "TransformationEstimationDQ has been renamed to "
- "TransformationEstimationDualQuaternion.");
-
-namespace pcl {
-namespace registration {
-/** @b TransformationEstimationDQ implements dual quaternion based estimation of
- * the transformation aligning the given correspondences.
- *
- * \note The class is templated on the source and target point types as well as on the
- * output scalar of the transformation matrix (i.e., float or double). Default: float.
- * \author Sergey Zagoruyko
- * \ingroup registration
- */
-template <typename PointSource, typename PointTarget, typename Scalar = float>
-class TransformationEstimationDQ
-: public TransformationEstimation<PointSource, PointTarget, Scalar> {
-public:
- using Ptr = shared_ptr<TransformationEstimationDQ<PointSource, PointTarget, Scalar>>;
- using ConstPtr =
- shared_ptr<const TransformationEstimationDQ<PointSource, PointTarget, Scalar>>;
-
- using Matrix4 =
- typename TransformationEstimation<PointSource, PointTarget, Scalar>::Matrix4;
-
- TransformationEstimationDQ(){};
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * point cloud using dual quaternion optimization \param[in] cloud_src the source
- * point cloud dataset \param[in] cloud_tgt the target point cloud dataset \param[out]
- * transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- Matrix4& transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * point cloud using dual quaternion optimization \param[in] cloud_src the source
- * point cloud dataset \param[in] indices_src the vector of indices describing the
- * points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::Indices& indices_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- Matrix4& transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * point cloud using dual quaternion optimization \param[in] cloud_src the source
- * point cloud dataset \param[in] indices_src the vector of indices describing the
- * points of interest in \a cloud_src
- * \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the
- * interest points from \a indices_src
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- inline void
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::Indices& indices_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- const pcl::Indices& indices_tgt,
- Matrix4& transformation_matrix) const;
-
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * point cloud using dual quaternion optimization \param[in] cloud_src the source
- * point cloud dataset \param[in] cloud_tgt the target point cloud dataset \param[in]
- * correspondences the vector of correspondences between source and target point cloud
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation(const pcl::PointCloud<PointSource>& cloud_src,
- const pcl::PointCloud<PointTarget>& cloud_tgt,
- const pcl::Correspondences& correspondences,
- Matrix4& transformation_matrix) const;
-
-protected:
- /** \brief Estimate a rigid rotation transformation between a source and a target
- * \param[in] source_it an iterator over the source point cloud dataset
- * \param[in] target_it an iterator over the target point cloud dataset
- * \param[out] transformation_matrix the resultant transformation matrix
- */
- void
- estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
- ConstCloudIterator<PointTarget>& target_it,
- Matrix4& transformation_matrix) const;
-};
-
-} // namespace registration
-} // namespace pcl
-
-#include <pcl/registration/impl/transformation_estimation_dq.hpp>
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#pragma once
-PCL_DEPRECATED_HEADER(1, 15, "Please include pcl/common/transforms.h directly.")
-#include <pcl/common/transforms.h>
pnt_out.z = static_cast<float>(
transform_matrix_(2, 0) * pnt_in.x + transform_matrix_(2, 1) * pnt_in.y +
transform_matrix_(2, 2) * pnt_in.z + transform_matrix_(2, 3));
- // pnt_out.getVector3fMap () = transform_matrix_.topLeftCorner (3, 3) *
+ // pnt_out.getVector3fMap () = transform_matrix_.topLeftCorner<3, 3> () *
// pnt_in.getVector3fMap () +
- // transform_matrix_.block (0, 3, 3, 1);
+ // transform_matrix_.block<3, 1> (0, 3);
// pnt_out.data[3] = pnt_in.data[3];
}
trans(2, 2) = 1; // Rotation around the Z-axis
// Copy the rotation and translation components
- trans.block(0, 3, 4, 1) = Eigen::Matrix<Scalar, 4, 1>(p[0], p[1], 0, 1.0);
+ trans.template block<4, 1>(0, 3) = Eigen::Matrix<Scalar, 4, 1>(p[0], p[1], 0, 1.0);
// Compute w from the unit quaternion
Eigen::Rotation2D<Scalar> r(p[2]);
- trans.topLeftCorner(2, 2) = r.toRotationMatrix();
+ trans.template topLeftCorner<2, 2>() = r.toRotationMatrix();
}
};
} // namespace registration
Eigen::Quaternion<Scalar> q(0, p[3], p[4], p[5]);
q.w() = static_cast<Scalar>(std::sqrt(1 - q.dot(q)));
q.normalize();
- transform_matrix_.topLeftCorner(3, 3) = q.toRotationMatrix();
+ transform_matrix_.template topLeftCorner<3, 3>() = q.toRotationMatrix();
}
};
} // namespace registration
registration algorithms for both organized and unorganized (general purpose)
datasets.
+PCL's methods to register one point cloud to another can be divided into two groups: the first group needs an initial guess of the transformation (pcl::IterativeClosestPoint, pcl::IterativeClosestPointWithNormals, pcl::IterativeClosestPointNonLinear, pcl::GeneralizedIterativeClosestPoint, pcl::GeneralizedIterativeClosestPoint6D, pcl::NormalDistributionsTransform, pcl::NormalDistributionsTransform2D), while the second group does not need a guess but is usually slower and less accurate (pcl::registration::FPCSInitialAlignment, pcl::registration::KFPCSInitialAlignment, pcl::SampleConsensusInitialAlignment, pcl::SampleConsensusPrerejective, pcl::PPFRegistration). Many parts of the registration process can be configured and swapped out, such as the correspondence estimation, correspondence rejection, or the transformation estimation. Finally, PCL also provides methods for aligning more than two point clouds (pcl::registration::ELCH, pcl::registration::LUM, pcl::PairwiseGraphRegistration, pcl::registration::IncrementalRegistration, pcl::registration::MetaRegistration). A minimal usage example is shown below the images.
+
\image html http://www.pointclouds.org/assets/images/contents/documentation/registration_outdoor.png
\image html http://www.pointclouds.org/assets/images/contents/documentation/registration_closeup.png
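As a minimal sketch of the first group (methods that take an initial guess), aligning two clouds with pcl::IterativeClosestPoint; the cloud contents and the guess below are placeholders:

\code
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <iostream>

pcl::PointCloud<pcl::PointXYZ>::Ptr source (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr target (new pcl::PointCloud<pcl::PointXYZ>);
// ... fill source and target, e.g. by loading two scans ...

pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
icp.setInputSource (source);
icp.setInputTarget (target);

Eigen::Matrix4f guess = Eigen::Matrix4f::Identity (); // e.g. from odometry
pcl::PointCloud<pcl::PointXYZ> aligned;
icp.align (aligned, guess);

if (icp.hasConverged ())
  std::cout << "Fitness: " << icp.getFitnessScore () << "\n"
            << icp.getFinalTransformation () << std::endl;
\endcode

The methods of the second group produce such a guess themselves, which can then be refined with one of the methods from the first group.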
PCL_DEBUG("[pcl::%s::computeTransformation] Convergence failed\n",
getClassName().c_str());
}
- // for some reason the static equivalent method raises an error
- // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) *
- // (guess.block<3,3> (0,0)); final_transformation_.block <3, 1> (0, 3) =
- // transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
- final_transformation_.topLeftCorner(3, 3) =
- previous_transformation_.topLeftCorner(3, 3) * guess.topLeftCorner(3, 3);
+ final_transformation_.topLeftCorner<3, 3>() =
+ previous_transformation_.topLeftCorner<3, 3>() * guess.topLeftCorner<3, 3>();
final_transformation_(0, 3) = previous_transformation_(0, 3) + guess(0, 3);
final_transformation_(1, 3) = previous_transformation_(1, 3) + guess(1, 3);
final_transformation_(2, 3) = previous_transformation_(2, 3) + guess(2, 3);
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Alexandru-Eugen Ichim
- * Willow Garage, Inc
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: transformation_estimation_svd.cpp 7153 2012-09-16 22:24:29Z aichim $
- *
- */
-
-#include <pcl/registration/transformation_estimation_dq.h>
set(SUBSYS_DESC "Point cloud sample consensus library")
set(SUBSYS_DEPS common search)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
set(srcs
src/sac.cpp
+ src/sac_model_ellipse3d.cpp
src/sac_model_circle.cpp
src/sac_model_circle3d.cpp
src/sac_model_cylinder.cpp
src/sac_model_plane.cpp
src/sac_model_registration.cpp
src/sac_model_sphere.cpp
- src/sac_model_ellipse3d.cpp
+ src/sac_model_torus.cpp
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/lmeds.h"
"include/pcl/${SUBSYS_NAME}/method_types.h"
"include/pcl/${SUBSYS_NAME}/mlesac.h"
"include/pcl/${SUBSYS_NAME}/sac_model_registration_2d.h"
"include/pcl/${SUBSYS_NAME}/sac_model_sphere.h"
"include/pcl/${SUBSYS_NAME}/sac_model_ellipse3d.h"
+ "include/pcl/${SUBSYS_NAME}/sac_model_torus.h"
)
set(impl_incs
"include/pcl/${SUBSYS_NAME}/impl/sac_model_registration_2d.hpp"
"include/pcl/${SUBSYS_NAME}/impl/sac_model_sphere.hpp"
"include/pcl/${SUBSYS_NAME}/impl/sac_model_ellipse3d.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/sac_model_torus.hpp"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${LIB_NAME}" pcl_common)
+target_link_libraries("${LIB_NAME}" pcl_common pcl_search)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-#include <boost/random.hpp>
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-
-#include <Eigen/Core>
-#include <unsupported/Eigen/NonLinearOptimization>
const unsigned max_skip = max_iterations_ * 10;
// Number of samples to try randomly
- std::size_t fraction_nr_points = pcl_lrint (static_cast<double>(sac_model_->getIndices ()->size ()) * fraction_nr_pretest_ / 100.0);
+ const std::size_t fraction_nr_points = (fraction_nr_pretest_ < 0.0 ? nr_samples_pretest_ : pcl_lrint (static_cast<double>(sac_model_->getIndices ()->size ()) * fraction_nr_pretest_ / 100.0));
+ PCL_DEBUG ("[pcl::RandomizedMEstimatorSampleConsensus::computeModel] Using %lu points for RMSAC pre-test.\n", fraction_nr_points);
// Iterate
while (iterations_ < k && skipped_count < max_skip)
const unsigned max_skip = max_iterations_ * 10;
// Number of samples to try randomly
- const std::size_t fraction_nr_points = pcl_lrint (static_cast<double>(sac_model_->getIndices ()->size ()) * fraction_nr_pretest_ / 100.0);
+ const std::size_t fraction_nr_points = (fraction_nr_pretest_ < 0.0 ? nr_samples_pretest_ : pcl_lrint (static_cast<double>(sac_model_->getIndices ()->size ()) * fraction_nr_pretest_ / 100.0));
+ PCL_DEBUG ("[pcl::RandomizedRandomSampleConsensus::computeModel] Using %lu points for RRANSAC pre-test.\n", fraction_nr_points);
// Iterate
while (iterations_ < k)
// Check if the squared norm of the cross-product is non-zero, otherwise
// common_helper_vec, which plays an important role in computeModelCoefficients,
// would likely be ill-formed.
- if ((p1 - p0).cross(p1 - p2).squaredNorm() < Eigen::NumTraits<float>::dummy_precision ())
+ if ((p1 - p0).cross(p1 - p2).squaredNorm() < Eigen::NumTraits<double>::dummy_precision ())
{
PCL_ERROR ("[pcl::SampleConsensusModelCircle3D::isSampleGood] Sample points too similar or collinear!\n");
return (false);
// The same check is implemented in isSampleGood, so be sure to look there too
// if you find the need to change something here.
- if (common_helper_vec.squaredNorm() < Eigen::NumTraits<float>::dummy_precision ())
+ if (common_helper_vec.squaredNorm() < Eigen::NumTraits<double>::dummy_precision ())
{
PCL_ERROR ("[pcl::SampleConsensusModelCircle3D::computeModelCoefficients] Sample points too similar or collinear!\n");
return (false);
return;
}
inliers.clear ();
+ error_sqr_dists_.clear ();
inliers.reserve (indices_->size ());
+ error_sqr_dists_.reserve (indices_->size ());
const auto squared_threshold = threshold * threshold;
// Iterate through the 3d points and calculate the distances from them to the sphere
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
Eigen::Vector3d distanceVector = P - K;
- if (distanceVector.squaredNorm () < squared_threshold)
+ const double sqr_dist = distanceVector.squaredNorm ();
+ if (sqr_dist < squared_threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
inliers.push_back ((*indices_)[i]);
+ error_sqr_dists_.push_back (sqr_dist);
}
}
}
OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
- Eigen::VectorXd coeff;
+ Eigen::VectorXd coeff = optimized_coefficients.cast<double>();
int info = lm.minimize (coeff);
+ coeff.tail(3).normalize(); // normalize the cylinder axis
for (Eigen::Index i = 0; i < coeff.size (); ++i)
optimized_coefficients[i] = static_cast<float> (coeff[i]);
pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the plane
- for (std::size_t i = 0; i < inliers.size (); ++i)
+ for (const auto &inlier : inliers)
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P ((*input_)[inliers[i]].x, (*input_)[inliers[i]].y, (*input_)[inliers[i]].z);
+ Eigen::Vector3d P ((*input_)[inlier].x, (*input_)[inlier].y, (*input_)[inlier].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
// K : Point on Circle
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
- projected_points[i].x = static_cast<float> (K[0]);
- projected_points[i].y = static_cast<float> (K[1]);
- projected_points[i].z = static_cast<float> (K[2]);
+ projected_points[inlier].x = static_cast<float> (K[0]);
+ projected_points[inlier].y = static_cast<float> (K[1]);
+ projected_points[inlier].z = static_cast<float> (K[2]);
}
}
else
Eigen::Vector4f ortho31 = n3.cross3(n1);
float denominator = n1.dot(ortho23);
+ if (std::abs(denominator) < Eigen::NumTraits<float>::dummy_precision ())
+ {
+ PCL_ERROR ("[pcl::SampleConsensusModelCone::computeModelCoefficients] Impossible to compute stable model with these points.\n");
+ return (false);
+ }
float d1 = p1.dot (n1);
float d2 = p2.dot (n2);
model_coefficients[10] = static_cast<float>(x_axis[2]);
- PCL_DEBUG ("[pcl::SampleConsensusModelEllipse3D::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g).\n",
+ PCL_DEBUG ("[pcl::SampleConsensusModelEllipse3D::computeModelCoefficients] Model is (%g,%g,%g,%g,%g,%g,%g,%g,%g,%g,%g).\n",
model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3],
model_coefficients[4], model_coefficients[5], model_coefficients[6], model_coefficients[7],
model_coefficients[8], model_coefficients[9], model_coefficients[10]);
Indices &inliers)
{
inliers.clear();
+ error_sqr_dists_.clear();
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
{
return;
}
inliers.reserve (indices_->size ());
+ error_sqr_dists_.reserve (indices_->size ());
// c : Ellipse Center
const Eigen::Vector3f c(model_coefficients[0], model_coefficients[1], model_coefficients[2]);
float th_opt;
const Eigen::Vector2f distanceVector = dvec2ellipse(params, p_(0), p_(1), th_opt);
- if (distanceVector.squaredNorm() < squared_threshold)
+ const double sqr_dist = distanceVector.squaredNorm();
+ if (sqr_dist < squared_threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
inliers.push_back ((*indices_)[i]);
+ error_sqr_dists_.push_back (sqr_dist);
}
}
}
OptimizationFunctor functor(this, inliers);
Eigen::NumericalDiff<OptimizationFunctor> num_diff(functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm(num_diff);
- Eigen::VectorXd coeff;
+ Eigen::VectorXd coeff = model_coefficients.cast<double>();
int info = lm.minimize(coeff);
- for (Eigen::Index i = 0; i < coeff.size (); ++i)
- optimized_coefficients[i] = static_cast<float> (coeff[i]);
+ optimized_coefficients = coeff.cast<float>();
// Compute the L2 norm of the residuals
- PCL_DEBUG ("[pcl::SampleConsensusModelEllipse3D::optimizeModelCoefficients] LM solver finished with exit code %i, having a residual norm of %g. \nInitial solution: %g %g %g %g %g %g %g %g %g %g %g %g %g \nFinal solution: %g %g %g %g %g %g %g %g %g %g %g %g %g\n",
+ PCL_DEBUG ("[pcl::SampleConsensusModelEllipse3D::optimizeModelCoefficients] LM solver finished with exit code %i, having a residual norm of %g. \nInitial solution: %g %g %g %g %g %g %g %g %g %g %g\nFinal solution: %g %g %g %g %g %g %g %g %g %g %g\n",
info, lm.fvec.norm (),
model_coefficients[0],
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_NORMAL_PLANE_H_
#include <pcl/sample_consensus/sac_model_normal_plane.h>
-#include <pcl/sample_consensus/impl/sac_model_plane.hpp> // for dist4, dist8
#include <pcl/common/common.h> // for getAngle3D
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return (true);
}
-#define AT(POS) ((*input_)[(*indices_)[(POS)]])
-
-#ifdef __AVX__
-// This function computes the distances of 8 points to the plane
-template <typename PointT> inline __m256 pcl::SampleConsensusModelPlane<PointT>::dist8 (const std::size_t i, const __m256 &a_vec, const __m256 &b_vec, const __m256 &c_vec, const __m256 &d_vec, const __m256 &abs_help) const
-{
- // The andnot-function realizes an abs-operation: the sign bit is removed
- return _mm256_andnot_ps (abs_help,
- _mm256_add_ps (_mm256_add_ps (_mm256_mul_ps (a_vec, _mm256_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x, AT(i+4).x, AT(i+5).x, AT(i+6).x, AT(i+7).x)),
- _mm256_mul_ps (b_vec, _mm256_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y, AT(i+4).y, AT(i+5).y, AT(i+6).y, AT(i+7).y))),
- _mm256_add_ps (_mm256_mul_ps (c_vec, _mm256_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z, AT(i+4).z, AT(i+5).z, AT(i+6).z, AT(i+7).z)),
- d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
-}
-#endif // ifdef __AVX__
-
-#ifdef __SSE__
-// This function computes the distances of 4 points to the plane
-template <typename PointT> inline __m128 pcl::SampleConsensusModelPlane<PointT>::dist4 (const std::size_t i, const __m128 &a_vec, const __m128 &b_vec, const __m128 &c_vec, const __m128 &d_vec, const __m128 &abs_help) const
-{
- // The andnot-function realizes an abs-operation: the sign bit is removed
- return _mm_andnot_ps (abs_help,
- _mm_add_ps (_mm_add_ps (_mm_mul_ps (a_vec, _mm_set_ps (AT(i ).x, AT(i+1).x, AT(i+2).x, AT(i+3).x)),
- _mm_mul_ps (b_vec, _mm_set_ps (AT(i ).y, AT(i+1).y, AT(i+2).y, AT(i+3).y))),
- _mm_add_ps (_mm_mul_ps (c_vec, _mm_set_ps (AT(i ).z, AT(i+1).z, AT(i+2).z, AT(i+3).z)),
- d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
-}
-#endif // ifdef __SSE__
-
-#undef AT
-
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::getDistancesToModel (
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2009-2010, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_TORUS_H_
+#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_TORUS_H_
+
+// clang-format off
+#include <pcl/sample_consensus/sac_model_torus.h>
+#include <pcl/common/concatenate.h>
+// clang-format on
+
+#include <unsupported/Eigen/NonLinearOptimization> // for LevenbergMarquardt
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+bool
+pcl::SampleConsensusModelTorus<PointT, PointNT>::isSampleGood(
+ const Indices& samples) const
+{
+ if (samples.size() != sample_size_) {
+ PCL_ERROR("[pcl::SampleConsensusTorus::isSampleGood] Wrong number of samples (is "
+ "%lu, should be %lu)!\n",
+ samples.size(),
+ sample_size_);
+ return (false);
+ }
+
+ Eigen::Vector3f n0 = Eigen::Vector3f((*normals_)[samples[0]].getNormalVector3fMap());
+ Eigen::Vector3f n1 = Eigen::Vector3f((*normals_)[samples[1]].getNormalVector3fMap());
+ Eigen::Vector3f n2 = Eigen::Vector3f((*normals_)[samples[2]].getNormalVector3fMap());
+ Eigen::Vector3f n3 = Eigen::Vector3f((*normals_)[samples[3]].getNormalVector3fMap());
+
+ // Required for numeric stability on computeModelCoefficients
+ if (std::abs((n0).cross(n1).squaredNorm()) <
+ Eigen::NumTraits<float>::dummy_precision() ||
+ std::abs((n0).cross(n2).squaredNorm()) <
+ Eigen::NumTraits<float>::dummy_precision() ||
+ std::abs((n0).cross(n3).squaredNorm()) <
+ Eigen::NumTraits<float>::dummy_precision() ||
+ std::abs((n1).cross(n2).squaredNorm()) <
+ Eigen::NumTraits<float>::dummy_precision() ||
+ std::abs((n1).cross(n3).squaredNorm()) <
+ Eigen::NumTraits<float>::dummy_precision()) {
+ PCL_ERROR("[pcl::SampleConsensusModelEllipse3D::isSampleGood] Sample points "
+ "normals too similar or collinear!\n");
+ return (false);
+ }
+ return (true);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+float
+crossDot(const Eigen::Vector3f& v1, const Eigen::Vector3f& v2, const Eigen::Vector3f& v3)
+{
+ return v1.cross(v2).dot(v3);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+bool
+pcl::SampleConsensusModelTorus<PointT, PointNT>::computeModelCoefficients(
+ const Indices& samples, Eigen::VectorXf& model_coefficients) const
+{
+
+ // Make sure that the samples are valid
+ if (!isSampleGood(samples)) {
+ PCL_ERROR("[pcl::SampleConsensusModelTorus::computeModelCoefficients] Invalid set "
+ "of samples given!\n");
+ return (false);
+ }
+
+ if (!normals_) {
+ PCL_ERROR("[pcl::SampleConsensusModelTorus::computeModelCoefficients] No input "
+ "dataset containing normals was given!\n");
+ return (false);
+ }
+ // Find axis using:
+
+ // @article{article,
+ // author = {Lukacs, G. and Marshall, David and Martin, R.},
+ // year = {2001},
+ // month = {09},
+ // pages = {},
+ // title = {Geometric Least-Squares Fitting of Spheres, Cylinders, Cones and Tori}
+ //}
+
+ const Eigen::Vector3f n0 = Eigen::Vector3f((*normals_)[samples[0]].getNormalVector3fMap());
+ const Eigen::Vector3f n1 = Eigen::Vector3f((*normals_)[samples[1]].getNormalVector3fMap());
+ const Eigen::Vector3f n2 = Eigen::Vector3f((*normals_)[samples[2]].getNormalVector3fMap());
+ const Eigen::Vector3f n3 = Eigen::Vector3f((*normals_)[samples[3]].getNormalVector3fMap());
+
+ const Eigen::Vector3f p0 = Eigen::Vector3f((*input_)[samples[0]].getVector3fMap());
+ const Eigen::Vector3f p1 = Eigen::Vector3f((*input_)[samples[1]].getVector3fMap());
+ const Eigen::Vector3f p2 = Eigen::Vector3f((*input_)[samples[2]].getVector3fMap());
+ const Eigen::Vector3f p3 = Eigen::Vector3f((*input_)[samples[3]].getVector3fMap());
+
+ const float a01 = crossDot(n0, n1, n2);
+ const float b01 = crossDot(n0, n1, n3);
+ const float a0 = crossDot(p2 - p1, n0, n2);
+ const float a1 = crossDot(p0 - p2, n1, n2);
+ const float b0 = crossDot(p3 - p1, n0, n3);
+ const float b1 = crossDot(p0 - p3, n1, n3);
+ const float a = crossDot(p0 - p2, p1 - p0, n2);
+ const float b = crossDot(p0 - p3, p1 - p0, n3);
+
+ // The two remaining constraints form a bilinear system in t0 and t1:
+ //   a01*t0*t1 + a0*t0 + a1*t1 + a = 0
+ //   b01*t0*t1 + b0*t0 + b1*t1 + b = 0
+ // Eliminating the bilinear term (first equation minus a01/b01 times the second):
+ //   (a0 - b0*a01/b01)*t0 + (a1 - b1*a01/b01)*t1 + (a - b*a01/b01) = 0
+ //   => t0 = k * t1 + p
+
+ const float k = -(a1 - b1 * a01 / b01) / (a0 - b0 * a01 / b01);
+ const float p = -(a - b * a01 / b01) / (a0 - b0 * a01 / b01);
+
+ // Substituting t0 into the second equation gives a quadratic in t1:
+ //
+ //   (b01*k)*t1*t1 + (b01*p + b0*k + b1)*t1 + (b0*p + b) = 0
+
+ const float _a = (b01 * k);
+ const float _b = (b01 * p + b0 * k + b1);
+ const float _c = (b0 * p + b);
+
+ const float eps = Eigen::NumTraits<float>::dummy_precision();
+
+ // Check for imaginary solutions, or small denominators.
+ if ((_b * _b - 4 * _a * _c) < 0 || std::abs(a0 - b0 * a01) < eps ||
+ std::abs(b01) < eps || std::abs(_a) < eps) {
+ PCL_DEBUG("[pcl::SampleConsensusModelTorus::computeModelCoefficients] Can't "
+ "compute model coefficients with this method!\n");
+ return (false);
+ }
+
+ const float s0 = (-_b + std::sqrt(_b * _b - 4 * _a * _c)) / (2 * _a);
+ const float s1 = (-_b - std::sqrt(_b * _b - 4 * _a * _c)) / (2 * _a);
+
+ float r_maj_stddev_cycle1 = std::numeric_limits<float>::max();
+
+ for (float s : {s0, s1}) {
+
+ const float t1 = s;
+ const float t0 = k * t1 + p;
+
+ // Direction vector
+ Eigen::Vector3f d = ((p1 + n1 * t1) - (p0 + n0 * t0));
+ d.normalize();
+ // Flip direction, so that the first element of the direction vector is
+ // positive, for consistency.
+ if (d[0] < 0) {
+ d *= -1;
+ }
+
+ // Flip normals if required. Note |d| = 1
+ // d
+ // if (n0.dot(d) / n0.norm() < M_PI / 2 ) n0 = -n0;
+ // if (n1.dot(d) / n1.norm() < M_PI / 2 ) n1 = -n1;
+ // if (n2.dot(d) / n2.norm() < M_PI / 2 ) n2 = -n2;
+ // if (n3.dot(d) / n3.norm() < M_PI / 2 ) n3 = -n3;
+
+    // We fit the points to the plane of the torus:
+    // Ax + By + Cz + D = 0
+    // Each point plus its normal times the minor radius gives a point
+    // in that plane:
+    // Pplane_i = P_i + n_i * r
+    // Substituting (A, B, C) = d and expanding:
+    // dx*(P_i_x + n_i_x*r) + dy*(P_i_y + n_i_y*r) + dz*(P_i_z + n_i_z*r) + D = 0
+    // (dx*P_i_x + dy*P_i_y + dz*P_i_z) + (dx*n_i_x + dy*n_i_y + dz*n_i_z)*r + D = 0
+    // This is a linear least-squares system in the two unknowns r and D.
+    //
+ Eigen::MatrixXf A(4, 2);
+ A << d.dot(n0), 1, d.dot(n1), 1, d.dot(n2), 1, d.dot(n3), 1;
+
+ Eigen::Matrix<float, -1, -1> B(4, 1);
+ B << -d.dot(p0), -d.dot(p1), -d.dot(p2), -d.dot(p3);
+
+ Eigen::Matrix<float, -1, -1> sol;
+ sol = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(B);
+
+ const float r_min = -sol(0);
+ const float D = sol(1);
+
+    // The axis line and the plane intersect at the centroid of the torus.
+    // We take any point Pany on the line and find lambda such that
+    // Pany + lambda * d lies in the plane.
+
+ const Eigen::Vector3f Pany = (p1 + n1 * t1);
+
+ const float lambda = (-d.dot(Pany) - D) / d.dot(d);
+
+ const Eigen::Vector3f centroid = Pany + d * lambda;
+
+    // Finally, the major radius. The least-squares estimate is the average
+    // distance from the shifted points (P_i - r_min * n_i) to the centroid;
+    // here it is computed as the root mean square of those distances.
+ const float r_maj = std::sqrt(((p0 - r_min * n0 - centroid).squaredNorm() +
+ (p1 - r_min * n1 - centroid).squaredNorm() +
+ (p2 - r_min * n2 - centroid).squaredNorm() +
+ (p3 - r_min * n3 - centroid).squaredNorm()) /
+ 4.f);
+
+ const float r_maj_stddev =
+ std::sqrt((std::pow(r_maj - (p0 - r_min * n0 - centroid).norm(), 2) +
+ std::pow(r_maj - (p1 - r_min * n1 - centroid).norm(), 2) +
+ std::pow(r_maj - (p2 - r_min * n2 - centroid).norm(), 2) +
+ std::pow(r_maj - (p3 - r_min * n3 - centroid).norm(), 2)) /
+ 4.f);
+ // We select the minimum stddev cycle
+ if (r_maj_stddev < r_maj_stddev_cycle1) {
+ r_maj_stddev_cycle1 = r_maj_stddev;
+ }
+ else {
+ break;
+ }
+
+ model_coefficients.resize(model_size_);
+ model_coefficients[0] = r_maj;
+ model_coefficients[1] = r_min;
+
+ model_coefficients[2] = centroid[0];
+ model_coefficients[3] = centroid[1];
+ model_coefficients[4] = centroid[2];
+
+ model_coefficients[5] = d[0];
+ model_coefficients[6] = d[1];
+ model_coefficients[7] = d[2];
+ }
+ return true;
+}
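As a quick sanity check on the elimination above, the following standalone sketch (plain C++, made-up coefficients, independent of the patch) applies the same k/p substitution and quadratic solve, and verifies that both recovered (t0, t1) pairs satisfy the original pair of bilinear equations:

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    int main()
    {
      // Arbitrary, non-degenerate coefficients of
      //   a01*t0*t1 + a0*t0 + a1*t1 + a = 0
      //   b01*t0*t1 + b0*t0 + b1*t1 + b = 0
      const double a01 = 0.7, a0 = 1.3, a1 = -0.4, a = 0.9;
      const double b01 = -1.1, b0 = 0.5, b1 = 2.0, b = -0.3;

      // Linear relation t0 = k*t1 + p from eliminating the bilinear term
      const double k = -(a1 - b1 * a01 / b01) / (a0 - b0 * a01 / b01);
      const double p = -(a - b * a01 / b01) / (a0 - b0 * a01 / b01);

      // Quadratic in t1 after substituting t0 = k*t1 + p into the second equation
      const double A = b01 * k, B = b01 * p + b0 * k + b1, C = b0 * p + b;
      const double disc = B * B - 4 * A * C;
      if (disc < 0) { std::printf("no real solution\n"); return 0; }

      for (const double t1 : {(-B + std::sqrt(disc)) / (2 * A), (-B - std::sqrt(disc)) / (2 * A)}) {
        const double t0 = k * t1 + p;
        std::printf("t0=%g t1=%g residuals: %g %g\n", t0, t1,
                    a01 * t0 * t1 + a0 * t0 + a1 * t1 + a,   // should be ~0
                    b01 * t0 * t1 + b0 * t0 + b1 * t1 + b);  // should be ~0
      }
    }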
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+void
+pcl::SampleConsensusModelTorus<PointT, PointNT>::getDistancesToModel(
+ const Eigen::VectorXf& model_coefficients, std::vector<double>& distances) const
+{
+
+ if (!isModelValid(model_coefficients)) {
+ distances.clear();
+ return;
+ }
+
+ distances.resize(indices_->size());
+
+ // Iterate through the 3d points and calculate the distances to the closest torus
+ // point
+ for (std::size_t i = 0; i < indices_->size(); ++i) {
+ const Eigen::Vector3f pt = (*input_)[(*indices_)[i]].getVector3fMap();
+ const Eigen::Vector3f pt_n = (*normals_)[(*indices_)[i]].getNormalVector3fMap();
+
+ Eigen::Vector3f torus_closest;
+ projectPointToTorus(pt, pt_n, model_coefficients, torus_closest);
+
+ distances[i] = (torus_closest - pt).norm();
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+void
+pcl::SampleConsensusModelTorus<PointT, PointNT>::selectWithinDistance(
+ const Eigen::VectorXf& model_coefficients, const double threshold, Indices& inliers)
+{
+ // Check if the model is valid given the user constraints
+ if (!isModelValid(model_coefficients)) {
+ inliers.clear();
+ return;
+ }
+ inliers.clear();
+ error_sqr_dists_.clear();
+ inliers.reserve(indices_->size());
+ error_sqr_dists_.reserve(indices_->size());
+
+ for (std::size_t i = 0; i < indices_->size(); ++i) {
+ const Eigen::Vector3f pt = (*input_)[(*indices_)[i]].getVector3fMap();
+ const Eigen::Vector3f pt_n = (*normals_)[(*indices_)[i]].getNormalVector3fMap();
+
+ Eigen::Vector3f torus_closest;
+ projectPointToTorus(pt, pt_n, model_coefficients, torus_closest);
+
+ const float distance = (torus_closest - pt).norm();
+
+ if (distance < threshold) {
+ // Returns the indices of the points whose distances are smaller than the
+ // threshold
+ inliers.push_back((*indices_)[i]);
+ error_sqr_dists_.push_back(distance);
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+std::size_t
+pcl::SampleConsensusModelTorus<PointT, PointNT>::countWithinDistance(
+ const Eigen::VectorXf& model_coefficients, const double threshold) const
+{
+ if (!isModelValid(model_coefficients))
+ return (0);
+
+ std::size_t nr_p = 0;
+
+ for (std::size_t i = 0; i < indices_->size(); ++i) {
+ const Eigen::Vector3f pt = (*input_)[(*indices_)[i]].getVector3fMap();
+ const Eigen::Vector3f pt_n = (*normals_)[(*indices_)[i]].getNormalVector3fMap();
+
+ Eigen::Vector3f torus_closest;
+ projectPointToTorus(pt, pt_n, model_coefficients, torus_closest);
+
+ const float distance = (torus_closest - pt).norm();
+
+ if (distance < threshold) {
+ nr_p++;
+ }
+ }
+ return (nr_p);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+void
+pcl::SampleConsensusModelTorus<PointT, PointNT>::optimizeModelCoefficients(
+ const Indices& inliers,
+ const Eigen::VectorXf& model_coefficients,
+ Eigen::VectorXf& optimized_coefficients) const
+{
+
+ optimized_coefficients = model_coefficients;
+
+ // Needs a set of valid model coefficients
+ if (!isModelValid(model_coefficients)) {
+ PCL_ERROR("[pcl::SampleConsensusModelTorus::optimizeModelCoefficients] Given model "
+ "is invalid!\n");
+ return;
+ }
+
+ // Need more than the minimum sample size to make a difference
+ if (inliers.size() <= sample_size_) {
+ PCL_ERROR("[pcl::SampleConsensusModelTorus::optimizeModelCoefficients] Not enough "
+ "inliers to refine/optimize the model's coefficients (%lu)! Returning "
+ "the same coefficients.\n",
+ inliers.size());
+ return;
+ }
+
+ OptimizationFunctor functor(this, inliers);
+ Eigen::NumericalDiff<OptimizationFunctor> num_diff(functor);
+ Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm(
+ num_diff);
+
+ Eigen::VectorXd coeff = model_coefficients.cast<double>();
+ int info = lm.minimize(coeff);
+
+ if (!info) {
+    PCL_ERROR(
+        "[pcl::SampleConsensusModelTorus::optimizeModelCoefficients] Optimizer "
+        "returned with error (%i)! Returning the same coefficients.\n",
+        info);
+ return;
+ }
+
+ // Normalize direction vector
+ coeff.tail<3>().normalize();
+ optimized_coefficients = coeff.cast<float>();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+void
+pcl::SampleConsensusModelTorus<PointT, PointNT>::projectPointToTorus(
+ const Eigen::Vector3f& p_in,
+ const Eigen::Vector3f& p_n,
+ const Eigen::VectorXf& model_coefficients,
+ Eigen::Vector3f& pt_out) const
+{
+
+ // Fetch optimization parameters
+ const float& R = model_coefficients[0];
+ const float& r = model_coefficients[1];
+
+ const float& x0 = model_coefficients[2];
+ const float& y0 = model_coefficients[3];
+ const float& z0 = model_coefficients[4];
+
+ const float& nx = model_coefficients[5];
+ const float& ny = model_coefficients[6];
+ const float& nz = model_coefficients[7];
+
+ // Normal of the plane where the torus circle lies
+ Eigen::Vector3f n{nx, ny, nz};
+ n.normalize();
+
+ Eigen::Vector3f pt0{x0, y0, z0};
+
+ // Ax + By + Cz + D = 0
+ const float D = -n.dot(pt0);
+
+  // Project to the torus circle plane following the point normal:
+  // we want to find lambda such that p + p_n*lambda lies on the
+  // torus plane.
+ // A*(pt_x + lambda*pn_x) + B *(pt_y + lambda*pn_y) + ... + D = 0
+ // given that: n = [A,B,C]
+ // n.dot(P) + lambda*n.dot(pn) + D = 0
+ //
+
+ Eigen::Vector3f pt_proj;
+  // Project along the point normal when it is not (nearly) parallel to the torus
+  // plane; otherwise the projection is ill-defined and we use the point itself.
+
+ // C++20 -> [[likely]]
+ if (std::abs(n.dot(p_n)) > Eigen::NumTraits<float>::dummy_precision()) {
+ float lambda = (-D - n.dot(p_in)) / n.dot(p_n);
+ pt_proj = p_in + lambda * p_n;
+ }
+ else {
+ pt_proj = p_in;
+ }
+
+  // Closest point on the torus center circle (radius R) to the projected point
+ const Eigen::Vector3f circle_closest = (pt_proj - pt0).normalized() * R + pt0;
+
+  // From that closest point we move towards the goal point until we
+ // meet the surface of the torus
+ pt_out = (p_in - circle_closest).normalized() * r + circle_closest;
+}
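For intuition, the two-step projection above can be exercised in isolation. The sketch below (standalone, made-up values) uses a torus centered at the origin with its axis along z and a point normal chosen parallel to that axis; in that special case the plane projection is the orthogonal one and the result should satisfy the implicit torus equation (sqrt(x^2 + y^2) - R)^2 + z^2 = r^2:

    #include <Eigen/Dense>
    #include <cmath>
    #include <cstdio>

    int main()
    {
      const float R = 2.0f, r = 0.5f;             // major and minor radius
      const Eigen::Vector3f n(0.f, 0.f, 1.f);     // torus axis
      const Eigen::Vector3f c(0.f, 0.f, 0.f);     // torus center
      const Eigen::Vector3f p(1.2f, 0.7f, 0.9f);  // query point
      const Eigen::Vector3f p_n = n;              // point normal, chosen parallel to the axis

      // 1) project p along p_n onto the plane of the torus center circle
      const float D = -n.dot(c);
      const float lambda = (-D - n.dot(p)) / n.dot(p_n);
      const Eigen::Vector3f p_plane = p + lambda * p_n;

      // 2) closest point on the center circle, then move towards p by the minor radius
      const Eigen::Vector3f circle_closest = (p_plane - c).normalized() * R + c;
      const Eigen::Vector3f p_torus = (p - circle_closest).normalized() * r + circle_closest;

      // Residual of the implicit torus equation; ~0 means p_torus lies on the surface
      const float residual =
          std::pow(std::hypot(p_torus.x(), p_torus.y()) - R, 2.f) + p_torus.z() * p_torus.z() - r * r;
      std::printf("residual: %g\n", residual);
    }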
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+void
+pcl::SampleConsensusModelTorus<PointT, PointNT>::projectPoints(
+ const Indices& inliers,
+ const Eigen::VectorXf& model_coefficients,
+ PointCloud& projected_points,
+ bool copy_data_fields) const
+{
+ // Needs a valid set of model coefficients
+ if (!isModelValid(model_coefficients)) {
+    PCL_ERROR(
+        "[pcl::SampleConsensusModelTorus::projectPoints] Given model is invalid!\n");
+ return;
+ }
+
+ // Copy all the data fields from the input cloud to the projected one?
+ if (copy_data_fields) {
+ // Allocate enough space and copy the basics
+ projected_points.resize(input_->size());
+ projected_points.width = input_->width;
+ projected_points.height = input_->height;
+
+ using FieldList = typename pcl::traits::fieldList<PointT>::type;
+ // Iterate over each point
+ for (std::size_t i = 0; i < input_->size(); ++i)
+ // Iterate over each dimension
+ pcl::for_each_type<FieldList>(
+ NdConcatenateFunctor<PointT, PointT>((*input_)[i], projected_points[i]));
+
+    // Iterate through the inliers and project them onto the torus
+ for (const auto& inlier : inliers) {
+ Eigen::Vector3f q;
+ const Eigen::Vector3f pt_n = (*normals_)[inlier].getNormalVector3fMap();
+ projectPointToTorus(
+ (*input_)[inlier].getVector3fMap(), pt_n, model_coefficients, q);
+ projected_points[inlier].getVector3fMap() = q;
+ }
+ }
+ else {
+ // Allocate enough space and copy the basics
+ projected_points.resize(inliers.size());
+ projected_points.width = inliers.size();
+ projected_points.height = 1;
+
+ using FieldList = typename pcl::traits::fieldList<PointT>::type;
+ // Iterate over each point
+ for (std::size_t i = 0; i < inliers.size(); ++i) {
+ // Iterate over each dimension
+ pcl::for_each_type<FieldList>(NdConcatenateFunctor<PointT, PointT>(
+ (*input_)[inliers[i]], projected_points[i]));
+ }
+
+    for (std::size_t i = 0; i < inliers.size(); ++i) {
+      Eigen::Vector3f q;
+      const Eigen::Vector3f pt_n = (*normals_)[inliers[i]].getNormalVector3fMap();
+      projectPointToTorus(
+          (*input_)[inliers[i]].getVector3fMap(), pt_n, model_coefficients, q);
+      // projected_points holds only the inliers here, so index with i, not the cloud index
+      projected_points[i].getVector3fMap() = q;
+    }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+bool
+pcl::SampleConsensusModelTorus<PointT, PointNT>::doSamplesVerifyModel(
+ const std::set<index_t>& indices,
+ const Eigen::VectorXf& model_coefficients,
+ const double threshold) const
+{
+
+ for (const auto& index : indices) {
+ const Eigen::Vector3f pt_n = (*normals_)[index].getNormalVector3fMap();
+ Eigen::Vector3f torus_closest;
+ projectPointToTorus((*input_)[index].getVector3fMap(), pt_n, model_coefficients, torus_closest);
+
+ if (((*input_)[index].getVector3fMap() - torus_closest).squaredNorm() > threshold * threshold)
+ return (false);
+ }
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT, typename PointNT>
+bool
+pcl::SampleConsensusModelTorus<PointT, PointNT>::isModelValid(
+ const Eigen::VectorXf& model_coefficients) const
+{
+ if (!SampleConsensusModel<PointT>::isModelValid(model_coefficients))
+ return (false);
+
+ if (radius_min_ != std::numeric_limits<double>::lowest() &&
+ (model_coefficients[0] < radius_min_ || model_coefficients[1] < radius_min_)) {
+ PCL_DEBUG(
+ "[pcl::SampleConsensusModelTorus::isModelValid] Major radius OR minor radius "
+ "of torus is/are too small: should be larger than %g, but are {%g, %g}.\n",
+ radius_min_,
+ model_coefficients[0],
+ model_coefficients[1]);
+ return (false);
+ }
+ if (radius_max_ != std::numeric_limits<double>::max() &&
+ (model_coefficients[0] > radius_max_ || model_coefficients[1] > radius_max_)) {
+ PCL_DEBUG(
+ "[pcl::SampleConsensusModelTorus::isModelValid] Major radius OR minor radius "
+ "of torus is/are too big: should be smaller than %g, but are {%g, %g}.\n",
+ radius_max_,
+ model_coefficients[0],
+ model_coefficients[1]);
+ return (false);
+ }
+ return (true);
+}
+
+#define PCL_INSTANTIATE_SampleConsensusModelTorus(PointT, PointNT) \
+ template class PCL_EXPORTS pcl::SampleConsensusModelTorus<PointT, PointNT>;
+
+#endif // PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_TORUS_H_
*/
RandomizedMEstimatorSampleConsensus (const SampleConsensusModelPtr &model)
: SampleConsensus<PointT> (model)
- , fraction_nr_pretest_ (10.0) // Number of samples to try randomly in percents
+ , fraction_nr_pretest_ (-1.0)
+ , nr_samples_pretest_ (1)
{
// Maximum number of trials before we give up.
max_iterations_ = 10000;
*/
RandomizedMEstimatorSampleConsensus (const SampleConsensusModelPtr &model, double threshold)
: SampleConsensus<PointT> (model, threshold)
- , fraction_nr_pretest_ (10.0) // Number of samples to try randomly in percents
+ , fraction_nr_pretest_ (-1.0)
+ , nr_samples_pretest_ (1)
{
// Maximum number of trials before we give up.
max_iterations_ = 10000;
computeModel (int debug_verbosity_level = 0) override;
/** \brief Set the percentage of points to pre-test.
+ * This is an alternative to setNrSamplesPretest.
* \param[in] nr_pretest percentage of points to pre-test
*/
inline void
- setFractionNrPretest (double nr_pretest) { fraction_nr_pretest_ = nr_pretest; }
+ setFractionNrPretest (double nr_pretest)
+ {
+ fraction_nr_pretest_ = nr_pretest;
+ nr_samples_pretest_ = 0;
+ }
/** \brief Get the percentage of points to pre-test. */
inline double
getFractionNrPretest () const { return (fraction_nr_pretest_); }
+ /** \brief Set the absolute number of points to pre-test.
+ * This is an alternative to setFractionNrPretest.
+ * \param[in] nr_pretest absolute number of points to pre-test
+ */
+ inline void
+ setNrSamplesPretest (std::size_t nr_pretest)
+ {
+ nr_samples_pretest_ = nr_pretest;
+ fraction_nr_pretest_ = -1.0;
+ }
+
+ /** \brief Get the absolute number of points to pre-test. */
+ inline std::size_t
+ getNrSamplesPretest () const { return (nr_samples_pretest_); }
+
private:
- /** \brief Number of samples to randomly pre-test, in percents. */
+      /** \brief Percentage of points to randomly pre-test. This is a mutually exclusive alternative to nr_samples_pretest_. */
double fraction_nr_pretest_;
+
+      /** \brief Absolute number of points to randomly pre-test. This is a mutually exclusive alternative to fraction_nr_pretest_. */
+ std::size_t nr_samples_pretest_;
};
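A minimal usage sketch for the two pre-test modes added here (illustrative only; the plane model, threshold and counts are arbitrary, and filling the cloud is omitted):

    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>
    #include <pcl/sample_consensus/rmsac.h>
    #include <pcl/sample_consensus/sac_model_plane.h>

    int main()
    {
      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
      // ... fill cloud ...

      pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr model(
          new pcl::SampleConsensusModelPlane<pcl::PointXYZ>(cloud));

      pcl::RandomizedMEstimatorSampleConsensus<pcl::PointXYZ> rmsac(model, 0.01);
      rmsac.setNrSamplesPretest(50);       // pre-test a fixed number of points, or ...
      // rmsac.setFractionNrPretest(10.0); // ... a percentage; the two are mutually exclusive
      rmsac.computeModel();

      Eigen::VectorXf coefficients;
      rmsac.getModelCoefficients(coefficients);
    }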
}
*/
RandomizedRandomSampleConsensus (const SampleConsensusModelPtr &model)
: SampleConsensus<PointT> (model)
- , fraction_nr_pretest_ (10.0) // Number of samples to try randomly in percents
+ , fraction_nr_pretest_ (-1.0)
+ , nr_samples_pretest_ (1)
{
// Maximum number of trials before we give up.
max_iterations_ = 10000;
*/
RandomizedRandomSampleConsensus (const SampleConsensusModelPtr &model, double threshold)
: SampleConsensus<PointT> (model, threshold)
- , fraction_nr_pretest_ (10.0) // Number of samples to try randomly in percents
+ , fraction_nr_pretest_ (-1.0)
+ , nr_samples_pretest_ (1)
{
// Maximum number of trials before we give up.
max_iterations_ = 10000;
computeModel (int debug_verbosity_level = 0) override;
/** \brief Set the percentage of points to pre-test.
+ * This is an alternative to setNrSamplesPretest.
* \param[in] nr_pretest percentage of points to pre-test
*/
inline void
- setFractionNrPretest (double nr_pretest) { fraction_nr_pretest_ = nr_pretest; }
+ setFractionNrPretest (double nr_pretest)
+ {
+ fraction_nr_pretest_ = nr_pretest;
+ nr_samples_pretest_ = 0;
+ }
/** \brief Get the percentage of points to pre-test. */
inline double
getFractionNrPretest () const { return (fraction_nr_pretest_); }
+ /** \brief Set the absolute number of points to pre-test.
+ * This is an alternative to setFractionNrPretest.
+ * \param[in] nr_pretest absolute number of points to pre-test
+ */
+ inline void
+ setNrSamplesPretest (std::size_t nr_pretest)
+ {
+ nr_samples_pretest_ = nr_pretest;
+ fraction_nr_pretest_ = -1.0;
+ }
+
+ /** \brief Get the absolute number of points to pre-test. */
+ inline std::size_t
+ getNrSamplesPretest () const { return (nr_samples_pretest_); }
+
private:
- /** \brief Number of samples to randomly pre-test, in percents. */
+      /** \brief Percentage of points to randomly pre-test. This is a mutually exclusive alternative to nr_samples_pretest_. */
double fraction_nr_pretest_;
+
+      /** \brief Absolute number of points to randomly pre-test. This is a mutually exclusive alternative to fraction_nr_pretest_. */
+ std::size_t nr_samples_pretest_;
};
}
using SampleConsensusModel<PointT>::indices_;
using SampleConsensusModel<PointT>::radius_min_;
using SampleConsensusModel<PointT>::radius_max_;
+ using SampleConsensusModel<PointT>::error_sqr_dists_;
using PointCloud = typename SampleConsensusModel<PointT>::PointCloud;
using PointCloudPtr = typename SampleConsensusModel<PointT>::PointCloudPtr;
*/
int operator() (const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const
{
+ // Same for all points, so define outside of loop:
+ // C : Circle Center
+ const Eigen::Vector3d C (x[0], x[1], x[2]);
+ // N : Circle (Plane) Normal
+ const Eigen::Vector3d N (x[4], x[5], x[6]);
+ // r : Radius
+ const double r = x[3];
for (int i = 0; i < values (); ++i)
{
// what i have:
// P : Sample Point
Eigen::Vector3d P =
(*model_->input_)[indices_[i]].getVector3fMap().template cast<double>();
- // C : Circle Center
- Eigen::Vector3d C (x[0], x[1], x[2]);
- // N : Circle (Plane) Normal
- Eigen::Vector3d N (x[4], x[5], x[6]);
- // r : Radius
- double r = x[3];
Eigen::Vector3d helperVectorPC = P - C;
// 1.1. get line parameter
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/common/distances.h>
+#include <pcl/pcl_exports.h>
namespace pcl
{
namespace internal {
- int optimizeModelCoefficientsCone (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
+ PCL_EXPORTS int optimizeModelCoefficientsCone (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
} // namespace internal
/** \brief @b SampleConsensusModelCone defines a model for 3D cone segmentation.
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/common/distances.h>
+#include <pcl/pcl_exports.h>
namespace pcl
{
namespace internal {
- int optimizeModelCoefficientsCylinder (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
+ PCL_EXPORTS int optimizeModelCoefficientsCylinder (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
} // namespace internal
/** \brief @b SampleConsensusModelCylinder defines a model for 3D cylinder segmentation.
using SampleConsensusModel<PointT>::indices_;
using SampleConsensusModel<PointT>::radius_min_;
using SampleConsensusModel<PointT>::radius_max_;
+ using SampleConsensusModel<PointT>::error_sqr_dists_;
using PointCloud = typename SampleConsensusModel<PointT>::PointCloud;
using PointCloudPtr = typename SampleConsensusModel<PointT>::PointCloudPtr;
* \ingroup sample_consensus
*/
template <typename PointT>
- class SampleConsensusModelPlane : public SampleConsensusModel<PointT>
+ class PCL_EXPORTS SampleConsensusModelPlane : public SampleConsensusModel<PointT>
{
public:
using SampleConsensusModel<PointT>::model_name_;
std::size_t i = 0) const;
#endif
+#define PCLAT(POS) ((*input_)[(*indices_)[(POS)]])
+
#ifdef __AVX__
- inline __m256 dist8 (const std::size_t i, const __m256 &a_vec, const __m256 &b_vec, const __m256 &c_vec, const __m256 &d_vec, const __m256 &abs_help) const;
-#endif
+// This function computes the distances of 8 points to the plane
+inline __m256 dist8 (const std::size_t i, const __m256 &a_vec, const __m256 &b_vec, const __m256 &c_vec, const __m256 &d_vec, const __m256 &abs_help) const
+{
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ return _mm256_andnot_ps (abs_help,
+ _mm256_add_ps (_mm256_add_ps (_mm256_mul_ps (a_vec, _mm256_set_ps (PCLAT(i ).x, PCLAT(i+1).x, PCLAT(i+2).x, PCLAT(i+3).x, PCLAT(i+4).x, PCLAT(i+5).x, PCLAT(i+6).x, PCLAT(i+7).x)),
+ _mm256_mul_ps (b_vec, _mm256_set_ps (PCLAT(i ).y, PCLAT(i+1).y, PCLAT(i+2).y, PCLAT(i+3).y, PCLAT(i+4).y, PCLAT(i+5).y, PCLAT(i+6).y, PCLAT(i+7).y))),
+ _mm256_add_ps (_mm256_mul_ps (c_vec, _mm256_set_ps (PCLAT(i ).z, PCLAT(i+1).z, PCLAT(i+2).z, PCLAT(i+3).z, PCLAT(i+4).z, PCLAT(i+5).z, PCLAT(i+6).z, PCLAT(i+7).z)),
+ d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
+}
+#endif // ifdef __AVX__
#ifdef __SSE__
- inline __m128 dist4 (const std::size_t i, const __m128 &a_vec, const __m128 &b_vec, const __m128 &c_vec, const __m128 &d_vec, const __m128 &abs_help) const;
-#endif
+// This function computes the distances of 4 points to the plane
+inline __m128 dist4 (const std::size_t i, const __m128 &a_vec, const __m128 &b_vec, const __m128 &c_vec, const __m128 &d_vec, const __m128 &abs_help) const
+{
+ // The andnot-function realizes an abs-operation: the sign bit is removed
+ return _mm_andnot_ps (abs_help,
+ _mm_add_ps (_mm_add_ps (_mm_mul_ps (a_vec, _mm_set_ps (PCLAT(i ).x, PCLAT(i+1).x, PCLAT(i+2).x, PCLAT(i+3).x)),
+ _mm_mul_ps (b_vec, _mm_set_ps (PCLAT(i ).y, PCLAT(i+1).y, PCLAT(i+2).y, PCLAT(i+3).y))),
+ _mm_add_ps (_mm_mul_ps (c_vec, _mm_set_ps (PCLAT(i ).z, PCLAT(i+1).z, PCLAT(i+2).z, PCLAT(i+3).z)),
+ d_vec))); // TODO this could be replaced by three fmadd-instructions (if available), but the speed gain would probably be minimal
+}
+#endif // ifdef __SSE__
+
+#undef PCLAT
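For reference, the scalar quantity that dist8/dist4 vectorize is simply the absolute value of the plane equation evaluated at each point; a standalone sketch (not PCL code; the point struct and function name are made up):

    #include <cmath>
    #include <vector>

    struct Pt { float x, y, z; };

    // |a*x + b*y + c*z + d| per point, with (a, b, c) the unit plane normal and d its offset
    std::vector<float>
    planeDistances(const std::vector<Pt>& pts, float a, float b, float c, float d)
    {
      std::vector<float> out;
      out.reserve(pts.size());
      for (const Pt& p : pts)
        out.push_back(std::abs(a * p.x + b * p.y + c * p.z + d));
      return out;
    }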
private:
/** \brief Check if a sample of indices results in a good sample of points
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
+#include <pcl/pcl_exports.h>
namespace pcl
{
namespace internal {
- int optimizeModelCoefficientsSphere (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
+ PCL_EXPORTS int optimizeModelCoefficientsSphere (Eigen::VectorXf& coeff, const Eigen::ArrayXf& pts_x, const Eigen::ArrayXf& pts_y, const Eigen::ArrayXf& pts_z);
} // namespace internal
/** \brief SampleConsensusModelSphere defines a model for 3D sphere segmentation.
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2011, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+#pragma once
+
+#include <pcl/common/distances.h>
+#include <pcl/sample_consensus/model_types.h>
+#include <pcl/sample_consensus/sac_model.h>
+
+namespace pcl {
+
+/** \brief @b SampleConsensusModelTorus defines a model for 3D torus segmentation.
+ * The model coefficients are defined as:
+ * - \b radii.inner : the torus's inner radius
+ * - \b radii.outer : the torus's outer radius
+ * - \b torus_center_point.x : the X coordinate of the center of the torus
+ * - \b torus_center_point.y : the Y coordinate of the center of the torus
+ * - \b torus_center_point.z : the Z coordinate of the center of the torus
+ * - \b torus_normal.x : the X coordinate of the normal of the torus
+ * - \b torus_normal.y : the Y coordinate of the normal of the torus
+ * - \b torus_normal.z : the Z coordinate of the normal of the torus
+ *
+ * \author lasdasdas
+ * \ingroup sample_consensus
+ */
+template <typename PointT, typename PointNT>
+class SampleConsensusModelTorus
+: public SampleConsensusModel<PointT>,
+ public SampleConsensusModelFromNormals<PointT, PointNT> {
+ using SampleConsensusModel<PointT>::model_name_;
+ using SampleConsensusModel<PointT>::input_;
+ using SampleConsensusModel<PointT>::indices_;
+ using SampleConsensusModel<PointT>::radius_min_;
+ using SampleConsensusModel<PointT>::radius_max_;
+ using SampleConsensusModelFromNormals<PointT, PointNT>::normals_;
+ using SampleConsensusModelFromNormals<PointT, PointNT>::normal_distance_weight_;
+ using SampleConsensusModel<PointT>::error_sqr_dists_;
+
+ using PointCloud = typename SampleConsensusModel<PointT>::PointCloud;
+ using PointCloudPtr = typename SampleConsensusModel<PointT>::PointCloudPtr;
+ using PointCloudConstPtr = typename SampleConsensusModel<PointT>::PointCloudConstPtr;
+
+public:
+ using Ptr = shared_ptr<SampleConsensusModelTorus<PointT, PointNT>>;
+ using ConstPtr = shared_ptr<const SampleConsensusModelTorus<PointT, PointNT>>;
+
+ /** \brief Constructor for base SampleConsensusModelTorus.
+ * \param[in] cloud the input point cloud dataset
+ * \param[in] random if true set the random seed to the current time, else set to
+ * 12345 (default: false)
+ */
+ SampleConsensusModelTorus(const PointCloudConstPtr& cloud, bool random = false)
+ : SampleConsensusModel<PointT>(cloud, random)
+ , SampleConsensusModelFromNormals<PointT, PointNT>()
+ {
+ model_name_ = "SampleConsensusModelTorus";
+ sample_size_ = 4;
+ model_size_ = 8;
+ }
+
+ /** \brief Constructor for base SampleConsensusModelTorus.
+ * \param[in] cloud the input point cloud dataset
+ * \param[in] indices a vector of point indices to be used from \a cloud
+ * \param[in] random if true set the random seed to the current time, else set to
+ * 12345 (default: false)
+ */
+ SampleConsensusModelTorus(const PointCloudConstPtr& cloud,
+ const Indices& indices,
+ bool random = false)
+ : SampleConsensusModel<PointT>(cloud, indices, random)
+ , SampleConsensusModelFromNormals<PointT, PointNT>()
+ {
+ model_name_ = "SampleConsensusModelTorus";
+ sample_size_ = 4;
+ model_size_ = 8;
+ }
+
+ /** \brief Copy constructor.
+ * \param[in] source the model to copy into this
+ */
+ SampleConsensusModelTorus(const SampleConsensusModelTorus& source)
+ : SampleConsensusModel<PointT>(), SampleConsensusModelFromNormals<PointT, PointNT>()
+ {
+ *this = source;
+ model_name_ = "SampleConsensusModelTorus";
+ }
+
+ /** \brief Empty destructor */
+ ~SampleConsensusModelTorus() override = default;
+
+  /** \brief Copy assignment operator.
+ * \param[in] source the model to copy into this
+ */
+ inline SampleConsensusModelTorus&
+ operator=(const SampleConsensusModelTorus& source)
+ {
+ SampleConsensusModelFromNormals<PointT, PointNT>::operator=(source);
+ return (*this);
+ }
+ /** \brief Check whether the given index samples can form a valid torus model, compute
+ * the model coefficients from these samples and store them in model_coefficients. The
+ * torus coefficients are: radii, torus_center_point, torus_normal.
+ * \param[in] samples the point indices found as possible good candidates for creating a valid model
+ * \param[out] model_coefficients the resultant model coefficients
+ */
+ bool
+ computeModelCoefficients(const Indices& samples,
+ Eigen::VectorXf& model_coefficients) const override;
+
+ /** \brief Compute all distances from the cloud data to a given torus model.
+ * \param[in] model_coefficients the coefficients of a torus model that we need to compute distances to
+ * \param[out] distances the resultant estimated distances
+ */
+ void
+ getDistancesToModel(const Eigen::VectorXf& model_coefficients,
+ std::vector<double>& distances) const override;
+
+ /** \brief Select all the points which respect the given model coefficients as
+ * inliers.
+ * \param[in] model_coefficients the coefficients of a torus model that we need to compute distances to
+ * \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
+   * \param[out] inliers the resultant model inliers
+ */
+ void
+ selectWithinDistance(const Eigen::VectorXf& model_coefficients,
+ const double threshold,
+ Indices& inliers) override;
+
+ /** \brief Count all the points which respect the given model coefficients as inliers.
+ *
+ * \param[in] model_coefficients the coefficients of a model that we need to compute distances to
+   * \param[in] threshold maximum admissible distance threshold for determining the inliers from the outliers
+   * \return the resultant number of inliers
+ */
+ std::size_t
+ countWithinDistance(const Eigen::VectorXf& model_coefficients,
+ const double threshold) const override;
+
+ /** \brief Recompute the torus coefficients using the given inlier set and return them
+ * to the user.
+ * \param[in] inliers the data inliers found as supporting the model
+ * \param[in] model_coefficients the initial guess for the optimization
+ * \param[out] optimized_coefficients the resultant recomputed coefficients after
+ * non-linear optimization
+ */
+ void
+ optimizeModelCoefficients(const Indices& inliers,
+ const Eigen::VectorXf& model_coefficients,
+ Eigen::VectorXf& optimized_coefficients) const override;
+
+ /** \brief Create a new point cloud with inliers projected onto the torus model.
+ * \param[in] inliers the data inliers that we want to project on the torus model
+ * \param[in] model_coefficients the coefficients of a torus model
+ * \param[out] projected_points the resultant projected points
+ * \param[in] copy_data_fields set to true if we need to copy the other data fields
+ */
+ void
+ projectPoints(const Indices& inliers,
+ const Eigen::VectorXf& model_coefficients,
+ PointCloud& projected_points,
+ bool copy_data_fields = true) const override;
+
+ /** \brief Verify whether a subset of indices verifies the given torus model
+ * coefficients.
+ * \param[in] indices the data indices that need to be tested against the torus model
+ * \param[in] model_coefficients the torus model coefficients
+ * \param[in] threshold a maximum admissible distance threshold for determining the
+ * inliers from the outliers
+ */
+ bool
+ doSamplesVerifyModel(const std::set<index_t>& indices,
+ const Eigen::VectorXf& model_coefficients,
+ const double threshold) const override;
+
+ /** \brief Return a unique id for this model (SACMODEL_TORUS). */
+ inline pcl::SacModel
+ getModelType() const override
+ {
+ return (SACMODEL_TORUS);
+ }
+
+protected:
+ using SampleConsensusModel<PointT>::sample_size_;
+ using SampleConsensusModel<PointT>::model_size_;
+
+ /** \brief Project a point onto a torus given by its model coefficients (radii,
+ * torus_center_point, torus_normal)
+   * \param[in] pt the input point to project
+   * \param[in] pt_n the normal of the input point; the point is projected onto the torus plane along this normal
+   * \param[in] model_coefficients the coefficients of the torus (radii, torus_center_point, torus_normal)
+ * \param[out] pt_proj the resultant projected point
+ */
+ void
+ projectPointToTorus(const Eigen::Vector3f& pt,
+ const Eigen::Vector3f& pt_n,
+ const Eigen::VectorXf& model_coefficients,
+ Eigen::Vector3f& pt_proj) const;
+
+ /** \brief Check whether a model is valid given the user constraints.
+ * \param[in] model_coefficients the set of model coefficients
+ */
+ bool
+ isModelValid(const Eigen::VectorXf& model_coefficients) const override;
+
+  /** \brief Check if a sample of indices results in a good sample of points.
+ * \param[in] samples the resultant index samples
+ */
+ bool
+ isSampleGood(const Indices& samples) const override;
+
+private:
+ struct OptimizationFunctor : pcl::Functor<double> {
+ /** Functor constructor
+     * \param[in] model pointer to the model to evaluate
+     * \param[in] indices the indices of data points to evaluate
+ */
+ OptimizationFunctor(const pcl::SampleConsensusModelTorus<PointT, PointNT>* model,
+ const Indices& indices)
+ : pcl::Functor<double>(indices.size()), model_(model), indices_(indices)
+ {}
+
+ /** Cost function to be minimized
+     * \param[in] xs the variables array
+ * \param[out] fvec the resultant functions evaluations
+ * \return 0
+ */
+ int
+ operator()(const Eigen::VectorXd& xs, Eigen::VectorXd& fvec) const
+ {
+ // Getting constants from state vector
+ const double& R = xs[0];
+ const double& r = xs[1];
+
+ const double& x0 = xs[2];
+ const double& y0 = xs[3];
+ const double& z0 = xs[4];
+
+ const Eigen::Vector3d centroid{x0, y0, z0};
+
+ const double& nx = xs[5];
+ const double& ny = xs[6];
+ const double& nz = xs[7];
+
+ const Eigen::Vector3d n1{0.0, 0.0, 1.0};
+ const Eigen::Vector3d n2 = Eigen::Vector3d{nx, ny, nz}.normalized();
+
+ for (size_t j = 0; j < indices_.size(); j++) {
+
+ size_t i = indices_[j];
+ const Eigen::Vector3d pt =
+ (*model_->input_)[i].getVector3fMap().template cast<double>();
+
+ Eigen::Vector3d pte{pt - centroid};
+
+        // For a rotation matrix, the transpose is its inverse.
+        // Using quaternions instead of Rodrigues' rotation formula
+ pte = Eigen::Quaterniond()
+ .setFromTwoVectors(n1, n2)
+ .toRotationMatrix()
+ .transpose() *
+ pte;
+
+ const double& x = pte[0];
+ const double& y = pte[1];
+ const double& z = pte[2];
+
+ fvec[j] = (std::pow(sqrt(x * x + y * y) - R, 2) + z * z - r * r);
+ }
+ return 0;
+ }
+
+ const pcl::SampleConsensusModelTorus<PointT, PointNT>* model_;
+ const Indices& indices_;
+ };
+};
+} // namespace pcl
+
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/sample_consensus/impl/sac_model_torus.hpp>
+#endif
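A minimal usage sketch for the new model (illustrative only; loading the cloud and estimating normals are omitted, and the distance threshold is arbitrary):

    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>
    #include <pcl/sample_consensus/ransac.h>
    #include <pcl/sample_consensus/sac_model_torus.h>

    int main()
    {
      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
      pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
      // ... fill cloud and estimate normals (e.g. with pcl::NormalEstimation) ...

      using TorusModel = pcl::SampleConsensusModelTorus<pcl::PointXYZ, pcl::Normal>;
      TorusModel::Ptr model(new TorusModel(cloud));
      model->setInputNormals(normals);

      pcl::RandomSampleConsensus<pcl::PointXYZ> ransac(model);
      ransac.setDistanceThreshold(0.01);
      ransac.computeModel();

      Eigen::VectorXf coefficients; // 8 values: the two radii, the center point, the torus axis
      ransac.getModelCoefficients(coefficients);

      pcl::Indices inliers;
      ransac.getInliers(inliers);
    }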
<li>\link pcl::SampleConsensusModelSphere SACMODEL_SPHERE \endlink - used to determine sphere models. The <b>four</b> coefficients of the sphere are given by its 3D center and radius as: [<b>center.x center.y center.z radius</b>]</li>
<li>\link pcl::SampleConsensusModelCylinder SACMODEL_CYLINDER \endlink - used to determine cylinder models. The <b>seven</b> coefficients of the cylinder are given by a point on its axis, the axis direction, and a radius, as: [<b>point_on_axis.x point_on_axis.y point_on_axis.z axis_direction.x axis_direction.y axis_direction.z radius</b>]</li>
<li>\link pcl::SampleConsensusModelCone SACMODEL_CONE \endlink - used to determine cone models. The <b>seven</b> coefficients of the cone are given by a point of its apex, the axis direction and the opening angle, as: [<b>apex.x, apex.y, apex.z, axis_direction.x, axis_direction.y, axis_direction.z, opening_angle</b>]</li>
- <li>SACMODEL_TORUS - not implemented yet</li>
+ <li>\link pcl::SampleConsensusModelTorus SACMODEL_TORUS \endlink - used to determine torus models. The <b>eight</b> coefficients of the torus are given by the inner and outer radius, the center point, and the torus axis.</li>
<li>\link pcl::SampleConsensusModelParallelLine SACMODEL_PARALLEL_LINE \endlink - a model for determining a line <b>parallel</b> with a given axis, within a maximum specified angular deviation. The line coefficients are similar to \link pcl::SampleConsensusModelLine SACMODEL_LINE \endlink.</li>
<li>\link pcl::SampleConsensusModelPerpendicularPlane SACMODEL_PERPENDICULAR_PLANE \endlink - a model for determining a plane <b>perpendicular</b> to a user-specified axis, within a maximum specified angular deviation. The plane coefficients are similar to \link pcl::SampleConsensusModelPlane SACMODEL_PLANE \endlink.</li>
<li>SACMODEL_PARALLEL_LINES - not implemented yet</li>
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2009-2012, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pcl/sample_consensus/impl/sac_model_torus.hpp>
+
+#ifndef PCL_NO_PRECOMPILE
+#include <pcl/impl/instantiate.hpp>
+#include <pcl/point_types.h>
+PCL_INSTANTIATE_PRODUCT(SampleConsensusModelTorus, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointXYZRGB))((pcl::Normal)))
+#endif // PCL_NO_PRECOMPILE
set(SUBSYS_DESC "Point cloud generic search library")
set(SUBSYS_DEPS common kdtree octree)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS flann)
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common FLANN::FLANN pcl_octree pcl_kdtree)
list(APPEND EXT_DEPS flann)
{
for (; idx < xEnd; ++idx)
{
- if (!mask_[idx] || !isFinite ((*input_)[idx]))
+ if (!mask_[idx])
continue;
float dist_x = (*input_)[idx].x - query.x;
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
+#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/search/search.h>
#include <pcl/common/eigen.h>
/** \brief Constructor
* \param[in] sorted_results whether the results should be return sorted in ascending order on the distances or not.
- * This applies only for radius search, since knn always returns sorted resutls
+ * This applies only for radius search, since knn always returns sorted results
* \param[in] eps the threshold for the mean-squared-error of the estimation of the projection matrix.
* if the MSE is above this value, the point cloud is considered as not from a projective device,
* thus organized neighbor search can not be applied on that cloud.
{
mask_.assign (input_->size (), 0);
for (const auto& idx : *indices_)
- mask_[idx] = 1;
+ if (pcl::isFinite((*input_)[idx]))
+ mask_[idx] = 1;
}
else
- mask_.assign (input_->size (), 1);
+ {
+ mask_.assign (input_->size (), 0);
+ for (std::size_t idx=0; idx<input_->size(); ++idx)
+ if (pcl::isFinite((*input_)[idx]))
+ mask_[idx] = 1;
+ }
return estimateProjectionMatrix () && isValid ();
}
testPoint (const PointT& query, unsigned k, std::vector<Entry>& queue, index_t index) const
{
const PointT& point = input_->points [index];
- if (mask_ [index] && std::isfinite (point.x))
+ if (mask_ [index])
{
//float squared_distance = (point.getVector3fMap () - query.getVector3fMap ()).squaredNorm ();
float dist_x = point.x - query.x;
/** \brief using only a subsample of points to calculate the projection matrix. pyramid_level_ = use down sampled cloud given by pyramid_level_*/
const unsigned pyramid_level_;
- /** \brief mask, indicating whether the point was in the indices list or not.*/
+ /** \brief mask, indicating whether the point was in the indices list or not, and whether it is finite.*/
std::vector<unsigned char> mask_;
public:
PCL_MAKE_ALIGNED_OPERATOR_NEW
set(SUBSYS_DESC "Point cloud segmentation library")
set(SUBSYS_DEPS common geometry search sample_consensus kdtree octree features filters ml)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/extract_clusters.h"
"include/pcl/${SUBSYS_NAME}/extract_labeled_clusters.h"
"include/pcl/${SUBSYS_NAME}/extract_polygonal_prism_data.h"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${LIB_NAME}" pcl_search pcl_sample_consensus pcl_filters pcl_ml pcl_features)
+target_link_libraries("${LIB_NAME}" pcl_geometry pcl_search pcl_sample_consensus pcl_filters pcl_ml pcl_features)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: io.h 5850 2012-06-06 14:04:59Z stfox88 $
- *
- */
-
-#pragma once
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-
-#ifndef Q_MOC_RUN
-// Marking all Boost headers as system headers to remove warnings
-#include <boost/version.hpp>
-#include <boost/graph/adjacency_list.hpp>
-#include <boost/multi_array.hpp>
-#include <boost/ptr_container/ptr_list.hpp>
-
-#include <boost/graph/boykov_kolmogorov_max_flow.hpp>
-#endif
static_cast<std::size_t>(normals.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
const double cos_eps_angle = std::cos (eps_angle); // compute this once instead of acos many times (faster)
// Create a bool vector of processed point indices, and initialize it to false
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
static_cast<std::size_t>(normals.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
const double cos_eps_angle = std::cos (eps_angle); // compute this once instead of acos many times (faster)
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
/** \brief The maximum number of labels we can find in this pointcloud (default =
* MAXINT)*/
- unsigned int max_label_{std::numeric_limits<int>::max()};
+ PCL_DEPRECATED(1, 18, "this member variable is unused")
+ unsigned int max_label_{std::numeric_limits<unsigned int>::max()};
/** \brief Class getName method. */
virtual std::string
#include <limits>
#include <pcl/pcl_base.h>
+#include <pcl/Vertices.h> // for Vertices
namespace pcl
{
inline void
setInputPlanarHull (const PointCloudConstPtr &hull) { planar_hull_ = hull; }
+    /** \brief Provide a vector of the concave hull's polygons (vertex indices), as received from ConcaveHull::polygons.
+ * \note This is only needed when using ConcaveHull that has more than one polygon.
+ * \param[in] polygons - see ConcaveHull::polygons
+ */
+ inline void
+ setPolygons(const std::vector<pcl::Vertices>& polygons)
+ {
+ polygons_ = polygons;
+ }
+
/** \brief Get a pointer the input planar hull dataset. */
inline PointCloudConstPtr
getInputPlanarHull () const { return (planar_hull_); }
/** \brief A pointer to the input planar hull dataset. */
PointCloudConstPtr planar_hull_{nullptr};
+    /** \brief Polygon vertex indices, as received from ConcaveHull */
+ std::vector<pcl::Vertices> polygons_;
+
/** \brief The minimum number of points needed on the convex hull. */
int min_pts_hull_{3};
}
-#define PCL_INSTANTIATE_ApproximateProgressiveMorphologicalFilter(T) template class pcl::ApproximateProgressiveMorphologicalFilter<T>;
+#define PCL_INSTANTIATE_ApproximateProgressiveMorphologicalFilter(T) template class PCL_EXPORTS pcl::ApproximateProgressiveMorphologicalFilter<T>;
#endif // PCL_SEGMENTATION_APPROXIMATE_PROGRESSIVE_MORPHOLOGICAL_FILTER_HPP_
if (!searcher_)
{
if (input_->isOrganized ())
- searcher_.reset (new pcl::search::OrganizedNeighbor<PointT> ());
+ searcher_.reset (new pcl::search::OrganizedNeighbor<PointT> (false)); // not requiring sorted results is much faster
else
- searcher_.reset (new pcl::search::KdTree<PointT> ());
+ searcher_.reset (new pcl::search::KdTree<PointT> (false)); // not requiring sorted results is much faster
}
searcher_->setInputCloud (input_, indices_);
+ // If searcher_ gives sorted results, we can skip the first one because it is the query point itself
+ const int nn_start_idx = searcher_->getSortedResults () ? 1 : 0;
// Temp variables used by search class
Indices nn_indices;
}
// Process the neighbors
- for (int nii = 1; nii < static_cast<int> (nn_indices.size ()); ++nii) // nii = neighbor indices iterator
+ for (int nii = nn_start_idx; nii < static_cast<int> (nn_indices.size ()); ++nii) // nii = neighbor indices iterator
{
// Has this point been processed before?
if (nn_indices[nii] == UNAVAILABLE || processed[nn_indices[nii]])
{
}
-#define PCL_INSTANTIATE_CrfNormalSegmentation(T) template class pcl::CrfNormalSegmentation<T>;
+#define PCL_INSTANTIATE_CrfNormalSegmentation(T) template class PCL_EXPORTS pcl::CrfNormalSegmentation<T>;
#endif // PCL_CRF_NORMAL_SEGMENTATION_HPP_
}
-#define PCL_INSTANTIATE_CrfSegmentation(T) template class pcl::CrfSegmentation<T>;
+#define PCL_INSTANTIATE_CrfSegmentation(T) template class PCL_EXPORTS pcl::CrfSegmentation<T>;
#endif // PCL_CRF_SEGMENTATION_HPP_
cloud.size());
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed(cloud.size(), false);
continue;
}
- for (std::size_t j = 1; j < nn_indices.size();
- ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size(); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
PointT pt_xy;
pt_xy.z = 0;
+ std::vector<pcl::PointCloud<PointT>> polygons(polygons_.size());
+ if (polygons_.empty()) {
+ polygons.push_back(polygon);
+ }
+  else { // in case of a concave hull, prepare the separate polygons
+    for (std::size_t i = 0; i < polygons_.size(); i++) {
+ const auto& polygon_i = polygons_[i];
+ polygons[i].reserve(polygon_i.vertices.size());
+ for (const auto& pointIdx : polygon_i.vertices) {
+ polygons[i].points.push_back(polygon[pointIdx]);
+ }
+ }
+ }
+
output.indices.resize (indices_->size ());
int l = 0;
for (std::size_t i = 0; i < projected_points.size (); ++i)
pt_xy.x = pt[k1];
pt_xy.y = pt[k2];
- if (!pcl::isXYPointIn2DXYPolygon (pt_xy, polygon))
+ bool in_poly = false;
+ for (const auto& poly : polygons) {
+ in_poly ^= pcl::isXYPointIn2DXYPolygon(pt_xy, poly);
+ }
+
+ if (!in_poly) {
continue;
+ }
output.indices[l++] = (*indices_)[i];
}
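The XOR accumulation above implements an even-odd rule over the hull's polygons: a point is kept only if it lies inside an odd number of them, so points falling inside a hole polygon of a concave hull are rejected. A standalone sketch of the same rule (illustrative only; the ray-casting helper and the square polygons are made up):

    #include <cstdio>
    #include <initializer_list>
    #include <vector>

    struct Pt { float x, y; };

    // Even-odd (ray casting) test for a single polygon; illustration only.
    bool pointInPolygon(const Pt& p, const std::vector<Pt>& poly)
    {
      bool inside = false;
      for (std::size_t i = 0, j = poly.size() - 1; i < poly.size(); j = i++) {
        const bool crosses = (poly[i].y > p.y) != (poly[j].y > p.y);
        if (crosses &&
            p.x < (poly[j].x - poly[i].x) * (p.y - poly[i].y) / (poly[j].y - poly[i].y) + poly[i].x)
          inside = !inside;
      }
      return inside;
    }

    int main()
    {
      // Outer boundary and an inner "hole", as a concave hull might produce.
      const std::vector<Pt> outer{{0, 0}, {4, 0}, {4, 4}, {0, 4}};
      const std::vector<Pt> hole{{1, 1}, {3, 1}, {3, 3}, {1, 3}};

      for (const Pt& q : {Pt{0.5f, 0.5f}, Pt{2.f, 2.f}, Pt{5.f, 5.f}}) {
        bool in_poly = false;
        for (const auto& poly : {outer, hole})
          in_poly ^= pointInPolygon(q, poly); // even-odd rule across polygons
        std::printf("(%.1f, %.1f) -> %s\n", q.x, q.y, in_poly ? "inside" : "outside");
      }
    }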
{
template <>
-float squaredEuclideanDistance (const pcl::segmentation::grabcut::Color &c1,
- const pcl::segmentation::grabcut::Color &c2)
+inline float squaredEuclideanDistance (const pcl::segmentation::grabcut::Color &c1,
+ const pcl::segmentation::grabcut::Color &c2)
{
return ((c1.r-c2.r)*(c1.r-c2.r)+(c1.g-c2.g)*(c1.g-c2.g)+(c1.b-c2.b)*(c1.b-c2.b));
}
return (colored_cloud);
}
-#define PCL_INSTANTIATE_MinCutSegmentation(T) template class pcl::MinCutSegmentation<T>;
+#define PCL_INSTANTIATE_MinCutSegmentation(T) template class PCL_EXPORTS pcl::MinCutSegmentation<T>;
#endif // PCL_SEGMENTATION_MIN_CUT_SEGMENTATION_HPP_
deinitCompute ();
}
-#define PCL_INSTANTIATE_ProgressiveMorphologicalFilter(T) template class pcl::ProgressiveMorphologicalFilter<T>;
+#define PCL_INSTANTIATE_ProgressiveMorphologicalFilter(T) template class PCL_EXPORTS pcl::ProgressiveMorphologicalFilter<T>;
#endif // PCL_SEGMENTATION_PROGRESSIVE_MORPHOLOGICAL_FILTER_HPP_
return (colored_cloud);
}
-#define PCL_INSTANTIATE_RegionGrowing(T) template class pcl::RegionGrowing<T, pcl::Normal>;
+#define PCL_INSTANTIATE_RegionGrowing(T) template class PCL_EXPORTS pcl::RegionGrowing<T, pcl::Normal>;
{
if (distances[i_seg] < max_dist)
{
- segment_neighbours.push (std::make_pair (distances[i_seg], i_seg) );
+ segment_neighbours.emplace (distances[i_seg], i_seg);
if (segment_neighbours.size () > nghbr_number)
segment_neighbours.pop ();
}
static_cast<std::size_t>(cloud.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
static_cast<std::size_t>(cloud.size()));
return;
}
+ // If tree gives sorted results, we can skip the first one because it is the query point itself
+ const std::size_t nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
std::vector<bool> processed (cloud.size (), false);
sq_idx++;
continue;
}
- for (std::size_t j = 1; j < nn_indices.size (); ++j) // nn_indices[0] should be sq_idx
+ for (std::size_t j = nn_start_idx; j < nn_indices.size (); ++j)
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
return max_label;
}
-namespace pcl
-{
- namespace octree
- {
- //Explicit overloads for RGB types
- template<>
- void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGB,pcl::SupervoxelClustering<pcl::PointXYZRGB>::VoxelData>::addPoint (const pcl::PointXYZRGB &new_point);
-
- template<>
- void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGBA,pcl::SupervoxelClustering<pcl::PointXYZRGBA>::VoxelData>::addPoint (const pcl::PointXYZRGBA &new_point);
-
- //Explicit overloads for RGB types
- template<> void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGB,pcl::SupervoxelClustering<pcl::PointXYZRGB>::VoxelData>::computeData ();
-
- template<> void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGBA,pcl::SupervoxelClustering<pcl::PointXYZRGBA>::VoxelData>::computeData ();
-
- //Explicit overloads for XYZ types
- template<>
- void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZ,pcl::SupervoxelClustering<pcl::PointXYZ>::VoxelData>::addPoint (const pcl::PointXYZ &new_point);
-
- template<> void
- pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZ,pcl::SupervoxelClustering<pcl::PointXYZ>::VoxelData>::computeData ();
- }
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
-{
-
- template<> void
- pcl::SupervoxelClustering<pcl::PointXYZRGB>::VoxelData::getPoint (pcl::PointXYZRGB &point_arg) const;
-
- template<> void
- pcl::SupervoxelClustering<pcl::PointXYZRGBA>::VoxelData::getPoint (pcl::PointXYZRGBA &point_arg ) const;
-
- template<typename PointT> void
- pcl::SupervoxelClustering<PointT>::VoxelData::getPoint (PointT &point_arg ) const
- {
- //XYZ is required or this doesn't make much sense...
- point_arg.x = xyz_[0];
- point_arg.y = xyz_[1];
- point_arg.z = xyz_[2];
- }
-
+{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SupervoxelClustering<PointT>::VoxelData::getNormal (Normal &normal_arg) const
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-#define PCL_INSTANTIATE_UnaryClassifier(T) template class pcl::UnaryClassifier<T>;
+#define PCL_INSTANTIATE_UnaryClassifier(T) template class PCL_EXPORTS pcl::UnaryClassifier<T>;
#endif // PCL_UNARY_CLASSIFIER_HPP_
* \param[out] model_coefficients a vector of model_coefficients for each plane found in the input cloud
* \param[out] inlier_indices a vector of inliers for each detected plane
* \param[out] centroids a vector of centroids for each plane
- * \param[out] covariances a vector of covariance matricies for the inliers of each plane
+ * \param[out] covariances a vector of covariance matrices for the inliers of each plane
* \param[out] labels a point cloud for the connected component labels of each pixel
* \param[out] label_indices a vector of PointIndices for each labeled component
*/
int current_label = (*labels_)[idx1].label;
int next_label = (*labels_)[idx2].label;
- if (!((*refine_labels_)[current_label] && !(*refine_labels_)[next_label]))
+ if (!(*refine_labels_)[current_label] || (*refine_labels_)[next_label])
return (false);
const pcl::ModelCoefficients& model_coeff = (*models_)[(*label_to_model_)[current_label]];
owner_ (nullptr)
{}
+#ifdef DOXYGEN_ONLY
/** \brief Gets the data in the form of a point
* \param[out] point_arg Will contain the point value of the voxeldata
*/
void
getPoint (PointT &point_arg) const;
+#else
+ template<typename PointT2 = PointT, traits::HasColor<PointT2> = true> void
+ getPoint (PointT &point_arg) const
+ {
+ point_arg.rgba = static_cast<std::uint32_t>(rgb_[0]) << 16 |
+ static_cast<std::uint32_t>(rgb_[1]) << 8 |
+ static_cast<std::uint32_t>(rgb_[2]);
+ point_arg.x = xyz_[0];
+ point_arg.y = xyz_[1];
+ point_arg.z = xyz_[2];
+ }
+
+ template<typename PointT2 = PointT, traits::HasNoColor<PointT2> = true> void
+ getPoint (PointT &point_arg ) const
+ {
+ //XYZ is required or this doesn't make much sense...
+ point_arg.x = xyz_[0];
+ point_arg.y = xyz_[1];
+ point_arg.z = xyz_[2];
+ }
+#endif
/** \brief Gets the data in the form of a normal
* \param[out] normal_arg Will contain the normal value of the voxeldata
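
For reference, the DOXYGEN_ONLY branch above documents a single getPoint while the real header now provides two trait-gated overloads that are selected at compile time depending on whether the point type carries color. Below is a minimal standalone sketch of the same dispatch technique using the standard detection idiom and std::enable_if; WithColor, WithoutColor and has_rgba are hypothetical illustration-only names (not PCL types), and C++17 is assumed.

#include <cstdint>
#include <type_traits>

// Hypothetical point types used only for illustration.
struct WithColor    { float x, y, z; std::uint32_t rgba; };
struct WithoutColor { float x, y, z; };

// Detect whether a point type has an rgba member (stand-in for pcl::traits::HasColor).
template <typename T, typename = void>
struct has_rgba : std::false_type {};
template <typename T>
struct has_rgba<T, std::void_t<decltype (std::declval<T&> ().rgba)>> : std::true_type {};

template <typename PointT>
struct VoxelDataSketch
{
  float xyz_[3] = {0.0f, 0.0f, 0.0f};
  std::uint8_t rgb_[3] = {0, 0, 0};

  // Chosen when PointT has an rgba member: pack the stored color and copy the coordinates.
  template <typename P = PointT, typename std::enable_if<has_rgba<P>::value, bool>::type = true>
  void getPoint (P& p) const
  {
    p.rgba = static_cast<std::uint32_t>(rgb_[0]) << 16 |
             static_cast<std::uint32_t>(rgb_[1]) << 8 |
             static_cast<std::uint32_t>(rgb_[2]);
    p.x = xyz_[0]; p.y = xyz_[1]; p.z = xyz_[2];
  }

  // Chosen for color-less point types: copy the coordinates only.
  template <typename P = PointT, typename std::enable_if<!has_rgba<P>::value, bool>::type = true>
  void getPoint (P& p) const
  {
    p.x = xyz_[0]; p.y = xyz_[1]; p.z = xyz_[2];
  }
};

int main ()
{
  VoxelDataSketch<WithColor> colored;
  WithColor cp{};
  colored.getPoint (cp);   // color-aware overload is selected

  VoxelDataSketch<WithoutColor> plain;
  WithoutColor pp{};
  plain.getPoint (pp);     // xyz-only overload is selected
  return 0;
}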
#include <pcl/segmentation/impl/crf_segmentation.hpp>
// Instantiations of specific point types
-template class pcl::CrfSegmentation<pcl::PointXYZRGB>;
-template class pcl::CrfSegmentation<pcl::PointXYZRGBA>;
-template class pcl::CrfSegmentation<pcl::PointXYZRGBNormal>;
+template class PCL_EXPORTS pcl::CrfSegmentation<pcl::PointXYZRGB>;
+template class PCL_EXPORTS pcl::CrfSegmentation<pcl::PointXYZRGBA>;
+template class PCL_EXPORTS pcl::CrfSegmentation<pcl::PointXYZRGBNormal>;
if (cut_[jt->first] != tree_label) continue;
// check edge capacity
- const capacitated_edge::iterator kt = nodes_[jt->first].find (u);
+ const auto kt = nodes_[jt->first].find (u);
if (((tree_label == TARGET) && (jt->second <= 0.0)) ||
((tree_label == SOURCE) && (kt->second <= 0.0)))
continue;
// free the orphan subtree and remove it from the active set
if (b_free_orphan)
{
- for (capacitated_edge::const_iterator jt = nodes_[u].begin (); jt != nodes_[u].end (); ++jt)
+ for (auto jt = nodes_[u].cbegin (); jt != nodes_[u].cend (); ++jt)
{
if ((cut_[jt->first] == tree_label) && (parents_[jt->first].first == u))
{
#include <pcl/segmentation/impl/region_growing_rgb.hpp>
// Instantiations of specific point types
-template class pcl::RegionGrowingRGB<pcl::PointXYZRGBA>;
-template class pcl::RegionGrowingRGB<pcl::PointXYZRGB>;
-template class pcl::RegionGrowingRGB<pcl::PointXYZRGBL>;
-template class pcl::RegionGrowingRGB<pcl::PointXYZRGBNormal>;
+template class PCL_EXPORTS pcl::RegionGrowingRGB<pcl::PointXYZRGBA>;
+template class PCL_EXPORTS pcl::RegionGrowingRGB<pcl::PointXYZRGB>;
+template class PCL_EXPORTS pcl::RegionGrowingRGB<pcl::PointXYZRGBL>;
+template class PCL_EXPORTS pcl::RegionGrowingRGB<pcl::PointXYZRGBNormal>;
#include <pcl/segmentation/impl/supervoxel_clustering.hpp>
#include <pcl/octree/impl/octree_pointcloud_adjacency.hpp>
+template class PCL_EXPORTS pcl::SupervoxelClustering<pcl::PointXYZ>;
+template class PCL_EXPORTS pcl::SupervoxelClustering<pcl::PointXYZRGB>;
+template class PCL_EXPORTS pcl::SupervoxelClustering<pcl::PointXYZRGBA>;
+
namespace pcl
{
namespace octree
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-namespace pcl
-{
- template<> void
- pcl::SupervoxelClustering<pcl::PointXYZRGB>::VoxelData::getPoint (pcl::PointXYZRGB &point_arg) const
- {
- point_arg.rgba = static_cast<std::uint32_t>(rgb_[0]) << 16 |
- static_cast<std::uint32_t>(rgb_[1]) << 8 |
- static_cast<std::uint32_t>(rgb_[2]);
- point_arg.x = xyz_[0];
- point_arg.y = xyz_[1];
- point_arg.z = xyz_[2];
- }
-
- template<> void
- pcl::SupervoxelClustering<pcl::PointXYZRGBA>::VoxelData::getPoint (pcl::PointXYZRGBA &point_arg ) const
- {
- point_arg.rgba = static_cast<std::uint32_t>(rgb_[0]) << 16 |
- static_cast<std::uint32_t>(rgb_[1]) << 8 |
- static_cast<std::uint32_t>(rgb_[2]);
- point_arg.x = xyz_[0];
- point_arg.y = xyz_[1];
- point_arg.z = xyz_[2];
- }
-}
-
using VoxelDataT = pcl::SupervoxelClustering<pcl::PointXYZ>::VoxelData;
using VoxelDataRGBT = pcl::SupervoxelClustering<pcl::PointXYZRGB>::VoxelData;
using VoxelDataRGBAT = pcl::SupervoxelClustering<pcl::PointXYZRGBA>::VoxelData;
using AdjacencyContainerRGBT = pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGB, VoxelDataRGBT>;
using AdjacencyContainerRGBAT = pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGBA, VoxelDataRGBAT>;
-template class pcl::SupervoxelClustering<pcl::PointXYZ>;
-template class pcl::SupervoxelClustering<pcl::PointXYZRGB>;
-template class pcl::SupervoxelClustering<pcl::PointXYZRGBA>;
-
template class pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZ, VoxelDataT>;
template class pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGB, VoxelDataRGBT>;
template class pcl::octree::OctreePointCloudAdjacencyContainer<pcl::PointXYZRGBA, VoxelDataRGBAT>;
set(SUBSYS_DESC "Point Cloud Library Simulation")
set(SUBSYS_DEPS common io surface kdtree features search octree visualization filters geometry)
-set(build FALSE)
-find_package(OpenGL)
-
-find_package(GLEW)
-
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS opengl glew)
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs})
target_link_libraries("${LIB_NAME}" pcl_common pcl_io
for (const auto& polygon : plg->polygons) {
for (const auto& point : polygon.vertices) {
tmp = newcloud[point].getVector4fMap();
- vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
- Eigen::Vector3f(newcloud[point].r / 255.0f,
- newcloud[point].g / 255.0f,
- newcloud[point].b / 255.0f)));
+ vertices.emplace_back(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
+ Eigen::Vector3f(newcloud[point].r / 255.0f,
+ newcloud[point].g / 255.0f,
+ newcloud[point].b / 255.0f));
indices.push_back(indices.size());
}
}
for (const auto& polygon : plg->polygons) {
for (const auto& point : polygon.vertices) {
tmp = newcloud[point].getVector4fMap();
- vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
- Eigen::Vector3f(1.0, 1.0, 1.0)));
+ vertices.emplace_back(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
+ Eigen::Vector3f(1.0, 1.0, 1.0));
indices.push_back(indices.size());
}
}
set(_glut_target GLUT::GLUT)
endif()
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
-
PCL_ADD_EXECUTABLE(pcl_sim_viewer COMPONENT ${SUBSYS_NAME} SOURCES sim_viewer.cpp)
target_link_libraries (pcl_sim_viewer
${VTK_IO_TARGET_LINK_LIBRARIES} pcl_kdtree
// 27840 triangle faces
// 13670 vertices
- // 45.00Hz: simuation only
- // 1.28Hz: simuation, addNoise? , getPointCloud, writeASCII
- // 33.33Hz: simuation, getPointCloud
- // 23.81Hz: simuation, getPointCloud, writeBinary
- // 14.28Hz: simuation, addNoise, getPointCloud, writeBinary
+ // 45.00Hz: simulation only
+ // 1.28Hz: simulation, addNoise? , getPointCloud, writeASCII
+ // 33.33Hz: simulation, getPointCloud
+ // 23.81Hz: simulation, getPointCloud, writeBinary
+ // 14.28Hz: simulation, addNoise, getPointCloud, writeBinary
// MODULE TIME FRACTION
- // simuation 0.02222 31%
+ // simulation 0.02222 31%
// addNoise 0.03 41%
// getPointCloud 0.008 11%
// writeBinary 0.012 16%
set(SUBSYS_DESC "Point cloud stereo library")
set(SUBSYS_DEPS common io)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
-target_link_libraries("${LIB_NAME}" pcl_common)
+target_link_libraries("${LIB_NAME}" pcl_common pcl_io)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
set(SUBSYS_DEPS common search kdtree octree)
set(SUBSYS_EXT_DEPS "")
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS qhull vtk OpenMP)
)
set(incs
- "include/pcl/${SUBSYS_NAME}/boost.h"
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/ear_clipping.h"
"include/pcl/${SUBSYS_NAME}/gp3.h"
"include/pcl/${SUBSYS_NAME}/grid_projection.h"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-
-include_directories(
- "${CMAKE_CURRENT_SOURCE_DIR}/include"
- "${CMAKE_CURRENT_SOURCE_DIR}"
-)
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs} ${VTK_SMOOTHING_INCLUDES} ${POISSON_INCLUDES} ${OPENNURBS_INCLUDES} ${ON_NURBS_INCLUDES})
target_link_libraries("${LIB_NAME}" pcl_common pcl_search pcl_kdtree pcl_octree ${ON_NURBS_LIBRARIES})
VTK::FiltersModeling
VTK::FiltersCore)
else()
- include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
link_directories(${VTK_LIBRARY_DIRS})
target_link_libraries("${LIB_NAME}"
vtkCommonCore
const ON_UUID& object_id
);
+ using ON_CurveProxy::Trim; // Explicitly introduce the base class Trim function
const ON_BrepEdge* Edge() const;
- const ON_BrepTrim* Trim() const;
+ const ON_BrepTrim* Trim() const; // overload, not override
const ON_Brep* Brep() const;
const ON_BrepFace* Face() const;
const ON_Surface* Surface() const;
Real temp,dist2;
if(!children){return this;}
for(int i=0;i<Cube::CORNERS;i++){
- temp=SquareDistance(children[i].center,p);
+ Point3D<Real> child_center;
+ Real child_width;
+ children[i].centerAndWidth(child_center, child_width);
+ temp=SquareDistance(child_center,p);
if(!i || temp<dist2){
dist2=temp;
nearest=i;
children=NULL;
d=node.depth ();
- for(i=0;i<DIMENSION;i++){this->offset[i] = node.offset[i];}
+ for(i=0;i<DIMENSION;i++){this->off[i] = node.off[i];}
if(node.children){
initChildren();
for(i=0;i<Cube::CORNERS;i++){children[i] = node.children[i];}
template <class NodeData,class Real>
int OctNode<NodeData,Real>::CompareForwardDepths(const void* v1,const void* v2){
- return ((const OctNode<NodeData,Real>*)v1)->depth-((const OctNode<NodeData,Real>*)v2)->depth;
+ return ((const OctNode<NodeData,Real>*)v1)->depth()-((const OctNode<NodeData,Real>*)v2)->depth();
}
template< class NodeData , class Real >
template <class NodeData,class Real>
int OctNode<NodeData,Real>::CompareBackwardDepths(const void* v1,const void* v2){
- return ((const OctNode<NodeData,Real>*)v2)->depth-((const OctNode<NodeData,Real>*)v1)->depth;
+ return ((const OctNode<NodeData,Real>*)v2)->depth()-((const OctNode<NodeData,Real>*)v1)->depth();
}
template <class NodeData,class Real>
* Adapted from PCL_THROW_EXCEPTION. We intentionally do not reuse PCL_THROW_EXCEPTION here
* to avoid introducing any dependencies on PCL in this 3rd party module.
*/
+// NOLINTBEGIN(bugprone-macro-parentheses)
#define POISSON_THROW_EXCEPTION(ExceptionName, message) \
{ \
std::ostringstream s; \
s << message; \
throw ExceptionName(s.str(), __FILE__, BOOST_CURRENT_FUNCTION, __LINE__); \
}
+// NOLINTEND(bugprone-macro-parentheses)
namespace pcl
{
template<class T>
void SparseMatrix<T>::SetZero()
{
- Resize(this->m_N, this->m_M);
+ // copied from operator *=
+ for (int i=0; i<rows; i++)
+ {
+ for(int ii=0;ii<rowSizes[i];ii++){m_ppElements[i][ii].Value=T(0);}
+ }
}
template<class T>
void SparseMatrix<T>::SetIdentity()
{
SetZero();
- for(int ij=0; ij < Min( this->Rows(), this->Columns() ); ij++)
+ for(int ij=0; ij < std::min<int>( rows, _maxEntriesPerRow ); ij++)
(*this)(ij,ij) = T(1);
}
T alpha,beta,rDotR;
int i;
- solution.Resize(M.Columns());
+ solution.Resize(bb.Dimensions());
solution.SetZero();
d=r=bb;
PCL_MAKE_ALIGNED_OPERATOR_NEW
};
}
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/surface/impl/bilateral_upsampling.hpp>
+#endif
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed boost headers directly.")
-
-#include <boost/dynamic_bitset/dynamic_bitset.hpp>
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-
-#include <Eigen/SVD>
/** \brief When the input data points don't fill into the 1*1*1 box,
* scale them so that they can be filled in the unit box. Otherwise,
- * it will be some drawing problem when doing visulization
+ * there may be drawing problems during visualization
 * \param scale_factor scale all the input data points by scale_factor
*/
void
PointInT p0 = (*input_)[(*indices_)[0]];
PointInT p1 = (*input_)[(*indices_)[indices_->size () - 1]];
PointInT p2 = (*input_)[(*indices_)[indices_->size () / 2]];
- Eigen::Array4f dy1dy2 = (p1.getArray4fMap () - p0.getArray4fMap ()) / (p2.getArray4fMap () - p0.getArray4fMap ());
- while (!( (dy1dy2[0] != dy1dy2[1]) || (dy1dy2[2] != dy1dy2[1]) ) )
+ while (!pcl::isXYZFinite(p0) || !pcl::isXYZFinite(p1) || !pcl::isXYZFinite(p2) ||
+ (p1.getVector3fMap() - p0.getVector3fMap()).cross(p2.getVector3fMap() - p0.getVector3fMap()).stableNorm() < Eigen::NumTraits<float>::dummy_precision ())
{
p0 = (*input_)[(*indices_)[rand () % indices_->size ()]];
p1 = (*input_)[(*indices_)[rand () % indices_->size ()]];
p2 = (*input_)[(*indices_)[rand () % indices_->size ()]];
- dy1dy2 = (p1.getArray4fMap () - p0.getArray4fMap ()) / (p2.getArray4fMap () - p0.getArray4fMap ());
}
pcl::PointCloud<PointInT> normal_calc_cloud;
}
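
The rewritten loop condition above keeps drawing random point triplets until all three samples are finite and non-collinear: the cross product of the two edge vectors has a norm equal to twice the area of the triangle the points span, so a value near zero signals a degenerate (collinear) triplet. A small self-contained Eigen sketch of that geometric test, under the assumption of a hypothetical helper name spansPlane (not part of the PCL sources):

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <iostream>

// True when p0, p1, p2 are all finite and span a proper plane,
// i.e. the triangle they form has a non-negligible area.
bool spansPlane (const Eigen::Vector3f& p0,
                 const Eigen::Vector3f& p1,
                 const Eigen::Vector3f& p2)
{
  if (!p0.allFinite () || !p1.allFinite () || !p2.allFinite ())
    return false;
  // |(p1 - p0) x (p2 - p0)| is twice the triangle area; near zero means (almost) collinear points.
  return (p1 - p0).cross (p2 - p0).stableNorm () >= Eigen::NumTraits<float>::dummy_precision ();
}

int main ()
{
  const Eigen::Vector3f a (0.0f, 0.0f, 0.0f);
  const Eigen::Vector3f b (1.0f, 0.0f, 0.0f);
  const Eigen::Vector3f c_line (2.0f, 0.0f, 0.0f);   // collinear with a and b
  const Eigen::Vector3f c_plane (0.0f, 1.0f, 0.0f);  // spans a proper plane with a and b
  std::cout << spansPlane (a, b, c_line) << '\n';    // prints 0
  std::cout << spansPlane (a, b, c_plane) << '\n';   // prints 1
  return 0;
}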
if (inside_CB && !outside_CB)
need_invert = true;
- else if (!(!inside_CB && outside_CB))
+ else if (inside_CB || !outside_CB)
{
if ((angles_[end].angle - angles_[start].angle) < M_PI)
need_invert = true;
for (pcl::index_t cp = 0; cp < static_cast<pcl::index_t> (data_->size ()); ++cp)
{
// Check if the point is invalid
- if (!std::isfinite ((*data_)[cp].x) ||
- !std::isfinite ((*data_)[cp].y) ||
- !std::isfinite ((*data_)[cp].z))
+ if (!pcl::isXYZFinite((*data_)[cp]))
continue;
Eigen::Vector3i index_3d;
pcl::MarchingCubes<PointNT>::performReconstruction (pcl::PointCloud<PointNT> &points,
std::vector<pcl::Vertices> &polygons)
{
- if (!(iso_level_ >= 0 && iso_level_ < 1))
+ if (iso_level_ < 0 || iso_level_ >= 1)
{
PCL_ERROR ("[pcl::%s::performReconstruction] Invalid iso level %f! Please use a number between 0 and 1.\n",
getClassName ().c_str (), iso_level_);
const Eigen::Vector3d point = point_f.cast<double> ();
double f = 0.0;
- std::vector<double>::const_iterator w_it (weights.begin());
- for (std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> >::const_iterator c_it = centers.begin ();
- c_it != centers.end (); ++c_it, ++w_it)
+ auto w_it (weights.cbegin());
+ for (auto c_it = centers.cbegin ();
+ c_it != centers.cend (); ++c_it, ++w_it)
f += *w_it * kernel (*c_it, point);
grid_[x * res_y_*res_z_ + y * res_z_ + z] = static_cast<float>(f);
Eigen::Vector2f
calculatePrincipalCurvatures (const double u, const double v) const;
- /** \brief Calculate the principal curvatures using the polynomial surface.
- * \param[in] u The u-coordinate of the point in local MLS frame.
- * \param[in] v The v-coordinate of the point in local MLS frame.
- * \return The principal curvature [k1, k2] at the provided uv coordinates.
- * \note If an error occurs then 1e-5 is returned.
- */
- PCL_DEPRECATED(1, 15, "use calculatePrincipalCurvatures() instead")
- inline Eigen::Vector2f
- calculatePrincipleCurvatures (const double u, const double v) const { return calculatePrincipalCurvatures(u, v); };
-
/** \brief Project a point orthogonal to the polynomial surface.
* \param[in] u The u-coordinate of the point in local MLS frame.
* \param[in] v The v-coordinate of the point in local MLS frame.
updateCurve (double damp) override;
protected:
+ using FittingCurve2dAPDM::addPointConstraint;
+ using FittingCurve2dAPDM::assembleClosestPoints;
/** \brief Add minimization constraint: point-to-surface distance (squared-distance-minimization). */
virtual void
updateCurve (double damp) override;
protected:
+ using FittingCurve2dAPDM::addPointConstraint;
+ using FittingCurve2dAPDM::assembleClosestPoints;
+
/** \brief Add minimization constraint: point-to-surface distance (tangent-distance-minimization). */
virtual void
addPointConstraint (const double &param, const Eigen::Vector2d &point, const Eigen::Vector2d &normal,
updateCurve (double damp) override;
protected:
+ using FittingCurve2dPDM::addPointConstraint;
/** \brief Add minimization constraint: point-to-surface distance (squared-distance-minimization). */
virtual void
updateCurve (double damp) override;
protected:
+ using FittingCurve2dPDM::addPointConstraint;
+
/** \brief Add minimization constraint: point-to-surface distance (tangent-distance-minimization). */
virtual void
addPointConstraint (const double &param, const Eigen::Vector2d &point, const Eigen::Vector2d &normal,
class FittingSurfaceTDM : public FittingSurface
{
public:
+ using FittingSurface::assemble;
/** \brief Parameters with TDM extensions for fitting */
struct ParameterTDM : public FittingSurface::Parameter
updateSurf (double damp) override;
protected:
+ using FittingSurface::assembleInterior;
+ using FittingSurface::assembleBoundary;
+ using FittingSurface::addPointConstraint;
/** \brief Assemble point-to-surface constraints for interior points. */
virtual void
double clipdist = 0.0;
double snapsize = 0.0;
- int chunk_count = 0;// debugging counter
- for ( chunk_count = 0; rc; chunk_count++ )
+ while ( rc )
{
rc = file.BeginRead3dmBigChunk( &tcode, &big_value );
if (!rc )
ON__INT64 big_value;
rc = file.SeekFromStart(32)?true:false; // skip 32 byte header
- int chunk_count = 0; // debugging counter
- for ( chunk_count = 0; rc; chunk_count++ )
+ while ( rc )
{
rc = file.BeginRead3dmBigChunk( &tcode, &big_value );
if ( !rc )
for(;;)
{
current_file_attributes = 0;
+ /*
+ from readdir man page:
+ If the end of the directory stream is reached,
+ NULL is returned and errno is not changed. If an error occurs,
+ NULL is returned and errno is set appropriately.
+ */
struct dirent* dp = 0;
- int readdir_errno = readdir_r(m_dir, &m_dirent, &dp);
- if ( 0 != readdir_errno )
- break;
+ dp = readdir(m_dir);
if ( 0 == dp )
break;
- if ( 0 == m_dirent.d_name[0] )
+ if ( 0 == dp->d_name[0] )
break;
+ m_dirent = *dp;
+
if ( IsDotOrDotDotDir(m_dirent.d_name) )
continue;
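
The switch from readdir_r to readdir above relies on the man-page semantics quoted in the new comment: readdir returns NULL both at the end of the directory stream and on error, and only errno distinguishes the two cases. A minimal self-contained sketch of that pattern in plain POSIX, listing the current directory (illustration only, not the openNURBS wrapper code):

#include <cerrno>
#include <cstdio>
#include <dirent.h>

int main ()
{
  DIR* dir = opendir (".");
  if (dir == nullptr)
  {
    std::perror ("opendir");
    return 1;
  }
  for (;;)
  {
    errno = 0;                      // reset so a NULL return can be classified below
    struct dirent* dp = readdir (dir);
    if (dp == nullptr)
    {
      if (errno != 0)
        std::perror ("readdir");    // NULL with errno set -> read error
      break;                        // NULL with errno unchanged -> end of stream
    }
    std::printf ("%s\n", dp->d_name);
  }
  closedir (dir);
  return 0;
}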
return m_active_id_count;
}
-#if (_MSC_VER >= 1930 && _MSC_VER <= 1939)
+#if (_MSC_VER >= 1930 && _MSC_VER <= 1949)
// Solves internal compiler error on MSVC 2022
// (see https://github.com/microsoft/vcpkg/issues/19561)
+#define ON_VS2022_COMPILER_CRASH
+#endif
+
+#if defined(ON_VS2022_COMPILER_CRASH)
#pragma optimize("", off)
#endif
struct ON_SerialNumberMap::SN_ELEMENT* ON_SerialNumberMap::FirstElement() const
}
return e;
}
-#if (_MSC_VER >= 1930 && _MSC_VER <= 1939)
+#if defined(ON_VS2022_COMPILER_CRASH)
#pragma optimize("", on)
#endif
case single:
text_log.Print("single texture space\n");
break;
- case clspt_projection:
+ case divided:
text_log.Print("divided texture space\n");
break;
default:
case ON_TextureMapping::cylinder_mapping:
case ON_TextureMapping::sphere_mapping:
case ON_TextureMapping::box_mapping:
- case ON_TextureMapping::force_32bit_mapping_projection:
+ case ON_TextureMapping::force_32bit_mapping_type:
default:
break;
}
if ( !ON_IsValidPointList( dim, is_rat, count, stride, point ) )
return false;
- if ( xform.m_xform == NULL )
- return false;
if (count == 0)
return true;
if ( !ON_IsValidPointList( dim, is_rat, count, stride, point ) )
return false;
- if ( xform.m_xform == NULL )
- return false;
if (count == 0)
return true;
if ( !ON_IsValidPointList( dim, 0, count, stride, vector ) )
return false;
- if ( xform.m_xform == NULL )
- return false;
if (count == 0)
return true;
if ( !ON_IsValidPointList( dim, 0, count, stride, vector ) )
return false;
- if ( xform.m_xform == NULL )
- return false;
if (count == 0)
return true;
ON_SimpleArray<int> remap_array(vertex_count);
int remap_vertex_count = 0;
- int merge_count = 0;
int i0, i1, k;
struct tagMESHPOINTS mp;
{
if ( CompareMeshPoint( mp.p0+index[i0], mp.p0+index[i1], &mp ) )
break;
- else
- merge_count++;
}
for ( /*empty*/; i0 < i1; i0++ )
{
)
{
bool rc = false;
- if ( m_order[dir] >= 2 && m_cv_count[dir] >= m_order[dir] && m_knot && t0 < t1 ) {
+ if ( m_order[dir] >= 2 && m_cv_count[dir] >= m_order[dir] && nullptr != m_knot[dir] && t0 < t1 ) {
const double k0 = m_knot[dir][m_order[dir]-2];
const double k1 = m_knot[dir][m_cv_count[dir]-1];
if ( k0 == t0 && k1 == t1 )
for ( j = 0; j < m_cv_count[1]; j++ )
{
cv = CV(i,j,0);
- for (k = 0; i < m_cv_count[2]; k++ )
+ for (k = 0; k < m_cv_count[2]; k++ )
{
current_remainder = ON_CRC32(current_remainder,sizeof_cv,cv);
cv += m_cv_stride[2];
{
for ( ON_ClassId* p = m_p0; p; p = p->m_pNext )
{
- if ( !p->m_pBaseClassId && p->m_sBaseClassName ) {
+ if ( 0 == p->m_pBaseClassId && 0 != p->m_sBaseClassName[0] &&
+ 0 == p->m_sBaseClassName[sizeof(p->m_sBaseClassName) / sizeof(p->m_sBaseClassName[0]) - 1] ) {
if ( !strcmp( m_sClassName, p->m_sBaseClassName ) )
p->m_pBaseClassId = this;
}
void ON_2dPoint::Transform( const ON_Xform& xform )
{
double xx,yy,ww;
- if ( xform.m_xform ) {
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][3];
- if ( ww != 0.0 )
- ww = 1.0/ww;
- xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][3]);
- yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][3]);
- x = xx;
- y = yy;
- }
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][3];
+ if ( ww != 0.0 )
+ ww = 1.0/ww;
+ xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][3]);
+ yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][3]);
+ x = xx;
+ y = yy;
}
void ON_3dPoint::Transform( const ON_Xform& xform )
{
double xx,yy,zz,ww;
- if ( xform.m_xform ) {
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3];
- if ( ww != 0.0 )
- ww = 1.0/ww;
- xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]);
- yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]);
- zz = ww*(xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]);
- x = xx;
- y = yy;
- z = zz;
- }
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3];
+ if ( ww != 0.0 )
+ ww = 1.0/ww;
+ xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]);
+ yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]);
+ zz = ww*(xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]);
+ x = xx;
+ y = yy;
+ z = zz;
}
void ON_4dPoint::Transform( const ON_Xform& xform )
{
double xx,yy,zz,ww;
- if ( xform.m_xform ) {
- xx = xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]*w;
- yy = xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]*w;
- zz = xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]*w;
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3]*w;
- x = xx;
- y = yy;
- z = zz;
- w = ww;
- }
+ xx = xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]*w;
+ yy = xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]*w;
+ zz = xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]*w;
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3]*w;
+ x = xx;
+ y = yy;
+ z = zz;
+ w = ww;
}
void ON_2fPoint::Transform( const ON_Xform& xform )
{
double xx,yy,ww;
- if ( xform.m_xform ) {
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][3];
- if ( ww != 0.0 )
- ww = 1.0/ww;
- xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][3]);
- yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][3]);
- x = (float)xx;
- y = (float)yy;
- }
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][3];
+ if ( ww != 0.0 )
+ ww = 1.0/ww;
+ xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][3]);
+ yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][3]);
+ x = (float)xx;
+ y = (float)yy;
}
void ON_2fPoint::Rotate(
void ON_3fPoint::Transform( const ON_Xform& xform )
{
double xx,yy,zz,ww;
- if ( xform.m_xform ) {
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3];
- if ( ww != 0.0 )
- ww = 1.0/ww;
- xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]);
- yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]);
- zz = ww*(xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]);
- x = (float)xx;
- y = (float)yy;
- z = (float)zz;
- }
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3];
+ if ( ww != 0.0 )
+ ww = 1.0/ww;
+ xx = ww*(xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]);
+ yy = ww*(xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]);
+ zz = ww*(xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]);
+ x = (float)xx;
+ y = (float)yy;
+ z = (float)zz;
}
void ON_4fPoint::Transform( const ON_Xform& xform )
{
double xx,yy,zz,ww;
- if ( xform.m_xform ) {
- xx = xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]*w;
- yy = xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]*w;
- zz = xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]*w;
- ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3]*w;
- x = (float)xx;
- y = (float)yy;
- z = (float)zz;
- w = (float)ww;
- }
+ xx = xform.m_xform[0][0]*x + xform.m_xform[0][1]*y + xform.m_xform[0][2]*z + xform.m_xform[0][3]*w;
+ yy = xform.m_xform[1][0]*x + xform.m_xform[1][1]*y + xform.m_xform[1][2]*z + xform.m_xform[1][3]*w;
+ zz = xform.m_xform[2][0]*x + xform.m_xform[2][1]*y + xform.m_xform[2][2]*z + xform.m_xform[2][3]*w;
+ ww = xform.m_xform[3][0]*x + xform.m_xform[3][1]*y + xform.m_xform[3][2]*z + xform.m_xform[3][3]*w;
+ x = (float)xx;
+ y = (float)yy;
+ z = (float)zz;
+ w = (float)ww;
}
double ON_3fPoint::Fuzz(
}
-static void NodeCountHelper( const ON_RTreeNode* node, std::size_t& node_count, std::size_t& wasted_branch_count, std::size_t& leaf_count )
-{
- if ( 0 == node )
- return;
- node_count++;
- wasted_branch_count += (ON_RTree_MAX_NODE_COUNT - node->m_count);
- if ( node->m_level > 0 )
- {
- for ( int i = 0; i < node->m_count; i++ )
- {
- NodeCountHelper(node->m_branch[i].m_child,node_count,wasted_branch_count,leaf_count);
- }
- }
- else
- leaf_count += node->m_count;
-}
void ON_RTree::RemoveAll()
{
#include <pcl/point_types.h>
// Instantiations of specific point types
-PCL_INSTANTIATE(GreedyProjectionTriangulation, (pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal))
+PCL_INSTANTIATE(GreedyProjectionTriangulation, (pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal)(pcl::PointXYZLNormal))
set(USE_UMFPACK 0 CACHE BOOL "Use UmfPack for solving sparse systems of equations (e.g. in surface/on_nurbs)")
if(USE_UMFPACK)
+ find_package(CHOLMOD REQUIRED)
+ find_package(UMFPACK REQUIRED)
set(ON_NURBS_SOURCES ${ON_NURBS_SOURCES} src/on_nurbs/nurbs_solve_umfpack.cpp)
- set(ON_NURBS_LIBRARIES ${ON_NURBS_LIBRARIES} cholmod umfpack)
+ set(ON_NURBS_LIBRARIES ${ON_NURBS_LIBRARIES} SuiteSparse::CHOLMOD SuiteSparse::UMFPACK)
else()
set(ON_NURBS_SOURCES ${ON_NURBS_SOURCES} src/on_nurbs/nurbs_solve_eigen.cpp)
endif()
set(SUBSYS_DESC "Point cloud library 2d module unit tests")
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS 2d)
-set(OPT_DEPS)
-
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
+PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS})
if(NOT build)
return()
endif()
PCL_ADD_TEST(test_2d test_2d FILES test_2d.cpp
- LINK_WITH pcl_io pcl_gtest
+ LINK_WITH pcl_2d pcl_io pcl_gtest
ARGUMENTS "${PCL_SOURCE_DIR}/test/2d/lena.pcd"
"${PCL_SOURCE_DIR}/test/2d/gauss_smooth.pcd"
"${PCL_SOURCE_DIR}/test/2d/erosion.pcd"
"${PCL_SOURCE_DIR}/test/2d/closing_binary.pcd"
"${PCL_SOURCE_DIR}/test/2d/canny.pcd")
-PCL_ADD_TEST(test_2d_keypoint_instantiation_with_precompile test_2d_keypoint_instantiation_with_precompile FILES keypoint_instantiation.cpp LINK_WITH pcl_gtest pcl_common)
+PCL_ADD_TEST(test_2d_keypoint_instantiation_with_precompile test_2d_keypoint_instantiation_with_precompile FILES keypoint_instantiation.cpp LINK_WITH pcl_2d pcl_gtest pcl_common)
-PCL_ADD_TEST(test_2d_keypoint_instantiation_without_precompile test_2d_keypoint_instantiation_without_precompile FILES keypoint_instantiation.cpp LINK_WITH pcl_gtest pcl_common)
+PCL_ADD_TEST(test_2d_keypoint_instantiation_without_precompile test_2d_keypoint_instantiation_without_precompile FILES keypoint_instantiation.cpp LINK_WITH pcl_2d pcl_gtest pcl_common)
target_compile_definitions(test_2d_keypoint_instantiation_without_precompile PUBLIC PCL_NO_PRECOMPILE)
set(SUBSYS_NAME global_tests)
set(SUBSYS_DESC "Point cloud library global unit tests")
-set(DEFAULT OFF)
-set(build TRUE)
-set(REASON "Disabled by default")
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
include_directories(SYSTEM ${GTEST_INCLUDE_DIRS} ${GTEST_SRC_DIR})
add_library(pcl_gtest STATIC ${GTEST_SRC_DIR}/src/gtest-all.cc)
+target_include_directories(pcl_gtest PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
enable_testing()
set_target_properties(tests PROPERTIES FOLDER "Tests")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-
add_subdirectory(2d)
add_subdirectory(common)
add_subdirectory(features)
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS common)
set(OPT_DEPS io search kdtree octree)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
ASSERT_EQ (0, cloud_out.size ());
}
+TEST (toPCLPointCloud2NoPadding, PointXYZI)
+{
+ pcl::PointCloud<pcl::PointXYZI> cloud;
+ cloud.resize(static_cast<pcl::uindex_t>(2), static_cast<pcl::uindex_t>(2));
+ cloud[0].x = 1.0; cloud[0].y = 2.0; cloud[0].z = 3.0; cloud[0].intensity = 123.0;
+ cloud[1].x = -1.0; cloud[1].y = -2.0; cloud[1].z = -3.0; cloud[1].intensity = -123.0;
+ cloud[2].x = 0.1; cloud[2].y = 0.2; cloud[2].z = 0.3; cloud[2].intensity = 12.3;
+ cloud[3].x = 0.0; cloud[3].y = -1.7; cloud[3].z = 100.0; cloud[3].intensity = 3.14;
+ pcl::PCLPointCloud2 msg;
+ pcl::toPCLPointCloud2(cloud, msg, false);
+ EXPECT_EQ (msg.height, cloud.height);
+ EXPECT_EQ (msg.width, cloud.width);
+ EXPECT_EQ (msg.fields.size(), 4);
+ EXPECT_EQ (msg.fields[0].name, "x");
+ EXPECT_EQ (msg.fields[0].offset, 0);
+ EXPECT_EQ (msg.fields[0].datatype, pcl::PCLPointField::FLOAT32);
+ EXPECT_EQ (msg.fields[0].count, 1);
+ EXPECT_EQ (msg.fields[1].name, "y");
+ EXPECT_EQ (msg.fields[1].offset, 4);
+ EXPECT_EQ (msg.fields[1].datatype, pcl::PCLPointField::FLOAT32);
+ EXPECT_EQ (msg.fields[1].count, 1);
+ EXPECT_EQ (msg.fields[2].name, "z");
+ EXPECT_EQ (msg.fields[2].offset, 8);
+ EXPECT_EQ (msg.fields[2].datatype, pcl::PCLPointField::FLOAT32);
+ EXPECT_EQ (msg.fields[2].count, 1);
+ EXPECT_EQ (msg.fields[3].name, "intensity");
+ EXPECT_EQ (msg.fields[3].offset, 12);
+ EXPECT_EQ (msg.fields[3].datatype, pcl::PCLPointField::FLOAT32);
+ EXPECT_EQ (msg.fields[3].count, 1);
+ EXPECT_EQ (msg.point_step, 16);
+ EXPECT_EQ (msg.row_step, 16*cloud.width);
+ EXPECT_EQ (msg.data.size(), 16*cloud.width*cloud.height);
+ EXPECT_EQ (msg.at<float>(0, 0), 1.0f);
+ EXPECT_EQ (msg.at<float>(3, 4), -1.7f);
+ EXPECT_EQ (msg.at<float>(1, 8), -3.0f);
+ EXPECT_EQ (msg.at<float>(2, 12), 12.3f);
+ pcl::PointCloud<pcl::PointXYZI> cloud2;
+ pcl::fromPCLPointCloud2(msg, cloud2);
+ for(std::size_t i=0; i<cloud2.size(); ++i) {
+ EXPECT_EQ (cloud[i].x, cloud2[i].x);
+ EXPECT_EQ (cloud[i].y, cloud2[i].y);
+ EXPECT_EQ (cloud[i].z, cloud2[i].z);
+ EXPECT_EQ (cloud[i].intensity, cloud2[i].intensity);
+ }
+}
+
+TEST(PCL, fromPCLPointCloud2CastingXYZI)
+{
+ // test fromPCLPointCloud2, but in PCLPointCloud2 the fields have different types than in PointXYZI
+ pcl::PCLPointCloud2 msg;
+ msg.height = 2;
+ msg.width = 2;
+ msg.fields.resize(4);
+ msg.fields[0].name = "x";
+ msg.fields[0].offset = 0;
+ msg.fields[0].datatype = pcl::PCLPointField::FLOAT64;
+ msg.fields[0].count = 1;
+ msg.fields[1].name = "y";
+ msg.fields[1].offset = 8;
+ msg.fields[1].datatype = pcl::PCLPointField::FLOAT64;
+ msg.fields[1].count = 1;
+ msg.fields[2].name = "z";
+ msg.fields[2].offset = 16;
+ msg.fields[2].datatype = pcl::PCLPointField::FLOAT64;
+ msg.fields[2].count = 1;
+ msg.fields[3].name = "intensity";
+ msg.fields[3].offset = 24;
+ msg.fields[3].datatype = pcl::PCLPointField::UINT16;
+ msg.fields[3].count = 1;
+ msg.point_step = 32;
+ msg.row_step = 32*msg.width;
+ msg.data.resize(32*msg.width*msg.height);
+ for(std::size_t i=0; i<msg.width*msg.height; ++i) {
+ msg.at<double>(i, 0) = 1.0*i;
+ msg.at<double>(i, 8) = -1.6*i;
+ msg.at<double>(i, 16) = -3.141*i;
+ msg.at<std::uint16_t>(i, 24) = 123*i;
+ }
+ pcl::PointCloud<pcl::PointXYZI> cloud;
+ pcl::fromPCLPointCloud2(msg, cloud);
+ for(std::size_t i=0; i<msg.width*msg.height; ++i) {
+ EXPECT_EQ(cloud[i].x, 1.0f*i);
+ EXPECT_EQ(cloud[i].y, -1.6f*i);
+ EXPECT_EQ(cloud[i].z, -3.141f*i);
+ EXPECT_EQ(cloud[i].intensity, 123.0f*i);
+ }
+}
+
+TEST(PCL, fromPCLPointCloud2CastingSHOT352)
+{
+ // test whether casting also works if point type contains arrays
+ pcl::PCLPointCloud2 msg;
+ msg.height = 2;
+ msg.width = 2;
+ msg.fields.resize(2);
+ msg.fields[0].name = "shot";
+ msg.fields[0].offset = 0;
+ msg.fields[0].datatype = pcl::PCLPointField::INT64;
+ msg.fields[0].count = 352;
+ msg.fields[1].name = "rf";
+ msg.fields[1].offset = 352*8;
+ msg.fields[1].datatype = pcl::PCLPointField::FLOAT64;
+ msg.fields[1].count = 9;
+ msg.point_step = (352*8+9*8);
+ msg.row_step = msg.point_step*msg.width;
+ msg.data.resize(msg.point_step*msg.width*msg.height);
+ for(std::size_t i=0; i<msg.width*msg.height; ++i) {
+ for(std::size_t j=0; j<352; ++j)
+ msg.at<std::int64_t>(i, 8*j) = 1000*i+j;
+ for(std::size_t j=0; j<9; ++j)
+ msg.at<double>(i, 352*8+8*j) = (10*i+j)/10.0;
+ }
+ pcl::PointCloud<pcl::SHOT352> cloud;
+ pcl::fromPCLPointCloud2(msg, cloud);
+ for(std::size_t i=0; i<msg.width*msg.height; ++i) {
+ for(std::size_t j=0; j<352; ++j)
+ EXPECT_EQ(cloud[i].descriptor[j], 1000*i+j);
+ for(std::size_t j=0; j<9; ++j)
+ EXPECT_EQ(cloud[i].rf[j], (10*i+j)/10.0f);
+ }
+}
+
/* ---[ */
int
main (int argc, char** argv)
cloud.begin ()->getVector3fMap ());
EXPECT_EQ_VECTORS ((--cloud.end ())->getVector3fMap (),
(--cloud.end ())->getVector3fMap ());
- PointCloud<PointXYZ>::const_iterator pit = cloud.begin ();
- PointCloud<PointXYZ>::VectorType::const_iterator pit2 = cloud.begin ();
+ auto pit = cloud.begin ();
+ auto pit2 = cloud.begin ();
for (; pit < cloud.end (); ++pit2, ++pit)
EXPECT_EQ_VECTORS (pit->getVector3fMap (), pit2->getVector3fMap ());
}
EXPECT_EQ (cloud.width, cloud.size ());
EXPECT_EQ (cloud.height, 1);
EXPECT_EQ (cloud.width, old_size + cloud2.size ());
- PointCloud<PointXYZ>::const_iterator pit = cloud.begin ();
- PointCloud<PointXYZ>::const_iterator pit2 = cloud2.begin ();
+ auto pit = cloud.begin ();
+ auto pit2 = cloud2.begin ();
for (; pit2 < cloud2.end (); ++pit2, ++pit)
EXPECT_EQ_VECTORS (pit->getVector3fMap (), pit2->getVector3fMap ());
}
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS features)
set(OPT_DEPS io keypoints) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
pc.compute (*pcs);
EXPECT_EQ (pcs->size (), indices.size ());
- // Adjust for small numerical inconsitencies (due to nn_indices not being sorted)
+ // Adjust for small numerical inconsistencies (due to nn_indices not being sorted)
EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[0]), 0.98509, 1e-4);
EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[1]), 0.10713, 1e-4);
EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[2]), 0.13462, 1e-4);
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS filters)
set(OPT_DEPS io features segmentation)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
#include <pcl/filters/frustum_culling.h>
#include <pcl/filters/sampling_surface_normal.h>
#include <pcl/filters/voxel_grid.h>
+#include <pcl/filters/voxel_grid_occlusion_estimation.h>
#include <pcl/filters/voxel_grid_covariance.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/project_inliers.h>
EXPECT_LE (std::abs (output[neighbors.at (0)].y - output[centroidIdx].y), 0.02);
EXPECT_LE ( output[neighbors.at (0)].z - output[centroidIdx].z, 0.02 * 2);
+ // indices must be handled correctly
+ auto indices = grid.getIndices(); // original cloud indices
+ auto cloud_copied = std::make_shared<PointCloud<PointXYZ>>();
+ *cloud_copied = *cloud;
+ for (int i = 0; i < 100; i++) {
+ cloud_copied->emplace_back(100 + i, 100 + i, 100 + i);
+ }
+ grid.setInputCloud(cloud_copied);
+ grid.setIndices(indices);
+ grid.filter(output);
+
+ EXPECT_EQ(output.size(), 100); // additional points should be ignored
+
// Test the pcl::PCLPointCloud2 method
VoxelGrid<PCLPointCloud2> grid2;
EXPECT_LE (std::abs (output[neighbors2.at (0)].x - output[centroidIdx2].x), 0.02);
EXPECT_LE (std::abs (output[neighbors2.at (0)].y - output[centroidIdx2].y), 0.02);
EXPECT_LE (output[neighbors2.at (0)].z - output[centroidIdx2].z, 0.02 * 2);
+
+ // indices must be handled correctly
+ auto indices2 = grid2.getIndices(); // original cloud indices
+ auto cloud_blob2 = std::make_shared<PCLPointCloud2>();
+ toPCLPointCloud2(*cloud_copied, *cloud_blob2);
+
+ grid2.setInputCloud(cloud_blob2);
+ grid2.setIndices(indices2);
+ grid2.filter(output_blob);
+
+ fromPCLPointCloud2(output_blob, output);
+ EXPECT_EQ(output.size(), 100); // additional points should be ignored
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Verify 2 clusters (0.11 and 0.31) passed threshold and verify their location and color
EXPECT_EQ (outputMin4.size (), 2);
- // Offset noise applied by offsets vec are 1e-3 magnitude, so check within 1e-2
+ // Offset noise applied by offsets vec are 1e-3 magnitude, so check within 1e-2
EXPECT_NEAR (outputMin4[0].x, input->at(1).x, 1e-2);
EXPECT_NEAR (outputMin4[0].y, input->at(1).y, 1e-2);
EXPECT_NEAR (outputMin4[0].z, input->at(1).z, 1e-2);
{
// Test the PointCloud<PointT> method
PointCloud<PointXYZ> cloud_out;
+ PointCloud<PointXYZ> cloud_out_neg;
// Remove outliers using a spherical density criterion
RadiusOutlierRemoval<PointXYZ> outrem;
outrem.setInputCloud (cloud);
outrem.setRadiusSearch (0.02);
outrem.setMinNeighborsInRadius (14);
+ outrem.setNumberOfThreads(4);
outrem.filter (cloud_out);
EXPECT_EQ (cloud_out.size (), 307);
EXPECT_EQ (cloud_out.width, 307);
EXPECT_TRUE (cloud_out.is_dense);
- EXPECT_NEAR (cloud_out[cloud_out.size () - 1].x, -0.077893, 1e-4);
- EXPECT_NEAR (cloud_out[cloud_out.size () - 1].y, 0.16039, 1e-4);
- EXPECT_NEAR (cloud_out[cloud_out.size () - 1].z, -0.021299, 1e-4);
+
+ outrem.setNegative(true);
+ outrem.filter(cloud_out_neg);
+
+ EXPECT_EQ(cloud_out_neg.size(), 90);
+ EXPECT_TRUE(cloud_out_neg.is_dense);
+
+ PointCloud<PointXYZRGB> cloud_out_rgb;
+ PointCloud<PointXYZRGB> cloud_out_rgb_neg;
+ // Remove outliers using a spherical density criterion on a non-dense point cloud
+ RadiusOutlierRemoval<PointXYZRGB> outremNonDense;
+ outremNonDense.setInputCloud(cloud_organized);
+ outremNonDense.setRadiusSearch(0.02);
+ outremNonDense.setMinNeighborsInRadius(14);
+ outremNonDense.setNumberOfThreads(4);
+ outremNonDense.filter(cloud_out_rgb);
+
+ EXPECT_EQ(cloud_out_rgb.size(), 240801);
+ EXPECT_EQ(cloud_out_rgb.width, 240801);
+ //EXPECT_TRUE(cloud_out_rgb.is_dense);
+
+ outremNonDense.setNegative(true);
+ outremNonDense.filter(cloud_out_rgb_neg);
+ EXPECT_EQ(cloud_out_rgb_neg.size(), 606);
+ //EXPECT_TRUE(cloud_out_rgb_neg.is_dense);
// Test the pcl::PCLPointCloud2 method
PCLPointCloud2 cloud_out2;
Eigen::AngleAxisf (0 * M_PI / 180, Eigen::Vector3f::UnitY ()) *
Eigen::AngleAxisf (0 * M_PI / 180, Eigen::Vector3f::UnitZ ());
- camera_pose.block (0, 0, 3, 3) = R;
+ camera_pose.topLeftCorner<3, 3> () = R;
Eigen::Vector3f T;
T (0) = -5; T (1) = 0; T (2) = 0;
- camera_pose.block (0, 3, 3, 1) = T;
+ camera_pose.block<3, 1> (0, 3) = T;
camera_pose (3, 3) = 1;
fc.setCameraPose (camera_pose);
Eigen::Matrix4f cam2robot;
cam2robot << 0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1;
- // Cut out object based on ROI
+ // Cut out object based on ROI
fc.setInputCloud (model);
fc.setNegative (false);
fc.setVerticalFOV (43);
fc.setCameraPose (cam2robot);
fc.filter (*output);
// Should extract milk cartoon with 13541 points
- EXPECT_EQ (output->size (), 13541);
+ EXPECT_EQ (output->size (), 13541);
removed = fc.getRemovedIndices ();
EXPECT_EQ (removed->size (), model->size () - output->size ());
EXPECT_LT(err_refined_var, err_est_var);
}
+TEST (VoxelGridOcclusionEstimation, Filters)
+{
+ auto input_cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>();
+ input_cloud->emplace_back(0.0, 0.0, 0.0);
+ input_cloud->emplace_back(9.9, 9.9, 9.9); // we want a nice bounding box from (0, 0, 0) to (10, 10, 10)
+ input_cloud->sensor_origin_ << -0.1f, 0.5f, 0.5f, 0.0f; // just outside the bounding box. Most rays will enter at voxel (0, 0, 0)
+ pcl::VoxelGridOcclusionEstimation<pcl::PointXYZ> vg;
+ vg.setInputCloud (input_cloud);
+ vg.setLeafSize (1.0, 1.0, 1.0);
+ vg.initializeVoxelGrid ();
+ std::vector<Eigen::Vector3i, Eigen::aligned_allocator<Eigen::Vector3i> > occluded_voxels;
+ vg.occlusionEstimationAll (occluded_voxels);
+ for(std::size_t y=0; y<10; ++y) {
+ for (std::size_t z=0; z<10; ++z) {
+ if(y==9 && z==9) continue; // voxel (9, 9, 9) is occupied by point (9.9, 9.9, 9.9), so it will not be counted as occluded
+ Eigen::Vector3i cell(9, y, z);
+ EXPECT_NE(std::find(occluded_voxels.begin(), occluded_voxels.end(), cell), occluded_voxels.end()); // not equal means it was found
+ }
+ }
+}
+
/* ---[ */
int
main (int argc, char** argv)
// sure that each cell has at least one point. As a result, we expect 1000 points in
// the output cloud and the rest in removed indices.
- pcl::UniformSampling<pcl::PointXYZ> us(true); // extract removed indices
- us.setInputCloud(xyz);
- us.setRadiusSearch(0.1);
+ pcl::UniformSampling<pcl::PointXYZ>::Ptr us_ptr(new pcl::UniformSampling<pcl::PointXYZ>(true));// extract removed indices
+ us_ptr->setRadiusSearch(0.1);
pcl::PointCloud<pcl::PointXYZ> output;
- us.filter(output);
+ pcl::Indices indices;
+
+ // Empty input cloud
+ us_ptr->filter(output);
+ us_ptr->filter(indices);
- auto removed_indices = us.getRemovedIndices();
+ us_ptr->setInputCloud(xyz);
+ // Cloud
+ us_ptr->filter(output);
+ // Indices
+ us_ptr->filter(indices);
+
+ for (const auto& outputIndex : indices)
+ {
+ // Check if the point exists in the output cloud
+ bool found = false;
+ for (const auto& j : output)
+ {
+ if (j.x == (*xyz)[outputIndex].x &&
+ j.y == (*xyz)[outputIndex].y &&
+ j.z == (*xyz)[outputIndex].z)
+ {
+ found = true;
+ break;
+ }
+ }
+
+ // Assert that the point was found in the output cloud
+ ASSERT_TRUE(found);
+ }
+
+ auto removed_indices = us_ptr->getRemovedIndices();
ASSERT_EQ(output.size(), 1000);
- EXPECT_EQ(removed_indices->size(), xyz->size() - 1000);
+ EXPECT_EQ(int(removed_indices->size()), int(xyz->size() - 1000));
std::set<int> removed_indices_set(removed_indices->begin(), removed_indices->end());
ASSERT_EQ(removed_indices_set.size(), removed_indices->size());
+
+ // Negative
+ us_ptr->setNegative (true);
+ us_ptr->filter(output);
+ removed_indices = us_ptr->getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 1000);
+ EXPECT_EQ (int (output.size ()), int (xyz->size() - 1000));
+
+ // Organized
+ us_ptr->setKeepOrganized (true);
+ us_ptr->setNegative (false);
+ us_ptr->filter(output);
+ removed_indices = us_ptr->getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), int(xyz->size() - 1000));
+ for (std::size_t i = 0; i < removed_indices->size (); ++i)
+ {
+ EXPECT_TRUE (std::isnan (output.at ((*removed_indices)[i]).x));
+ EXPECT_TRUE (std::isnan (output.at ((*removed_indices)[i]).y));
+ EXPECT_TRUE (std::isnan (output.at ((*removed_indices)[i]).z));
+ }
+
+ EXPECT_EQ (output.width, xyz->width);
+ EXPECT_EQ (output.height, xyz->height);
+
+ // Check input cloud with nan values
+ us_ptr->setInputCloud (output.makeShared ());
+ us_ptr->setRadiusSearch(2);
+ us_ptr->filter (output);
+ removed_indices = us_ptr->getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), output.size()-1);
}
int
-I/src/pcl/build/include -I/src/pcl/common/include \
-I/src/pcl/dssdk/include \
-I/src/pcl/io/include -isystem /usr/include/eigen3 \
- -O2 -g -DNDEBUG -fPIC -std=c++14 \
+ -O2 -g -DNDEBUG -fPIC -std=c++17 \
-o ply_reader_fuzzer.o -c ply_reader_fuzzer.cpp
$CXX $CXXFLAGS $LIB_FUZZING_ENGINE ply_reader_fuzzer.o \
set(SUBSYS_DESC "Point cloud library geometry module unit tests")
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS geometry)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
using VI = VertexIndex;
VertexIndices vi;
std::vector <VertexIndices> faces;
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear (); // 0
- vi.push_back (VI (2)); vi.push_back (VI (1)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear (); // 1
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (5)); faces.push_back (vi); vi.clear (); // 2
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(1); faces.push_back (vi); vi.clear (); // 0
+ vi.emplace_back(2); vi.emplace_back(1); vi.emplace_back(4); faces.push_back (vi); vi.clear (); // 1
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(5); faces.push_back (vi); vi.clear (); // 2
for (const auto &face : faces)
{
ASSERT_TRUE (mesh.addFace (face).isValid ());
// Check if the whole boundary is reached.
VertexIndices boundary_expected;
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (5));
- boundary_expected.push_back (VI (2));
- boundary_expected.push_back (VI (4));
- boundary_expected.push_back (VI (1));
- boundary_expected.push_back (VI (3));
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(5);
+ boundary_expected.emplace_back(2);
+ boundary_expected.emplace_back(4);
+ boundary_expected.emplace_back(1);
+ boundary_expected.emplace_back(3);
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (3));
EXPECT_EQ (boundary_expected, boundary_vertices);
// Close the gaps.
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear (); // 3
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear (); // 3
ASSERT_TRUE (mesh.addFace (faces [3]).isValid ());
EXPECT_TRUE (hasFaces (mesh, faces));
// 0 //
// / \ //
// 3 - 4 //
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(4); faces.push_back (vi); vi.clear ();
for (const auto &face : faces)
{
ASSERT_TRUE (mesh.addFace (face).isValid ());
EXPECT_TRUE (hasFaces (mesh, faces));
// (*) Adding the next two faces would destroy the connectivity around vertex 0. E.g. a VertexAroundVertexCirculator would not be able to access all the vertices (1, 2, 3, 4) anymore.
- vi.push_back (VI (3)); vi.push_back (VI (0)); vi.push_back (VI (4));
+ vi.emplace_back(3); vi.emplace_back(0); vi.emplace_back(4);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (1)); vi.push_back (VI (0)); vi.push_back (VI (2));
+ vi.emplace_back(1); vi.emplace_back(0); vi.emplace_back(2);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
{
// Check if the whole boundary is reached.
VertexIndices boundary_expected;
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (1));
- boundary_expected.push_back (VI (2));
- boundary_expected.push_back (VI (3));
- boundary_expected.push_back (VI (4));
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(1);
+ boundary_expected.emplace_back(2);
+ boundary_expected.emplace_back(3);
+ boundary_expected.emplace_back(4);
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (2));
std::sort (boundary_vertices.begin (), boundary_vertices.end ());
// 3 - 0 - 6 //
// \ / \ / //
// 4 5 //
- vi.push_back (VI (0)); vi.push_back (VI (5)); vi.push_back (VI (6)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(5); vi.emplace_back(6); faces.push_back (vi); vi.clear ();
EXPECT_TRUE (mesh.addFace (faces [2]).isValid ());
EXPECT_TRUE (hasFaces (mesh, faces));
// Same as (*)
- vi.push_back (VI (1)); vi.push_back (VI (0)); vi.push_back (VI (2));
+ vi.emplace_back(1); vi.emplace_back(0); vi.emplace_back(2);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (3)); vi.push_back (VI (0)); vi.push_back (VI (4));
+ vi.emplace_back(3); vi.emplace_back(0); vi.emplace_back(4);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (5)); vi.push_back (VI (0)); vi.push_back (VI (6));
+ vi.emplace_back(5); vi.emplace_back(0); vi.emplace_back(6);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
{
// Check if the whole boundary is reached.
VertexIndices boundary_expected;
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (1));
- boundary_expected.push_back (VI (2));
- boundary_expected.push_back (VI (3));
- boundary_expected.push_back (VI (4));
- boundary_expected.push_back (VI (5));
- boundary_expected.push_back (VI (6));
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(1);
+ boundary_expected.emplace_back(2);
+ boundary_expected.emplace_back(3);
+ boundary_expected.emplace_back(4);
+ boundary_expected.emplace_back(5);
+ boundary_expected.emplace_back(6);
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (2));
std::sort (boundary_vertices.begin (), boundary_vertices.end ());
// | / | \ //
// |/ | \ //
// 4 5---6 //
- vi.push_back (VI (0)); vi.push_back (VI (7)); vi.push_back (VI (8)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(7); vi.emplace_back(8); faces.push_back (vi); vi.clear ();
EXPECT_TRUE (mesh.addFace (faces [3]).isValid ());
EXPECT_TRUE (hasFaces (mesh, faces));
// Same as (*)
- vi.push_back (VI (1)); vi.push_back (VI (0)); vi.push_back (VI (2));
+ vi.emplace_back(1); vi.emplace_back(0); vi.emplace_back(2);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (3)); vi.push_back (VI (0)); vi.push_back (VI (4));
+ vi.emplace_back(3); vi.emplace_back(0); vi.emplace_back(4);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (5)); vi.push_back (VI (0)); vi.push_back (VI (6));
+ vi.emplace_back(5); vi.emplace_back(0); vi.emplace_back(6);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
- vi.push_back (VI (7)); vi.push_back (VI (0)); vi.push_back (VI (8));
+ vi.emplace_back(7); vi.emplace_back(0); vi.emplace_back(8);
EXPECT_FALSE (mesh.addFace (vi).isValid ());
vi.clear ();
// Check if the whole boundary is reached.
{
VertexIndices boundary_expected;
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (1));
- boundary_expected.push_back (VI (2));
- boundary_expected.push_back (VI (3));
- boundary_expected.push_back (VI (4));
- boundary_expected.push_back (VI (5));
- boundary_expected.push_back (VI (6));
- boundary_expected.push_back (VI (7));
- boundary_expected.push_back (VI (8));
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(1);
+ boundary_expected.emplace_back(2);
+ boundary_expected.emplace_back(3);
+ boundary_expected.emplace_back(4);
+ boundary_expected.emplace_back(5);
+ boundary_expected.emplace_back(6);
+ boundary_expected.emplace_back(7);
+ boundary_expected.emplace_back(8);
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (2));
std::sort (boundary_vertices.begin (), boundary_vertices.end ());
// | /4|2\ | //
// |/ | \| //
// 4---5---6 //
- vi.push_back (VI (0)); vi.push_back (VI (4)); vi.push_back (VI (5)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (8)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (3)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (6)); vi.push_back (VI (7)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(4); vi.emplace_back(5); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(8); vi.emplace_back(1); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(3); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(6); vi.emplace_back(7); faces.push_back (vi); vi.clear ();
for (std::size_t i = 4; i < faces.size (); ++i)
{
EXPECT_TRUE (mesh.addFace (faces [i]).isValid ());
VertexIndices boundary_expected;
for (unsigned int i=8; i>0; --i)
{
- boundary_expected.push_back (VI (i));
+ boundary_expected.emplace_back(i);
}
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (1));
EXPECT_EQ (boundary_expected, boundary_vertices);
TEST (TestAddDeleteFace, Manifold1)
{
using Mesh = ManifoldTriangleMesh;
- using VI = VertexIndex;
+
Mesh mesh;
for (unsigned int i=0; i<7; ++i) mesh.addVertex (i);
std::vector <VertexIndices> faces;
std::vector <std::vector <int> > expected;
VertexIndices vi;
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (3)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (4)); vi.push_back (VI (5)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (5)); vi.push_back (VI (6)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (6)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(3); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(4); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(4); vi.emplace_back(5); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(5); vi.emplace_back(6); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(6); vi.emplace_back(1); faces.push_back (vi); vi.clear ();
for (std::size_t i = 0; i < faces.size (); ++i)
{
mesh.clear ();
expected.clear ();
for (unsigned int i=0; i<11; ++i) mesh.addVertex (i);
- vi.push_back (VI ( 3)); vi.push_back (VI (7)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 3)); vi.push_back (VI (2)); vi.push_back (VI (8)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 8)); vi.push_back (VI (2)); vi.push_back (VI (9)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (10)); vi.push_back (VI (9)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (10)); vi.push_back (VI (2)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 3); vi.emplace_back(7); vi.emplace_back(4); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 3); vi.emplace_back(2); vi.emplace_back(8); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 8); vi.emplace_back(2); vi.emplace_back(9); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(10); vi.emplace_back(9); vi.emplace_back(2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(10); vi.emplace_back(2); vi.emplace_back(1); faces.push_back (vi); vi.clear ();
for (std::size_t i = 0; i < faces.size (); ++i)
{
ASSERT_TRUE (mesh.addFace (faces [i]).isValid ()) << "Face " << i;
// 2 //
std::vector <VertexIndices> faces;
VertexIndices vi;
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (3)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(3); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(1); faces.push_back (vi); vi.clear ();
// Try all possible combinations of adding the faces and deleting a vertex.
// NOTE: Some cases are redundant.
std::vector <VertexIndices> faces;
std::vector <std::vector <int> > expected;
VertexIndices vi;
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (3)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (4)); vi.push_back (VI (5)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (5)); vi.push_back (VI (6)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (6)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(3); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(4); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(4); vi.emplace_back(5); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(5); vi.emplace_back(6); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(6); vi.emplace_back(1); faces.push_back (vi); vi.clear ();
for (std::size_t i = 0; i < faces.size (); ++i)
{
using VI = VertexIndex;
VertexIndices vi;
std::vector <VertexIndices> faces;
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (1)); faces.push_back (vi); vi.clear (); // 0
- vi.push_back (VI (2)); vi.push_back (VI (1)); vi.push_back (VI (4)); faces.push_back (vi); vi.clear (); // 1
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (5)); faces.push_back (vi); vi.clear (); // 2
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(1); faces.push_back (vi); vi.clear (); // 0
+ vi.emplace_back(2); vi.emplace_back(1); vi.emplace_back(4); faces.push_back (vi); vi.clear (); // 1
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(5); faces.push_back (vi); vi.clear (); // 2
for (const auto &face : faces)
{
ASSERT_TRUE (mesh.addFace (face).isValid ());
// Check if the whole boundary is reached.
VertexIndices boundary_expected;
- boundary_expected.push_back (VI (0));
- boundary_expected.push_back (VI (5));
- boundary_expected.push_back (VI (2));
- boundary_expected.push_back (VI (4));
- boundary_expected.push_back (VI (1));
- boundary_expected.push_back (VI (3));
+ boundary_expected.emplace_back(0);
+ boundary_expected.emplace_back(5);
+ boundary_expected.emplace_back(2);
+ boundary_expected.emplace_back(4);
+ boundary_expected.emplace_back(1);
+ boundary_expected.emplace_back(3);
VertexIndices boundary_vertices = getBoundaryVertices (mesh, VI (3));
EXPECT_EQ (boundary_expected, boundary_vertices);
}
// Make manifold
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces.push_back (vi); vi.clear (); // 3
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces.push_back (vi); vi.clear (); // 3
ASSERT_TRUE (mesh.addFace (faces [3]).isValid ());
EXPECT_TRUE (hasFaces (mesh, faces));
for (int i=0; i<7; ++i) mesh_.addVertex ();
VertexIndices vi;
- using VI = VertexIndex;
- vi.push_back (VI (0)); vi.push_back (VI (1)); vi.push_back (VI (2)); faces_.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (2)); vi.push_back (VI (3)); faces_.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (3)); vi.push_back (VI (4)); faces_.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (4)); vi.push_back (VI (5)); faces_.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (5)); vi.push_back (VI (6)); faces_.push_back (vi); vi.clear ();
- vi.push_back (VI (0)); vi.push_back (VI (6)); vi.push_back (VI (1)); faces_.push_back (vi); vi.clear ();
+
+ vi.emplace_back(0); vi.emplace_back(1); vi.emplace_back(2); faces_.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(2); vi.emplace_back(3); faces_.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(3); vi.emplace_back(4); faces_.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(4); vi.emplace_back(5); faces_.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(5); vi.emplace_back(6); faces_.push_back (vi); vi.clear ();
+ vi.emplace_back(0); vi.emplace_back(6); vi.emplace_back(1); faces_.push_back (vi); vi.clear ();
for (std::size_t i=0; i<faces_.size (); ++i)
{
ASSERT_TRUE (mesh_.addFace (faces_ [i]).isValid ()) << "Face number " << i;
}
for (int i=1; i<=6; ++i)
{
- expected_123456_.push_back (VertexIndex (i));
- expected_654321_.push_back (VertexIndex (7-i));
+ expected_123456_.emplace_back(i);
+ expected_654321_.emplace_back(7-i);
}
}
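// A minimal, self-contained sketch (not PCL code) of why the push_back (VI (i))
// calls above can become emplace_back(i): std::vector::emplace_back forwards its
// arguments to the element's constructor, so the strongly typed index is built
// in place instead of through a named temporary. "Index" below is a hypothetical
// stand-in for the mesh index types (VertexIndex, FaceIndex, ...).
#include <cstdio>
#include <vector>

struct Index
{
  explicit Index (int i = -1) : value (i) {}
  int value;
};

int
main ()
{
  std::vector<Index> indices;
  indices.push_back (Index (0)); // temporary Index, then moved/copied into the vector
  indices.emplace_back (1);      // Index constructed directly inside the vector
  std::printf ("%d %d\n", indices[0].value, indices[1].value);
}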
for (int i = 0; i < static_cast<int> (mesh_.sizeFaces ()); ++i)
{
expected.clear ();
- expected.push_back (FaceIndex (i==(n-1) ? 0 : (i+1)));
- expected.push_back (FaceIndex (i== 0 ? (n-1) : (i-1)));
- expected.push_back (FaceIndex ());
+ expected.emplace_back(i==(n-1) ? 0 : (i+1));
+ expected.emplace_back(i== 0 ? (n-1) : (i-1));
+ expected.emplace_back();
FAFC circ = mesh_.getFaceAroundFaceCirculator (FaceIndex (i));
const FAFC circ_end = circ;
for (int i = 0; i < static_cast<int> (mesh_.sizeFaces ()); ++i)
{
expected.clear ();
- expected.push_back (FaceIndex (i== 0 ? (n-1) : (i-1)));
- expected.push_back (FaceIndex (i==(n-1) ? 0 : (i+1)));
- expected.push_back (FaceIndex ());
+ expected.emplace_back(i== 0 ? (n-1) : (i-1));
+ expected.emplace_back(i==(n-1) ? 0 : (i+1));
+ expected.emplace_back();
FAFC circ = mesh_.getFaceAroundFaceCirculator (FaceIndex (i));
const FAFC circ_end = circ;
#pragma once
+#include <iomanip> // for setw
#include <iostream>
#include <vector>
// \ / \ / //
// 0 - 1 //
VertexIndices vi_0, vi_1, vi_2;
- vi_0.push_back (VertexIndex (0)); vi_0.push_back (VertexIndex (1)); vi_0.push_back (VertexIndex (2));
- vi_1.push_back (VertexIndex (0)); vi_1.push_back (VertexIndex (2)); vi_1.push_back (VertexIndex (3));
- vi_2.push_back (VertexIndex (4)); vi_2.push_back (VertexIndex (2)); vi_2.push_back (VertexIndex (1));
+ vi_0.emplace_back(0); vi_0.emplace_back(1); vi_0.emplace_back(2);
+ vi_1.emplace_back(0); vi_1.emplace_back(2); vi_1.emplace_back(3);
+ vi_2.emplace_back(4); vi_2.emplace_back(2); vi_2.emplace_back(1);
// Mesh data.
int vd_0 (10), vd_1 (11), vd_2 (12), vd_3 (13), vd_4 (14);
VertexIndices vi;
for (unsigned int i=0; i<n; ++i)
{
- vi.push_back (VertexIndex (i));
+ vi.emplace_back(i);
mesh.addVertex (i);
}
std::vector <VertexIndices> faces;
VertexIndices vi;
- vi.push_back (VertexIndex (0));
- vi.push_back (VertexIndex (1));
- vi.push_back (VertexIndex (2));
+ vi.emplace_back(0);
+ vi.emplace_back(1);
+ vi.emplace_back(2);
faces.push_back (vi);
vi.clear ();
- vi.push_back (VertexIndex (0));
- vi.push_back (VertexIndex (2));
- vi.push_back (VertexIndex (3));
- vi.push_back (VertexIndex (4));
+ vi.emplace_back(0);
+ vi.emplace_back(2);
+ vi.emplace_back(3);
+ vi.emplace_back(4);
faces.push_back (vi);
vi.clear ();
- vi.push_back (VertexIndex (0));
- vi.push_back (VertexIndex (4));
- vi.push_back (VertexIndex (5));
- vi.push_back (VertexIndex (6));
- vi.push_back (VertexIndex (1));
+ vi.emplace_back(0);
+ vi.emplace_back(4);
+ vi.emplace_back(5);
+ vi.emplace_back(6);
+ vi.emplace_back(1);
faces.push_back (vi);
vi.clear ();
VertexIndices vi;
for (unsigned int i=0; i<n; ++i)
{
- vi.push_back (VertexIndex (i));
+ vi.emplace_back(i);
mesh.addVertex (i);
}
// 08 - 09 - 10 - 11 //
// | | | | //
// 12 - 13 - 14 - 15 //
- using VI = VertexIndex;
std::vector <VertexIndices> faces;
VertexIndices vi;
- vi.push_back (VI ( 0)); vi.push_back (VI ( 4)); vi.push_back (VI ( 5)); vi.push_back (VI ( 1)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 1)); vi.push_back (VI ( 5)); vi.push_back (VI ( 6)); vi.push_back (VI ( 2)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 2)); vi.push_back (VI ( 6)); vi.push_back (VI ( 7)); vi.push_back (VI ( 3)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 4)); vi.push_back (VI ( 8)); vi.push_back (VI ( 9)); vi.push_back (VI ( 5)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 5)); vi.push_back (VI ( 9)); vi.push_back (VI (10)); vi.push_back (VI ( 6)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 6)); vi.push_back (VI (10)); vi.push_back (VI (11)); vi.push_back (VI ( 7)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 8)); vi.push_back (VI (12)); vi.push_back (VI (13)); vi.push_back (VI ( 9)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI ( 9)); vi.push_back (VI (13)); vi.push_back (VI (14)); vi.push_back (VI (10)); faces.push_back (vi); vi.clear ();
- vi.push_back (VI (10)); vi.push_back (VI (14)); vi.push_back (VI (15)); vi.push_back (VI (11)); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 0); vi.emplace_back( 4); vi.emplace_back( 5); vi.emplace_back( 1); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 1); vi.emplace_back( 5); vi.emplace_back( 6); vi.emplace_back( 2); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 2); vi.emplace_back( 6); vi.emplace_back( 7); vi.emplace_back( 3); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 4); vi.emplace_back( 8); vi.emplace_back( 9); vi.emplace_back( 5); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 5); vi.emplace_back( 9); vi.emplace_back(10); vi.emplace_back( 6); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 6); vi.emplace_back(10); vi.emplace_back(11); vi.emplace_back( 7); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 8); vi.emplace_back(12); vi.emplace_back(13); vi.emplace_back( 9); faces.push_back (vi); vi.clear ();
+ vi.emplace_back( 9); vi.emplace_back(13); vi.emplace_back(14); vi.emplace_back(10); faces.push_back (vi); vi.clear ();
+ vi.emplace_back(10); vi.emplace_back(14); vi.emplace_back(15); vi.emplace_back(11); faces.push_back (vi); vi.clear ();
ASSERT_EQ (order_vec.size (), non_manifold.size ());
ASSERT_EQ (9, faces.size ());
VertexIndices vi;
for (unsigned int i=0; i<n; ++i)
{
- vi.push_back (VertexIndex (i));
+ vi.emplace_back(i);
mesh.addVertex (i);
}
set(SUBSYS_DESC "Point cloud library gpu octree tests")
set(SUBSYS_DEPS common octree gpu_containers gpu_octree gpu_utils)
-set(DEFAULT ON)
-set(build TRUE)
-set(REASON "")
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
set(SUBSYS_NAME tests_io)
set(SUBSYS_DESC "Point cloud library io module unit tests")
-PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS io)
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS common io octree)
set(OPT_DEPS visualization)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
# Uses VTK readers to verify
if(VTK_FOUND AND NOT ANDROID)
- include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
PCL_ADD_TEST (io_ply_mesh_io test_ply_mesh_io
FILES test_ply_mesh_io.cpp
LINK_WITH pcl_gtest pcl_io
PCL_ADD_TEST(buffers test_buffers
FILES test_buffers.cpp
- LINK_WITH pcl_gtest pcl_common)
+ LINK_WITH pcl_gtest pcl_common pcl_io)
PCL_ADD_TEST(io_octree_compression test_octree_compression
FILES test_octree_compression.cpp
#include <pcl/test/gtest.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/pcd_grabber.h>
#include <string>
#include <thread>
#include <vector>
-#include <boost/filesystem.hpp> // for directory_iterator, extension
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace std::chrono_literals;
pclzf_dir_ = grabber_sequences + "/pclzf";
pcd_dir_ = grabber_sequences + "/pcd";
// Get pcd files
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (pcd_dir_); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (pcd_dir_); itr != end_itr; ++itr)
{
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
{
#include <pcl/io/ascii_io.h>
#include <pcl/io/obj_io.h>
#include <fstream>
+#include <iomanip> // for setprecision
#include <locale>
#include <stdexcept>
fs.close ();
pcl::PCLPointCloud2 blob;
- pcl::OBJReader objreader = pcl::OBJReader();
+ pcl::OBJReader objreader;
int res = objreader.read ("test_obj.obj", blob);
EXPECT_NE (res, -1);
EXPECT_EQ (blob.width, 8);
EXPECT_EQ (blob.fields[5].count, 1);
EXPECT_EQ (blob.fields[5].datatype, pcl::PCLPointField::FLOAT32);
+ auto fblob = reinterpret_cast<const float*>(blob.data.data());
+
+ size_t offset_p = 0;
+ size_t offset_vn = blob.fields[3].offset / 4;
+ for (size_t i = 0; i < blob.width; ++i, offset_p += 6, offset_vn += 6)
+ {
+ Eigen::Vector3f expected_normal =
+ Eigen::Vector3f(fblob[offset_p], fblob[offset_p + 1], fblob[offset_p + 2])
+ .normalized();
+
+ Eigen::Vector3f actual_normal =
+ Eigen::Vector3f(fblob[offset_vn], fblob[offset_vn + 1], fblob[offset_vn + 2])
+ .normalized();
+
+ EXPECT_NEAR(expected_normal.x(), actual_normal.x(), 1e-4);
+ EXPECT_NEAR(expected_normal.y(), actual_normal.y(), 1e-4);
+ EXPECT_NEAR(expected_normal.z(), actual_normal.z(), 1e-4);
+ }
+
remove ("test_obj.obj");
remove ("test_obj.mtl");
}
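// The normal checks added above walk the raw PCLPointCloud2 blob: blob.fields
// records the byte offset of every field and blob.data stores the points back
// to back, point_step bytes apart. A minimal sketch (not part of the test) of
// the same access pattern, assuming the field of interest is a FLOAT32:
#include <pcl/PCLPointCloud2.h>
#include <cstddef>
#include <cstring>

float
readFloatField (const pcl::PCLPointCloud2& blob, std::size_t point, std::size_t field)
{
  float value;
  // start of the point plus the byte offset of the requested field inside it
  const auto* src = blob.data.data () + point * blob.point_step + blob.fields[field].offset;
  std::memcpy (&value, src, sizeof (float)); // memcpy avoids alignment/aliasing issues
  return value;
}
// e.g. readFloatField (blob, i, 3) reads the field described by blob.fields[3]
// ("normal_x" in the test above) of point i.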
+TEST(PCL, loadOBJWithoutFaces)
+{
+ std::ofstream fs;
+ fs.open ("test_obj.obj");
+ fs << "v -1.000000 -1.000000 1.000000\n"
+ "v -1.000000 1.000000 1.000000\n"
+ "v 1.000000 -1.000000 1.000000\n"
+ "v 1.000000 1.000000 1.000000\n"
+ "vn -1.0000 0.0000 0.0000\n"
+ "vn 0.0000 0.0000 -1.0000\n"
+ "vn 1.0000 0.0000 0.0000\n"
+ "vn 0.0000 1.0000 0.0000\n";
+
+ fs.close ();
+ pcl::PointCloud<pcl::PointNormal> point_cloud;
+ pcl::io::loadOBJFile("test_obj.obj", point_cloud);
+ EXPECT_EQ(point_cloud.size(), 4);
+ EXPECT_NEAR(point_cloud[0].normal_x, -1.0, 1e-5);
+ EXPECT_NEAR(point_cloud[1].normal_z, -1.0, 1e-5);
+ EXPECT_NEAR(point_cloud[2].normal_x, 1.0, 1e-5);
+ EXPECT_NEAR(point_cloud[3].normal_y, 1.0, 1e-5);
+ EXPECT_NEAR(point_cloud[1].normal_y, 0.0, 1e-5);
+ EXPECT_NEAR(point_cloud[3].normal_z, 0.0, 1e-5);
+ remove ("test_obj.obj");
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct PointXYZFPFH33
const auto timestamp = pcl::getTimestamp(time);
- EXPECT_EQ(timestamp, "19700101T" + getTimeOffset() + "0000");
+ EXPECT_EQ(timestamp.size(), 15);
}
TEST(PCL, TestTimestampGeneratorWithFraction)
const auto timestamp = pcl::getTimestamp(dt);
- EXPECT_EQ(timestamp, "19700101T" + getTimeOffset() + "0000.123456");
+ ASSERT_EQ(timestamp.size(), 22);
+ EXPECT_EQ(timestamp.substr(15), ".123456");
}
TEST(PCL, TestTimestampGeneratorWithSmallFraction)
const auto timestamp = pcl::getTimestamp(dt);
- EXPECT_EQ(timestamp, "19700101T" + getTimeOffset() + "0000.000123");
+ ASSERT_EQ(timestamp.size(), 22);
+ EXPECT_EQ(timestamp.substr(15), ".000123");
}
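// Why the length checks above use 15 and 22: the timestamps produced by
// pcl::getTimestamp follow the pattern YYYYMMDDThhmmss, i.e. 8 date digits,
// a literal 'T' and 6 time digits (15 characters); when a sub-second fraction
// is present, the ".ffffff" suffix adds 7 more characters, giving 22. The
// exact digits depend on the local time offset, which is why the tests now
// compare lengths and the fractional part instead of a full fixed string.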
TEST(PCL, TestParseTimestamp)
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS kdtree)
set(OPT_DEPS io) # io is not a mandatory dependency in kdtree
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT (build AND BUILD_io))
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS keypoints)
set(OPT_DEPS io) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT (build AND BUILD_io))
set(SUBSYS_DESC "Point cloud library ml module unit tests")
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS ml)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
set(SUBSYS_DESC "Point cloud library octree module unit tests")
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS octree)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
PCL_ADD_TEST(a_octree_test test_octree
FILES test_octree.cpp
- LINK_WITH pcl_gtest pcl_common)
+ LINK_WITH pcl_gtest pcl_common pcl_octree)
PCL_ADD_TEST(a_octree_iterator_test test_octree_iterator
FILES test_octree_iterator.cpp
LINK_WITH pcl_gtest pcl_common pcl_octree)
for (int point = 0; point < 15; point++)
{
- // gereate a random point
+ // generate a random point
PointXYZ newPoint (1.0, 2.0, 3.0);
// OctreePointCloudPointVector can store all points..
for (int point = 0; point < pointcount; point++)
{
- // gereate a random point
+ // generate a random point
PointXYZ newPoint (static_cast<float> (1024.0 * rand () / RAND_MAX),
static_cast<float> (1024.0 * rand () / RAND_MAX),
static_cast<float> (1024.0 * rand () / RAND_MAX));
for (int point = 0; point < pointcount; point++)
{
- // gereate a random point
+ // generate a random point
PointXYZ newPoint (static_cast<float> (1024.0 * rand () / RAND_MAX),
static_cast<float> (1024.0 * rand () / RAND_MAX),
static_cast<float> (1024.0 * rand () / RAND_MAX));
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS outofcore)
set(OPT_DEPS) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
return()
endif()
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
PCL_ADD_TEST (outofcore_test test_outofcore
FILES test_outofcore.cpp
LINK_WITH pcl_gtest pcl_common pcl_io pcl_filters pcl_outofcore pcl_visualization)
#include <pcl/test/gtest.h>
+#include <list>
#include <vector>
#include <iostream>
#include <random>
bool
compPt (const PointT &p1, const PointT &p2)
{
- return !(p1.x != p2.x || p1.y != p2.y || p1.z != p2.z);
+ return (p1.x == p2.x && p1.y == p2.y && p1.z == p2.z);
}
TEST (PCL, Outofcore_Octree_Build)
AlignedPointTVector some_points;
for (unsigned int i=0; i< numPts; i++)
- some_points.push_back (PointT (static_cast<float>(rand () % 1024), static_cast<float>(rand () % 1024), static_cast<float>(rand () % 1024)));
+ some_points.emplace_back(static_cast<float>(rand () % 1024), static_cast<float>(rand () % 1024), static_cast<float>(rand () % 1024));
//(Case 1)
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS people)
set(OPT_DEPS) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
return()
endif()
-include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
PCL_ADD_TEST(a_people_detection_test test_people_detection
FILES test_people_groundBasedPeopleDetectionApp.cpp
LINK_WITH pcl_gtest pcl_common pcl_io pcl_kdtree pcl_search pcl_features pcl_sample_consensus pcl_filters pcl_segmentation pcl_people
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS recognition)
set(OPT_DEPS keypoints) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
PCL_ADD_TEST(a_recognition_ism_test test_recognition_ism
FILES test_recognition_ism.cpp
- LINK_WITH pcl_gtest pcl_io pcl_features
+ LINK_WITH pcl_gtest pcl_io pcl_features pcl_recognition
ARGUMENTS "${PCL_SOURCE_DIR}/test/ism_train.pcd" "${PCL_SOURCE_DIR}/test/ism_test.pcd")
if(BUILD_keypoints)
set(SUBSYS_NAME tests_registration)
set(SUBSYS_DESC "Point cloud library registration module unit tests")
-PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS registration)
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS io registration)
set(OPT_DEPS io) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
*/
#include <pcl/test/gtest.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/registration/correspondence_estimation_normal_shooting.h>
#include <pcl/features/normal_3d.h>
#include <pcl/kdtree/kdtree.h>
auto cloud1 (pcl::make_shared<pcl::PointCloud<PointSource>> ());
auto cloud2 (pcl::make_shared<pcl::PointCloud<PointTarget>> ());
- // Defining two parallel planes differing only by the y co-ordinate
+ // Defining two parallel planes differing only by the y coordinate
for (std::size_t i = 0; i < 50; ++i)
{
for (std::size_t j = 0; j < 25; ++j)
fpcs_ia.setNumberOfThreads (nr_threads);
fpcs_ia.setApproxOverlap (approx_overlap);
fpcs_ia.setDelta (delta, true);
+  fpcs_ia.setScoreThreshold (0.025); // if the score drops below this threshold, FPCS can stop early because the solution is already very good
fpcs_ia.setNumberOfSamples (nr_samples);
// align
fpcs_ia.align (source_aligned);
EXPECT_EQ (source_aligned.size (), cloud_source.size ());
- // check for correct coarse transformation marix
- //Eigen::Matrix4f transform_res_from_fpcs = fpcs_ia.getFinalTransformation ();
- //for (int i = 0; i < 4; ++i)
- // for (int j = 0; j < 4; ++j)
- // EXPECT_NEAR (transform_res_from_fpcs (i,j), transform_from_fpcs[i][j], 0.5);
+ // check for correct coarse transformation matrix
+ Eigen::Matrix4f transform_res_from_fpcs = fpcs_ia.getFinalTransformation ();
+ for (int i = 0; i < 4; ++i)
+ for (int j = 0; j < 4; ++j)
+ EXPECT_NEAR (transform_res_from_fpcs (i,j), transform_from_fpcs[i][j], 0.25);
}
constexpr int nr_samples = 100;
const float transform_from_fpcs [4][4] = {
- { -0.0019f, 0.8266f, -0.5628f, -0.0378f },
+ { -0.0019f, 0.8266f, -0.5628f, 0.0378f },
{ -0.9999f, -0.0094f, -0.0104f, 0.9997f },
{ -0.0139f, 0.5627f, 0.8265f, 0.0521f },
{ 0.f, 0.f, 0.f, 1.f }
using namespace pcl::io;
using namespace pcl::registration;
-PointCloud<PointXYZI> cloud_source, cloud_target;
+PointCloud<PointXYZ> cloud_source, cloud_target;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, KFPCSInitialAlignment)
const auto previous_verbosity_level = pcl::console::getVerbosityLevel();
pcl::console::setVerbosityLevel(pcl::console::L_VERBOSE);
// create shared pointers
- PointCloud<PointXYZI>::Ptr cloud_source_ptr, cloud_target_ptr;
+ PointCloud<PointXYZ>::Ptr cloud_source_ptr, cloud_target_ptr;
cloud_source_ptr = cloud_source.makeShared ();
cloud_target_ptr = cloud_target.makeShared ();
// initialize k-fpcs
- PointCloud <PointXYZI> cloud_source_aligned;
- KFPCSInitialAlignment <PointXYZI, PointXYZI> kfpcs_ia;
+ PointCloud <PointXYZ> cloud_source_aligned;
+ KFPCSInitialAlignment <PointXYZ, PointXYZ> kfpcs_ia;
kfpcs_ia.setInputSource (cloud_source_ptr);
kfpcs_ia.setInputTarget (cloud_target_ptr);
kfpcs_ia.setApproxOverlap (approx_overlap);
kfpcs_ia.setDelta (voxel_size, false);
kfpcs_ia.setScoreThreshold (abort_score);
+ kfpcs_ia.setMaximumIterations (100);
// repeat alignment 2 times to increase probability to ~99.99%
constexpr float max_angle3d = 0.1745f, max_translation3d = 1.f;
{
if (argc < 3)
{
- std::cerr << "No test files given. Please download `bun0.pcd` and `bun4.pcd` pass their path to the test." << std::endl;
+    std::cerr << "No test files given. Please download `office1_keypoints.pcd` and `office2_keypoints.pcd` and pass their paths to the test." << std::endl;
return (-1);
}
// Input
if (loadPCDFile (argv[1], cloud_source) < 0)
{
- std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ std::cerr << "Failed to read test file. Please download `office1_keypoints.pcd` and pass its path to the test." << std::endl;
return (-1);
}
if (loadPCDFile (argv[2], cloud_target) < 0)
{
- std::cerr << "Failed to read test file. Please download `bun4.pcd` and pass its path to the test." << std::endl;
+ std::cerr << "Failed to read test file. Please download `office2_keypoints.pcd` and pass its path to the test." << std::endl;
return (-1);
}
constexpr int nr_threads = 1;
constexpr float voxel_size = 0.1f;
constexpr float approx_overlap = 0.9f;
-constexpr float abort_score = 0.0f;
+constexpr float abort_score = 0.4f;
const float transformation_office1_office2 [4][4] = {
{ -0.6946f, -0.7194f, -0.0051f, -3.6352f },
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// This tests whether the ICP algorithm can successfully find the transformation of a cloud that has been
-// moved 0.7 in x direction.
+// moved 0.2 in the z direction.
// It indirectly tests that the KDTree doesn't get an empty input cloud, see #3624
// It is more or less a copy of https://github.com/PointCloudLibrary/pcl/blob/cc7fe363c6463a0abc617b1e17e94ab4bd4169ef/doc/tutorials/content/sources/iterative_closest_point/iterative_closest_point.cpp
TEST(PCL, ICP_translated)
{
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in(new pcl::PointCloud<pcl::PointXYZ>(5,1));
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in(new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>);
// Fill in the CloudIn data
- for (auto& point : *cloud_in)
- {
- point.x = 1024 * rand() / (RAND_MAX + 1.0f);
- point.y = 1024 * rand() / (RAND_MAX + 1.0f);
- point.z = 1024 * rand() / (RAND_MAX + 1.0f);
- }
+ *cloud_in = cloud_source;
*cloud_out = *cloud_in;
for (auto& point : *cloud_out)
- point.x += 0.7f;
+ point.z += 0.2f;
pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
icp.setInputSource(cloud_in);
icp.setInputTarget(cloud_out);
+ icp.setMaximumIterations (50);
pcl::PointCloud<pcl::PointXYZ> Final;
icp.align(Final);
EXPECT_LT(icp.getFitnessScore(), 1e-6);
  // Ensure that the translation found is within an acceptable threshold.
- EXPECT_NEAR(icp.getFinalTransformation()(0, 3), 0.7, 2e-3);
+ EXPECT_NEAR(icp.getFinalTransformation()(0, 0), 1.0, 2e-3);
+ EXPECT_NEAR(icp.getFinalTransformation()(1, 1), 1.0, 2e-3);
+ EXPECT_NEAR(icp.getFinalTransformation()(2, 2), 1.0, 2e-3);
+ EXPECT_NEAR(icp.getFinalTransformation()(0, 3), 0.0, 2e-3);
EXPECT_NEAR(icp.getFinalTransformation()(1, 3), 0.0, 2e-3);
- EXPECT_NEAR(icp.getFinalTransformation()(2, 3), 0.0, 2e-3);
+ EXPECT_NEAR(icp.getFinalTransformation()(2, 3), 0.2, 2e-3);
}
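// The assertions above read individual entries of the 4x4 rigid transform:
// the top-left 3x3 block is the rotation (expected to stay the identity) and
// the last column holds the translation (expected to be (0, 0, 0.2)).
// A minimal sketch of the ground-truth matrix those EXPECT_NEAR checks encode:
#include <Eigen/Core>
#include <iostream>

int
main ()
{
  Eigen::Matrix4f ground_truth = Eigen::Matrix4f::Identity ();
  ground_truth (2, 3) = 0.2f; // translation of 0.2 along z, no rotation
  std::cout << ground_truth << "\n";
}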
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
- // Tranpose the cloud_model
+ // Transpose the cloud_model
/*for (std::size_t i = 0; i < cloud_model.size (); ++i)
{
// cloud_model[i].z += 1;
// Check if the estimation with correspondences gives the same results
Eigen::Matrix4f T_SVD_2;
pcl::Correspondences corr; corr.reserve (source->size ());
- for (std::size_t i=0; i<source->size (); ++i) corr.push_back (pcl::Correspondence (i, i, 0.f));
+ for (std::size_t i=0; i<source->size (); ++i) corr.emplace_back(i, i, 0.f);
trans_est_svd.estimateRigidTransformation(*source, *target, corr, T_SVD_2);
const Eigen::Quaternionf R_SVD_2 (T_SVD_2.topLeftCorner <3, 3> ());
// Check if the estimation with correspondences gives the same results
Eigen::Matrix4f T_DQ_2;
pcl::Correspondences corr; corr.reserve (source->size ());
- for (std::size_t i=0; i<source->size (); ++i) corr.push_back (pcl::Correspondence (i, i, 0.f));
+ for (std::size_t i=0; i<source->size (); ++i) corr.emplace_back(i, i, 0.f);
trans_est_dual_quaternion.estimateRigidTransformation(*source, *target, corr, T_DQ_2);
const Eigen::Quaternionf R_DQ_2 (T_DQ_2.topLeftCorner <3, 3> ());
pcl::Correspondences corr;
corr.reserve (source->size ());
for (std::size_t i = 0; i < source->size (); ++i)
- corr.push_back (pcl::Correspondence (i, i, 0.f));
+ corr.emplace_back(i, i, 0.f);
trans_est_lm_float.estimateRigidTransformation (*source, *target, corr, T_LM_2_float);
const Eigen::Quaternionf R_LM_2_float (T_LM_2_float.topLeftCorner <3, 3> ());
corr.clear ();
corr.reserve (source->size ());
for (std::size_t i = 0; i < source->size (); ++i)
- corr.push_back (pcl::Correspondence (i, i, 0.f));
+ corr.emplace_back(i, i, 0.f);
trans_est_lm_double.estimateRigidTransformation (*source, *target, corr, T_LM_2_double);
const Eigen::Quaterniond R_LM_2_double (T_LM_2_double.topLeftCorner <3, 3> ());
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
- // Tranpose the cloud_model
+ // Transpose the cloud_model
/*for (std::size_t i = 0; i < cloud_model.size (); ++i)
{
// cloud_model[i].z += 1;
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS sample_consensus)
set(OPT_DEPS io)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
*
*/
-#include <pcl/test/gtest.h>
-#include <pcl/pcl_tests.h> // for EXPECT_XYZ_NEAR
-
#include <pcl/sample_consensus/ransac.h>
-#include <pcl/sample_consensus/sac_model_sphere.h>
-#include <pcl/sample_consensus/sac_model_cone.h>
-#include <pcl/sample_consensus/sac_model_cylinder.h>
#include <pcl/sample_consensus/sac_model_circle.h>
#include <pcl/sample_consensus/sac_model_circle3d.h>
-#include <pcl/sample_consensus/sac_model_normal_sphere.h>
+#include <pcl/sample_consensus/sac_model_cone.h>
+#include <pcl/sample_consensus/sac_model_cylinder.h>
#include <pcl/sample_consensus/sac_model_ellipse3d.h>
+#include <pcl/sample_consensus/sac_model_normal_sphere.h>
+#include <pcl/sample_consensus/sac_model_sphere.h>
+#include <pcl/sample_consensus/sac_model_torus.h>
+#include <pcl/test/gtest.h>
+#include <pcl/pcl_tests.h> // for EXPECT_XYZ_NEAR
using namespace pcl;
using SampleConsensusModelConePtr = SampleConsensusModelCone<PointXYZ, Normal>::Ptr;
using SampleConsensusModelCircle2DPtr = SampleConsensusModelCircle2D<PointXYZ>::Ptr;
using SampleConsensusModelCircle3DPtr = SampleConsensusModelCircle3D<PointXYZ>::Ptr;
-using SampleConsensusModelCylinderPtr = SampleConsensusModelCylinder<PointXYZ, Normal>::Ptr;
-using SampleConsensusModelNormalSpherePtr = SampleConsensusModelNormalSphere<PointXYZ, Normal>::Ptr;
+using SampleConsensusModelCylinderPtr =
+ SampleConsensusModelCylinder<PointXYZ, Normal>::Ptr;
+using SampleConsensusModelNormalSpherePtr =
+ SampleConsensusModelNormalSphere<PointXYZ, Normal>::Ptr;
using SampleConsensusModelEllipse3DPtr = SampleConsensusModelEllipse3D<PointXYZ>::Ptr;
+using SampleConsensusModelTorusPtr = SampleConsensusModelTorus<PointXYZ, Normal>::Ptr;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelSphere, RANSAC)
+TEST(SampleConsensusModelSphere, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.resize (10);
- cloud[0].getVector3fMap () << 1.7068f, 1.0684f, 2.2147f;
- cloud[1].getVector3fMap () << 2.4708f, 2.3081f, 1.1736f;
- cloud[2].getVector3fMap () << 2.7609f, 1.9095f, 1.3574f;
- cloud[3].getVector3fMap () << 2.8016f, 1.6704f, 1.5009f;
- cloud[4].getVector3fMap () << 1.8517f, 2.0276f, 1.0112f;
- cloud[5].getVector3fMap () << 1.8726f, 1.3539f, 2.7523f;
- cloud[6].getVector3fMap () << 2.5179f, 2.3218f, 1.2074f;
- cloud[7].getVector3fMap () << 2.4026f, 2.5114f, 2.7588f;
- cloud[8].getVector3fMap () << 2.6999f, 2.5606f, 1.5571f;
- cloud[9].getVector3fMap () << 0.0000f, 0.0000f, 0.0000f;
+ cloud.resize(10);
+ cloud[0].getVector3fMap() << 1.7068f, 1.0684f, 2.2147f;
+ cloud[1].getVector3fMap() << 2.4708f, 2.3081f, 1.1736f;
+ cloud[2].getVector3fMap() << 2.7609f, 1.9095f, 1.3574f;
+ cloud[3].getVector3fMap() << 2.8016f, 1.6704f, 1.5009f;
+ cloud[4].getVector3fMap() << 1.8517f, 2.0276f, 1.0112f;
+ cloud[5].getVector3fMap() << 1.8726f, 1.3539f, 2.7523f;
+ cloud[6].getVector3fMap() << 2.5179f, 2.3218f, 1.2074f;
+ cloud[7].getVector3fMap() << 2.4026f, 2.5114f, 2.7588f;
+ cloud[8].getVector3fMap() << 2.6999f, 2.5606f, 1.5571f;
+ cloud[9].getVector3fMap() << 0.0000f, 0.0000f, 0.0000f;
// Create a shared sphere model pointer directly
- SampleConsensusModelSpherePtr model (new SampleConsensusModelSphere<PointXYZ> (cloud.makeShared ()));
+ SampleConsensusModelSpherePtr model(
+ new SampleConsensusModelSphere<PointXYZ>(cloud.makeShared()));
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (4, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (9, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(9, inliers.size());
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (4, coeff.size ());
- EXPECT_NEAR (2, coeff[0] / coeff[3], 1e-2);
- EXPECT_NEAR (2, coeff[1] / coeff[3], 1e-2);
- EXPECT_NEAR (2, coeff[2] / coeff[3], 1e-2);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(4, coeff.size());
+ EXPECT_NEAR(2, coeff[0] / coeff[3], 1e-2);
+ EXPECT_NEAR(2, coeff[1] / coeff[3], 1e-2);
+ EXPECT_NEAR(2, coeff[2] / coeff[3], 1e-2);
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (4, coeff_refined.size ());
- EXPECT_NEAR (2, coeff_refined[0] / coeff_refined[3], 1e-2);
- EXPECT_NEAR (2, coeff_refined[1] / coeff_refined[3], 1e-2);
- EXPECT_NEAR (2, coeff_refined[2] / coeff_refined[3], 1e-2);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(4, coeff_refined.size());
+ EXPECT_NEAR(2, coeff_refined[0] / coeff_refined[3], 1e-2);
+ EXPECT_NEAR(2, coeff_refined[1] / coeff_refined[3], 1e-2);
+ EXPECT_NEAR(2, coeff_refined[2] / coeff_refined[3], 1e-2);
}
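// The test above follows PCL's generic RANSAC pattern: wrap a cloud in a
// SampleConsensusModel*, hand it to RandomSampleConsensus together with an
// inlier threshold, then query the model coefficients and inliers. A minimal
// sketch of the same flow for a plane model (the point values are illustrative only):
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <Eigen/Core>
#include <iostream>

int
main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  for (float x = -1.f; x <= 1.f; x += 0.1f)
    for (float y = -1.f; y <= 1.f; y += 0.1f)
      cloud->push_back (pcl::PointXYZ (x, y, 0.5f)); // points on the plane z = 0.5

  pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr model (
      new pcl::SampleConsensusModelPlane<pcl::PointXYZ> (cloud));
  pcl::RandomSampleConsensus<pcl::PointXYZ> sac (model, 0.01); // inlier threshold
  if (sac.computeModel ())
  {
    Eigen::VectorXf coeff;
    sac.getModelCoefficients (coeff); // (nx, ny, nz, d) for a plane
    std::cout << coeff.transpose () << "\n";
  }
}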
//////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT>
-class SampleConsensusModelSphereTest : private SampleConsensusModelSphere<PointT>
-{
- public:
- using SampleConsensusModelSphere<PointT>::SampleConsensusModelSphere;
- using SampleConsensusModelSphere<PointT>::countWithinDistanceStandard;
-#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
- using SampleConsensusModelSphere<PointT>::countWithinDistanceSSE;
+class SampleConsensusModelSphereTest : private SampleConsensusModelSphere<PointT> {
+public:
+ using SampleConsensusModelSphere<PointT>::SampleConsensusModelSphere;
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceStandard;
+#if defined(__SSE__) && defined(__SSE2__) && defined(__SSE4_1__)
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceSSE;
#endif
-#if defined (__AVX__) && defined (__AVX2__)
- using SampleConsensusModelSphere<PointT>::countWithinDistanceAVX;
+#if defined(__AVX__) && defined(__AVX2__)
+ using SampleConsensusModelSphere<PointT>::countWithinDistanceAVX;
#endif
};
-TEST (SampleConsensusModelSphere, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+TEST(SampleConsensusModelSphere,
+ SIMD_countWithinDistance) // Test if all countWithinDistance implementations return
+ // the same value
{
- const auto seed = static_cast<unsigned> (std::time (nullptr));
- srand (seed);
- for (size_t i=0; i<100; i++) // Run as often as you like
+ const auto seed = static_cast<unsigned>(std::time(nullptr));
+ srand(seed);
+ for (size_t i = 0; i < 100; i++) // Run as often as you like
{
// Generate a cloud with 1000 random points
PointCloud<PointXYZ> cloud;
pcl::Indices indices;
- cloud.resize (1000);
- for (std::size_t idx = 0; idx < cloud.size (); ++idx)
- {
- cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- if (rand () % 3 != 0)
- {
- indices.push_back (static_cast<int> (idx));
+ cloud.resize(1000);
+ for (std::size_t idx = 0; idx < cloud.size(); ++idx) {
+ cloud[idx].x = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ if (rand() % 3 != 0) {
+ indices.push_back(static_cast<int>(idx));
}
}
- SampleConsensusModelSphereTest<PointXYZ> model (cloud.makeShared (), indices, true);
+ SampleConsensusModelSphereTest<PointXYZ> model(cloud.makeShared(), indices, true);
// Generate random sphere model parameters
Eigen::VectorXf model_coefficients(4);
- model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
- 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
- 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
- 0.15 * static_cast<float> (rand ()) / RAND_MAX; // center and radius
+ model_coefficients << 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0,
+ 0.15 * static_cast<float>(rand()) / RAND_MAX; // center and radius
- const double threshold = 0.15 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.1]
+ const double threshold =
+      0.15 * static_cast<double>(rand()) / RAND_MAX; // threshold in [0; 0.15]
// The number of inliers is usually somewhere between 0 and 10
- const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
- PCL_DEBUG ("seed=%lu, i=%lu, model=(%f, %f, %f, %f), threshold=%f, res_standard=%lu\n", seed, i,
- model_coefficients(0), model_coefficients(1), model_coefficients(2), model_coefficients(3), threshold, res_standard);
-#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
- const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
- ASSERT_EQ (res_standard, res_sse);
+ const auto res_standard =
+ model.countWithinDistanceStandard(model_coefficients, threshold); // Standard
+ PCL_DEBUG(
+ "seed=%lu, i=%lu, model=(%f, %f, %f, %f), threshold=%f, res_standard=%lu\n",
+ seed,
+ i,
+ model_coefficients(0),
+ model_coefficients(1),
+ model_coefficients(2),
+ model_coefficients(3),
+ threshold,
+ res_standard);
+#if defined(__SSE__) && defined(__SSE2__) && defined(__SSE4_1__)
+ const auto res_sse =
+ model.countWithinDistanceSSE(model_coefficients, threshold); // SSE
+ ASSERT_EQ(res_standard, res_sse);
#endif
-#if defined (__AVX__) && defined (__AVX2__)
- const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
- ASSERT_EQ (res_standard, res_avx);
+#if defined(__AVX__) && defined(__AVX2__)
+ const auto res_avx =
+ model.countWithinDistanceAVX(model_coefficients, threshold); // AVX
+ ASSERT_EQ(res_standard, res_avx);
#endif
}
}
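// What the scalar, SSE and AVX paths compared above must agree on: for the
// sphere model, a point counts as an inlier when the absolute difference
// between its distance to the center and the radius is within the threshold.
// A scalar sketch of that reference semantics (not PCL's internal implementation):
#include <Eigen/Core>
#include <cmath>
#include <cstddef>
#include <vector>

std::size_t
countSphereInliers (const std::vector<Eigen::Vector3f>& points,
                    const Eigen::Vector3f& center,
                    float radius,
                    float threshold)
{
  std::size_t count = 0;
  for (const auto& p : points)
    if (std::abs ((p - center).norm () - radius) <= threshold)
      ++count;
  return count;
}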
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelNormalSphere, RANSAC)
+TEST(SampleConsensusModelNormalSphere, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.resize (27); normals.resize (27);
- cloud[ 0].getVector3fMap () << -0.014695f, 0.009549f, 0.954775f;
- cloud[ 1].getVector3fMap () << 0.014695f, 0.009549f, 0.954775f;
- cloud[ 2].getVector3fMap () << -0.014695f, 0.040451f, 0.954775f;
- cloud[ 3].getVector3fMap () << 0.014695f, 0.040451f, 0.954775f;
- cloud[ 4].getVector3fMap () << -0.009082f, -0.015451f, 0.972049f;
- cloud[ 5].getVector3fMap () << 0.009082f, -0.015451f, 0.972049f;
- cloud[ 6].getVector3fMap () << -0.038471f, 0.009549f, 0.972049f;
- cloud[ 7].getVector3fMap () << 0.038471f, 0.009549f, 0.972049f;
- cloud[ 8].getVector3fMap () << -0.038471f, 0.040451f, 0.972049f;
- cloud[ 9].getVector3fMap () << 0.038471f, 0.040451f, 0.972049f;
- cloud[10].getVector3fMap () << -0.009082f, 0.065451f, 0.972049f;
- cloud[11].getVector3fMap () << 0.009082f, 0.065451f, 0.972049f;
- cloud[12].getVector3fMap () << -0.023776f, -0.015451f, 0.982725f;
- cloud[13].getVector3fMap () << 0.023776f, -0.015451f, 0.982725f;
- cloud[14].getVector3fMap () << -0.023776f, 0.065451f, 0.982725f;
- cloud[15].getVector3fMap () << 0.023776f, 0.065451f, 0.982725f;
- cloud[16].getVector3fMap () << -0.000000f, -0.025000f, 1.000000f;
- cloud[17].getVector3fMap () << 0.000000f, -0.025000f, 1.000000f;
- cloud[18].getVector3fMap () << -0.029389f, -0.015451f, 1.000000f;
- cloud[19].getVector3fMap () << 0.029389f, -0.015451f, 1.000000f;
- cloud[20].getVector3fMap () << -0.047553f, 0.009549f, 1.000000f;
- cloud[21].getVector3fMap () << 0.047553f, 0.009549f, 1.000000f;
- cloud[22].getVector3fMap () << -0.047553f, 0.040451f, 1.000000f;
- cloud[23].getVector3fMap () << 0.047553f, 0.040451f, 1.000000f;
- cloud[24].getVector3fMap () << -0.029389f, 0.065451f, 1.000000f;
- cloud[25].getVector3fMap () << 0.029389f, 0.065451f, 1.000000f;
- cloud[26].getVector3fMap () << 0.000000f, 0.075000f, 1.000000f;
-
- normals[ 0].getNormalVector3fMap () << -0.293893f, -0.309017f, -0.904509f;
- normals[ 1].getNormalVector3fMap () << 0.293893f, -0.309017f, -0.904508f;
- normals[ 2].getNormalVector3fMap () << -0.293893f, 0.309017f, -0.904509f;
- normals[ 3].getNormalVector3fMap () << 0.293893f, 0.309017f, -0.904508f;
- normals[ 4].getNormalVector3fMap () << -0.181636f, -0.809017f, -0.559017f;
- normals[ 5].getNormalVector3fMap () << 0.181636f, -0.809017f, -0.559017f;
- normals[ 6].getNormalVector3fMap () << -0.769421f, -0.309017f, -0.559017f;
- normals[ 7].getNormalVector3fMap () << 0.769421f, -0.309017f, -0.559017f;
- normals[ 8].getNormalVector3fMap () << -0.769421f, 0.309017f, -0.559017f;
- normals[ 9].getNormalVector3fMap () << 0.769421f, 0.309017f, -0.559017f;
- normals[10].getNormalVector3fMap () << -0.181636f, 0.809017f, -0.559017f;
- normals[11].getNormalVector3fMap () << 0.181636f, 0.809017f, -0.559017f;
- normals[12].getNormalVector3fMap () << -0.475528f, -0.809017f, -0.345491f;
- normals[13].getNormalVector3fMap () << 0.475528f, -0.809017f, -0.345491f;
- normals[14].getNormalVector3fMap () << -0.475528f, 0.809017f, -0.345491f;
- normals[15].getNormalVector3fMap () << 0.475528f, 0.809017f, -0.345491f;
- normals[16].getNormalVector3fMap () << -0.000000f, -1.000000f, 0.000000f;
- normals[17].getNormalVector3fMap () << 0.000000f, -1.000000f, 0.000000f;
- normals[18].getNormalVector3fMap () << -0.587785f, -0.809017f, 0.000000f;
- normals[19].getNormalVector3fMap () << 0.587785f, -0.809017f, 0.000000f;
- normals[20].getNormalVector3fMap () << -0.951057f, -0.309017f, 0.000000f;
- normals[21].getNormalVector3fMap () << 0.951057f, -0.309017f, 0.000000f;
- normals[22].getNormalVector3fMap () << -0.951057f, 0.309017f, 0.000000f;
- normals[23].getNormalVector3fMap () << 0.951057f, 0.309017f, 0.000000f;
- normals[24].getNormalVector3fMap () << -0.587785f, 0.809017f, 0.000000f;
- normals[25].getNormalVector3fMap () << 0.587785f, 0.809017f, 0.000000f;
- normals[26].getNormalVector3fMap () << 0.000000f, 1.000000f, 0.000000f;
+ cloud.resize(27);
+ normals.resize(27);
+ cloud[0].getVector3fMap() << -0.014695f, 0.009549f, 0.954775f;
+ cloud[1].getVector3fMap() << 0.014695f, 0.009549f, 0.954775f;
+ cloud[2].getVector3fMap() << -0.014695f, 0.040451f, 0.954775f;
+ cloud[3].getVector3fMap() << 0.014695f, 0.040451f, 0.954775f;
+ cloud[4].getVector3fMap() << -0.009082f, -0.015451f, 0.972049f;
+ cloud[5].getVector3fMap() << 0.009082f, -0.015451f, 0.972049f;
+ cloud[6].getVector3fMap() << -0.038471f, 0.009549f, 0.972049f;
+ cloud[7].getVector3fMap() << 0.038471f, 0.009549f, 0.972049f;
+ cloud[8].getVector3fMap() << -0.038471f, 0.040451f, 0.972049f;
+ cloud[9].getVector3fMap() << 0.038471f, 0.040451f, 0.972049f;
+ cloud[10].getVector3fMap() << -0.009082f, 0.065451f, 0.972049f;
+ cloud[11].getVector3fMap() << 0.009082f, 0.065451f, 0.972049f;
+ cloud[12].getVector3fMap() << -0.023776f, -0.015451f, 0.982725f;
+ cloud[13].getVector3fMap() << 0.023776f, -0.015451f, 0.982725f;
+ cloud[14].getVector3fMap() << -0.023776f, 0.065451f, 0.982725f;
+ cloud[15].getVector3fMap() << 0.023776f, 0.065451f, 0.982725f;
+ cloud[16].getVector3fMap() << -0.000000f, -0.025000f, 1.000000f;
+ cloud[17].getVector3fMap() << 0.000000f, -0.025000f, 1.000000f;
+ cloud[18].getVector3fMap() << -0.029389f, -0.015451f, 1.000000f;
+ cloud[19].getVector3fMap() << 0.029389f, -0.015451f, 1.000000f;
+ cloud[20].getVector3fMap() << -0.047553f, 0.009549f, 1.000000f;
+ cloud[21].getVector3fMap() << 0.047553f, 0.009549f, 1.000000f;
+ cloud[22].getVector3fMap() << -0.047553f, 0.040451f, 1.000000f;
+ cloud[23].getVector3fMap() << 0.047553f, 0.040451f, 1.000000f;
+ cloud[24].getVector3fMap() << -0.029389f, 0.065451f, 1.000000f;
+ cloud[25].getVector3fMap() << 0.029389f, 0.065451f, 1.000000f;
+ cloud[26].getVector3fMap() << 0.000000f, 0.075000f, 1.000000f;
+
+ normals[0].getNormalVector3fMap() << -0.293893f, -0.309017f, -0.904509f;
+ normals[1].getNormalVector3fMap() << 0.293893f, -0.309017f, -0.904508f;
+ normals[2].getNormalVector3fMap() << -0.293893f, 0.309017f, -0.904509f;
+ normals[3].getNormalVector3fMap() << 0.293893f, 0.309017f, -0.904508f;
+ normals[4].getNormalVector3fMap() << -0.181636f, -0.809017f, -0.559017f;
+ normals[5].getNormalVector3fMap() << 0.181636f, -0.809017f, -0.559017f;
+ normals[6].getNormalVector3fMap() << -0.769421f, -0.309017f, -0.559017f;
+ normals[7].getNormalVector3fMap() << 0.769421f, -0.309017f, -0.559017f;
+ normals[8].getNormalVector3fMap() << -0.769421f, 0.309017f, -0.559017f;
+ normals[9].getNormalVector3fMap() << 0.769421f, 0.309017f, -0.559017f;
+ normals[10].getNormalVector3fMap() << -0.181636f, 0.809017f, -0.559017f;
+ normals[11].getNormalVector3fMap() << 0.181636f, 0.809017f, -0.559017f;
+ normals[12].getNormalVector3fMap() << -0.475528f, -0.809017f, -0.345491f;
+ normals[13].getNormalVector3fMap() << 0.475528f, -0.809017f, -0.345491f;
+ normals[14].getNormalVector3fMap() << -0.475528f, 0.809017f, -0.345491f;
+ normals[15].getNormalVector3fMap() << 0.475528f, 0.809017f, -0.345491f;
+ normals[16].getNormalVector3fMap() << -0.000000f, -1.000000f, 0.000000f;
+ normals[17].getNormalVector3fMap() << 0.000000f, -1.000000f, 0.000000f;
+ normals[18].getNormalVector3fMap() << -0.587785f, -0.809017f, 0.000000f;
+ normals[19].getNormalVector3fMap() << 0.587785f, -0.809017f, 0.000000f;
+ normals[20].getNormalVector3fMap() << -0.951057f, -0.309017f, 0.000000f;
+ normals[21].getNormalVector3fMap() << 0.951057f, -0.309017f, 0.000000f;
+ normals[22].getNormalVector3fMap() << -0.951057f, 0.309017f, 0.000000f;
+ normals[23].getNormalVector3fMap() << 0.951057f, 0.309017f, 0.000000f;
+ normals[24].getNormalVector3fMap() << -0.587785f, 0.809017f, 0.000000f;
+ normals[25].getNormalVector3fMap() << 0.587785f, 0.809017f, 0.000000f;
+ normals[26].getNormalVector3fMap() << 0.000000f, 1.000000f, 0.000000f;
// Create a shared sphere model pointer directly
- SampleConsensusModelNormalSpherePtr model (new SampleConsensusModelNormalSphere<PointXYZ, Normal> (cloud.makeShared ()));
- model->setInputNormals (normals.makeShared ());
+ SampleConsensusModelNormalSpherePtr model(
+ new SampleConsensusModelNormalSphere<PointXYZ, Normal>(cloud.makeShared()));
+ model->setInputNormals(normals.makeShared());
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (4, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (27, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(27, inliers.size());
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (4, coeff.size ());
- EXPECT_NEAR (0.000, coeff[0], 1e-2);
- EXPECT_NEAR (0.025, coeff[1], 1e-2);
- EXPECT_NEAR (1.000, coeff[2], 1e-2);
- EXPECT_NEAR (0.050, coeff[3], 1e-2);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(4, coeff.size());
+ EXPECT_NEAR(0.000, coeff[0], 1e-2);
+ EXPECT_NEAR(0.025, coeff[1], 1e-2);
+ EXPECT_NEAR(1.000, coeff[2], 1e-2);
+ EXPECT_NEAR(0.050, coeff[3], 1e-2);
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (4, coeff_refined.size ());
- EXPECT_NEAR (0.000, coeff_refined[0], 1e-2);
- EXPECT_NEAR (0.025, coeff_refined[1], 1e-2);
- EXPECT_NEAR (1.000, coeff_refined[2], 1e-2);
- EXPECT_NEAR (0.050, coeff_refined[3], 1e-2);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(4, coeff_refined.size());
+ EXPECT_NEAR(0.000, coeff_refined[0], 1e-2);
+ EXPECT_NEAR(0.025, coeff_refined[1], 1e-2);
+ EXPECT_NEAR(1.000, coeff_refined[2], 1e-2);
+ EXPECT_NEAR(0.050, coeff_refined[3], 1e-2);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelCone, RANSAC)
+TEST(SampleConsensusModelCone, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.resize (31); normals.resize (31);
-
- cloud[ 0].getVector3fMap () << -0.011247f, 0.200000f, 0.965384f;
- cloud[ 1].getVector3fMap () << 0.000000f, 0.200000f, 0.963603f;
- cloud[ 2].getVector3fMap () << 0.011247f, 0.200000f, 0.965384f;
- cloud[ 3].getVector3fMap () << -0.016045f, 0.175000f, 0.977916f;
- cloud[ 4].getVector3fMap () << -0.008435f, 0.175000f, 0.974038f;
- cloud[ 5].getVector3fMap () << 0.004218f, 0.175000f, 0.973370f;
- cloud[ 6].getVector3fMap () << 0.016045f, 0.175000f, 0.977916f;
- cloud[ 7].getVector3fMap () << -0.025420f, 0.200000f, 0.974580f;
- cloud[ 8].getVector3fMap () << 0.025420f, 0.200000f, 0.974580f;
- cloud[ 9].getVector3fMap () << -0.012710f, 0.150000f, 0.987290f;
- cloud[10].getVector3fMap () << -0.005624f, 0.150000f, 0.982692f;
- cloud[11].getVector3fMap () << 0.002812f, 0.150000f, 0.982247f;
- cloud[12].getVector3fMap () << 0.012710f, 0.150000f, 0.987290f;
- cloud[13].getVector3fMap () << -0.022084f, 0.175000f, 0.983955f;
- cloud[14].getVector3fMap () << 0.022084f, 0.175000f, 0.983955f;
- cloud[15].getVector3fMap () << -0.034616f, 0.200000f, 0.988753f;
- cloud[16].getVector3fMap () << 0.034616f, 0.200000f, 0.988753f;
- cloud[17].getVector3fMap () << -0.006044f, 0.125000f, 0.993956f;
- cloud[18].getVector3fMap () << 0.004835f, 0.125000f, 0.993345f;
- cloud[19].getVector3fMap () << -0.017308f, 0.150000f, 0.994376f;
- cloud[20].getVector3fMap () << 0.017308f, 0.150000f, 0.994376f;
- cloud[21].getVector3fMap () << -0.025962f, 0.175000f, 0.991565f;
- cloud[22].getVector3fMap () << 0.025962f, 0.175000f, 0.991565f;
- cloud[23].getVector3fMap () << -0.009099f, 0.125000f, 1.000000f;
- cloud[24].getVector3fMap () << 0.009099f, 0.125000f, 1.000000f;
- cloud[25].getVector3fMap () << -0.018199f, 0.150000f, 1.000000f;
- cloud[26].getVector3fMap () << 0.018199f, 0.150000f, 1.000000f;
- cloud[27].getVector3fMap () << -0.027298f, 0.175000f, 1.000000f;
- cloud[28].getVector3fMap () << 0.027298f, 0.175000f, 1.000000f;
- cloud[29].getVector3fMap () << -0.036397f, 0.200000f, 1.000000f;
- cloud[30].getVector3fMap () << 0.036397f, 0.200000f, 1.000000f;
-
- normals[ 0].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals[ 1].getNormalVector3fMap () << 0.000000f, -0.342020f, -0.939693f;
- normals[ 2].getNormalVector3fMap () << 0.290381f, -0.342020f, -0.893701f;
- normals[ 3].getNormalVector3fMap () << -0.552338f, -0.342020f, -0.760227f;
- normals[ 4].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals[ 5].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
- normals[ 6].getNormalVector3fMap () << 0.552337f, -0.342020f, -0.760227f;
- normals[ 7].getNormalVector3fMap () << -0.656282f, -0.342020f, -0.656283f;
- normals[ 8].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656283f;
- normals[ 9].getNormalVector3fMap () << -0.656283f, -0.342020f, -0.656282f;
- normals[10].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals[11].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
- normals[12].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656282f;
- normals[13].getNormalVector3fMap () << -0.760228f, -0.342020f, -0.552337f;
- normals[14].getNormalVector3fMap () << 0.760228f, -0.342020f, -0.552337f;
- normals[15].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
- normals[16].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
- normals[17].getNormalVector3fMap () << -0.624162f, -0.342020f, -0.624162f;
- normals[18].getNormalVector3fMap () << 0.499329f, -0.342020f, -0.687268f;
- normals[19].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
- normals[20].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
- normals[21].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290381f;
- normals[22].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290381f;
- normals[23].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals[24].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals[25].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals[26].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals[27].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals[28].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals[29].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals[30].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
+ cloud.resize(31);
+ normals.resize(31);
+
+ cloud[0].getVector3fMap() << -0.011247f, 0.200000f, 0.965384f;
+ cloud[1].getVector3fMap() << 0.000000f, 0.200000f, 0.963603f;
+ cloud[2].getVector3fMap() << 0.011247f, 0.200000f, 0.965384f;
+ cloud[3].getVector3fMap() << -0.016045f, 0.175000f, 0.977916f;
+ cloud[4].getVector3fMap() << -0.008435f, 0.175000f, 0.974038f;
+ cloud[5].getVector3fMap() << 0.004218f, 0.175000f, 0.973370f;
+ cloud[6].getVector3fMap() << 0.016045f, 0.175000f, 0.977916f;
+ cloud[7].getVector3fMap() << -0.025420f, 0.200000f, 0.974580f;
+ cloud[8].getVector3fMap() << 0.025420f, 0.200000f, 0.974580f;
+ cloud[9].getVector3fMap() << -0.012710f, 0.150000f, 0.987290f;
+ cloud[10].getVector3fMap() << -0.005624f, 0.150000f, 0.982692f;
+ cloud[11].getVector3fMap() << 0.002812f, 0.150000f, 0.982247f;
+ cloud[12].getVector3fMap() << 0.012710f, 0.150000f, 0.987290f;
+ cloud[13].getVector3fMap() << -0.022084f, 0.175000f, 0.983955f;
+ cloud[14].getVector3fMap() << 0.022084f, 0.175000f, 0.983955f;
+ cloud[15].getVector3fMap() << -0.034616f, 0.200000f, 0.988753f;
+ cloud[16].getVector3fMap() << 0.034616f, 0.200000f, 0.988753f;
+ cloud[17].getVector3fMap() << -0.006044f, 0.125000f, 0.993956f;
+ cloud[18].getVector3fMap() << 0.004835f, 0.125000f, 0.993345f;
+ cloud[19].getVector3fMap() << -0.017308f, 0.150000f, 0.994376f;
+ cloud[20].getVector3fMap() << 0.017308f, 0.150000f, 0.994376f;
+ cloud[21].getVector3fMap() << -0.025962f, 0.175000f, 0.991565f;
+ cloud[22].getVector3fMap() << 0.025962f, 0.175000f, 0.991565f;
+ cloud[23].getVector3fMap() << -0.009099f, 0.125000f, 1.000000f;
+ cloud[24].getVector3fMap() << 0.009099f, 0.125000f, 1.000000f;
+ cloud[25].getVector3fMap() << -0.018199f, 0.150000f, 1.000000f;
+ cloud[26].getVector3fMap() << 0.018199f, 0.150000f, 1.000000f;
+ cloud[27].getVector3fMap() << -0.027298f, 0.175000f, 1.000000f;
+ cloud[28].getVector3fMap() << 0.027298f, 0.175000f, 1.000000f;
+ cloud[29].getVector3fMap() << -0.036397f, 0.200000f, 1.000000f;
+ cloud[30].getVector3fMap() << 0.036397f, 0.200000f, 1.000000f;
+
+ normals[0].getNormalVector3fMap() << -0.290381f, -0.342020f, -0.893701f;
+ normals[1].getNormalVector3fMap() << 0.000000f, -0.342020f, -0.939693f;
+ normals[2].getNormalVector3fMap() << 0.290381f, -0.342020f, -0.893701f;
+ normals[3].getNormalVector3fMap() << -0.552338f, -0.342020f, -0.760227f;
+ normals[4].getNormalVector3fMap() << -0.290381f, -0.342020f, -0.893701f;
+ normals[5].getNormalVector3fMap() << 0.145191f, -0.342020f, -0.916697f;
+ normals[6].getNormalVector3fMap() << 0.552337f, -0.342020f, -0.760227f;
+ normals[7].getNormalVector3fMap() << -0.656282f, -0.342020f, -0.656283f;
+ normals[8].getNormalVector3fMap() << 0.656282f, -0.342020f, -0.656283f;
+ normals[9].getNormalVector3fMap() << -0.656283f, -0.342020f, -0.656282f;
+ normals[10].getNormalVector3fMap() << -0.290381f, -0.342020f, -0.893701f;
+ normals[11].getNormalVector3fMap() << 0.145191f, -0.342020f, -0.916697f;
+ normals[12].getNormalVector3fMap() << 0.656282f, -0.342020f, -0.656282f;
+ normals[13].getNormalVector3fMap() << -0.760228f, -0.342020f, -0.552337f;
+ normals[14].getNormalVector3fMap() << 0.760228f, -0.342020f, -0.552337f;
+ normals[15].getNormalVector3fMap() << -0.893701f, -0.342020f, -0.290380f;
+ normals[16].getNormalVector3fMap() << 0.893701f, -0.342020f, -0.290380f;
+ normals[17].getNormalVector3fMap() << -0.624162f, -0.342020f, -0.624162f;
+ normals[18].getNormalVector3fMap() << 0.499329f, -0.342020f, -0.687268f;
+ normals[19].getNormalVector3fMap() << -0.893701f, -0.342020f, -0.290380f;
+ normals[20].getNormalVector3fMap() << 0.893701f, -0.342020f, -0.290380f;
+ normals[21].getNormalVector3fMap() << -0.893701f, -0.342020f, -0.290381f;
+ normals[22].getNormalVector3fMap() << 0.893701f, -0.342020f, -0.290381f;
+ normals[23].getNormalVector3fMap() << -0.939693f, -0.342020f, 0.000000f;
+ normals[24].getNormalVector3fMap() << 0.939693f, -0.342020f, 0.000000f;
+ normals[25].getNormalVector3fMap() << -0.939693f, -0.342020f, 0.000000f;
+ normals[26].getNormalVector3fMap() << 0.939693f, -0.342020f, 0.000000f;
+ normals[27].getNormalVector3fMap() << -0.939693f, -0.342020f, 0.000000f;
+ normals[28].getNormalVector3fMap() << 0.939693f, -0.342020f, 0.000000f;
+ normals[29].getNormalVector3fMap() << -0.939693f, -0.342020f, 0.000000f;
+ normals[30].getNormalVector3fMap() << 0.939693f, -0.342020f, 0.000000f;
// Create a shared cylinder model pointer directly
- SampleConsensusModelConePtr model (new SampleConsensusModelCone<PointXYZ, Normal> (cloud.makeShared ()));
- model->setInputNormals (normals.makeShared ());
+ SampleConsensusModelConePtr model(
+ new SampleConsensusModelCone<PointXYZ, Normal>(cloud.makeShared()));
+ model->setInputNormals(normals.makeShared());
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (3, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(3, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (31, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(31, inliers.size());
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (7, coeff.size ());
- EXPECT_NEAR (0.000000, coeff[0], 1e-2);
- EXPECT_NEAR (0.100000, coeff[1], 1e-2);
- EXPECT_NEAR (0.349066, coeff[6], 1e-2);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(7, coeff.size());
+ EXPECT_NEAR(0.000000, coeff[0], 1e-2);
+ EXPECT_NEAR(0.100000, coeff[1], 1e-2);
+ EXPECT_NEAR(0.349066, coeff[6], 1e-2);
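+ // coeff[6] is the cone opening angle: 0.349066 rad is roughly 20 degrees.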
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (7, coeff_refined.size ());
- EXPECT_NEAR (0.349066, coeff_refined[6], 1e-2);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(7, coeff_refined.size());
+ EXPECT_NEAR(0.349066, coeff_refined[6], 1e-2);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelCylinder, RANSAC)
+TEST(SampleConsensusModelCylinder, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
- cloud.resize (20); normals.resize (20);
-
- cloud[ 0].getVector3fMap () << -0.499902f, 2.199701f, 0.000008f;
- cloud[ 1].getVector3fMap () << -0.875397f, 2.030177f, 0.050104f;
- cloud[ 2].getVector3fMap () << -0.995875f, 1.635973f, 0.099846f;
- cloud[ 3].getVector3fMap () << -0.779523f, 1.285527f, 0.149961f;
- cloud[ 4].getVector3fMap () << -0.373285f, 1.216488f, 0.199959f;
- cloud[ 5].getVector3fMap () << -0.052893f, 1.475973f, 0.250101f;
- cloud[ 6].getVector3fMap () << -0.036558f, 1.887591f, 0.299839f;
- cloud[ 7].getVector3fMap () << -0.335048f, 2.171994f, 0.350001f;
- cloud[ 8].getVector3fMap () << -0.745456f, 2.135528f, 0.400072f;
- cloud[ 9].getVector3fMap () << -0.989282f, 1.803311f, 0.449983f;
- cloud[10].getVector3fMap () << -0.900651f, 1.400701f, 0.500126f;
- cloud[11].getVector3fMap () << -0.539658f, 1.201468f, 0.550079f;
- cloud[12].getVector3fMap () << -0.151875f, 1.340951f, 0.599983f;
- cloud[13].getVector3fMap () << -0.000724f, 1.724373f, 0.649882f;
- cloud[14].getVector3fMap () << -0.188573f, 2.090983f, 0.699854f;
- cloud[15].getVector3fMap () << -0.587925f, 2.192257f, 0.749956f;
- cloud[16].getVector3fMap () << -0.927724f, 1.958846f, 0.800008f;
- cloud[17].getVector3fMap () << -0.976888f, 1.549655f, 0.849970f;
- cloud[18].getVector3fMap () << -0.702003f, 1.242707f, 0.899954f;
- cloud[19].getVector3fMap () << -0.289916f, 1.246296f, 0.950075f;
-
- normals[ 0].getNormalVector3fMap () << 0.000098f, 1.000098f, 0.000008f;
- normals[ 1].getNormalVector3fMap () << -0.750891f, 0.660413f, 0.000104f;
- normals[ 2].getNormalVector3fMap () << -0.991765f, -0.127949f, -0.000154f;
- normals[ 3].getNormalVector3fMap () << -0.558918f, -0.829439f, -0.000039f;
- normals[ 4].getNormalVector3fMap () << 0.253627f, -0.967447f, -0.000041f;
- normals[ 5].getNormalVector3fMap () << 0.894105f, -0.447965f, 0.000101f;
- normals[ 6].getNormalVector3fMap () << 0.926852f, 0.375543f, -0.000161f;
- normals[ 7].getNormalVector3fMap () << 0.329948f, 0.943941f, 0.000001f;
- normals[ 8].getNormalVector3fMap () << -0.490966f, 0.871203f, 0.000072f;
- normals[ 9].getNormalVector3fMap () << -0.978507f, 0.206425f, -0.000017f;
- normals[10].getNormalVector3fMap () << -0.801227f, -0.598534f, 0.000126f;
- normals[11].getNormalVector3fMap () << -0.079447f, -0.996697f, 0.000079f;
- normals[12].getNormalVector3fMap () << 0.696154f, -0.717889f, -0.000017f;
- normals[13].getNormalVector3fMap () << 0.998685f, 0.048502f, -0.000118f;
- normals[14].getNormalVector3fMap () << 0.622933f, 0.782133f, -0.000146f;
- normals[15].getNormalVector3fMap () << -0.175948f, 0.984480f, -0.000044f;
- normals[16].getNormalVector3fMap () << -0.855476f, 0.517824f, 0.000008f;
- normals[17].getNormalVector3fMap () << -0.953769f, -0.300571f, -0.000030f;
- normals[18].getNormalVector3fMap () << -0.404035f, -0.914700f, -0.000046f;
- normals[19].getNormalVector3fMap () << 0.420154f, -0.907445f, 0.000075f;
+ cloud.resize(20);
+ normals.resize(20);
+
+ cloud[0].getVector3fMap() << -0.499902f, 2.199701f, 0.000008f;
+ cloud[1].getVector3fMap() << -0.875397f, 2.030177f, 0.050104f;
+ cloud[2].getVector3fMap() << -0.995875f, 1.635973f, 0.099846f;
+ cloud[3].getVector3fMap() << -0.779523f, 1.285527f, 0.149961f;
+ cloud[4].getVector3fMap() << -0.373285f, 1.216488f, 0.199959f;
+ cloud[5].getVector3fMap() << -0.052893f, 1.475973f, 0.250101f;
+ cloud[6].getVector3fMap() << -0.036558f, 1.887591f, 0.299839f;
+ cloud[7].getVector3fMap() << -0.335048f, 2.171994f, 0.350001f;
+ cloud[8].getVector3fMap() << -0.745456f, 2.135528f, 0.400072f;
+ cloud[9].getVector3fMap() << -0.989282f, 1.803311f, 0.449983f;
+ cloud[10].getVector3fMap() << -0.900651f, 1.400701f, 0.500126f;
+ cloud[11].getVector3fMap() << -0.539658f, 1.201468f, 0.550079f;
+ cloud[12].getVector3fMap() << -0.151875f, 1.340951f, 0.599983f;
+ cloud[13].getVector3fMap() << -0.000724f, 1.724373f, 0.649882f;
+ cloud[14].getVector3fMap() << -0.188573f, 2.090983f, 0.699854f;
+ cloud[15].getVector3fMap() << -0.587925f, 2.192257f, 0.749956f;
+ cloud[16].getVector3fMap() << -0.927724f, 1.958846f, 0.800008f;
+ cloud[17].getVector3fMap() << -0.976888f, 1.549655f, 0.849970f;
+ cloud[18].getVector3fMap() << -0.702003f, 1.242707f, 0.899954f;
+ cloud[19].getVector3fMap() << -0.289916f, 1.246296f, 0.950075f;
+
+ normals[0].getNormalVector3fMap() << 0.000098f, 1.000098f, 0.000008f;
+ normals[1].getNormalVector3fMap() << -0.750891f, 0.660413f, 0.000104f;
+ normals[2].getNormalVector3fMap() << -0.991765f, -0.127949f, -0.000154f;
+ normals[3].getNormalVector3fMap() << -0.558918f, -0.829439f, -0.000039f;
+ normals[4].getNormalVector3fMap() << 0.253627f, -0.967447f, -0.000041f;
+ normals[5].getNormalVector3fMap() << 0.894105f, -0.447965f, 0.000101f;
+ normals[6].getNormalVector3fMap() << 0.926852f, 0.375543f, -0.000161f;
+ normals[7].getNormalVector3fMap() << 0.329948f, 0.943941f, 0.000001f;
+ normals[8].getNormalVector3fMap() << -0.490966f, 0.871203f, 0.000072f;
+ normals[9].getNormalVector3fMap() << -0.978507f, 0.206425f, -0.000017f;
+ normals[10].getNormalVector3fMap() << -0.801227f, -0.598534f, 0.000126f;
+ normals[11].getNormalVector3fMap() << -0.079447f, -0.996697f, 0.000079f;
+ normals[12].getNormalVector3fMap() << 0.696154f, -0.717889f, -0.000017f;
+ normals[13].getNormalVector3fMap() << 0.998685f, 0.048502f, -0.000118f;
+ normals[14].getNormalVector3fMap() << 0.622933f, 0.782133f, -0.000146f;
+ normals[15].getNormalVector3fMap() << -0.175948f, 0.984480f, -0.000044f;
+ normals[16].getNormalVector3fMap() << -0.855476f, 0.517824f, 0.000008f;
+ normals[17].getNormalVector3fMap() << -0.953769f, -0.300571f, -0.000030f;
+ normals[18].getNormalVector3fMap() << -0.404035f, -0.914700f, -0.000046f;
+ normals[19].getNormalVector3fMap() << 0.420154f, -0.907445f, 0.000075f;
// Create a shared cylinder model pointer directly
- SampleConsensusModelCylinderPtr model (new SampleConsensusModelCylinder<PointXYZ, Normal> (cloud.makeShared ()));
- model->setInputNormals (normals.makeShared ());
+ SampleConsensusModelCylinderPtr model(
+ new SampleConsensusModelCylinder<PointXYZ, Normal>(cloud.makeShared()));
+ model->setInputNormals(normals.makeShared());
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (2, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(2, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (20, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(20, inliers.size());
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (7, coeff.size ());
- EXPECT_NEAR (-0.5, coeff[0], 1e-3);
- EXPECT_NEAR ( 1.7, coeff[1], 1e-3);
- EXPECT_NEAR ( 0.5, coeff[6], 1e-3);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(7, coeff.size());
+ EXPECT_NEAR(-0.5, coeff[0], 1e-3);
+ EXPECT_NEAR(1.7, coeff[1], 1e-3);
+ EXPECT_NEAR(0.5, coeff[6], 1e-3);
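+ // For the cylinder model, coeff[6] is the radius; the first six values are a point on the axis and the axis direction.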
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (7, coeff_refined.size ());
- EXPECT_NEAR (0.5, coeff_refined[6], 1e-3);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(7, coeff_refined.size());
+ EXPECT_NEAR(0.5, coeff_refined[6], 1e-3);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelCircle2D, RANSAC)
+TEST(SampleConsensusModelCircle2D, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.resize (18);
-
- cloud[ 0].getVector3fMap () << 3.587751f, -4.190982f, 0.0f;
- cloud[ 1].getVector3fMap () << 3.808883f, -4.412265f, 0.0f;
- cloud[ 2].getVector3fMap () << 3.587525f, -5.809143f, 0.0f;
- cloud[ 3].getVector3fMap () << 2.999913f, -5.999980f, 0.0f;
- cloud[ 4].getVector3fMap () << 2.412224f, -5.809090f, 0.0f;
- cloud[ 5].getVector3fMap () << 2.191080f, -5.587682f, 0.0f;
- cloud[ 6].getVector3fMap () << 2.048941f, -5.309003f, 0.0f;
- cloud[ 7].getVector3fMap () << 2.000397f, -4.999944f, 0.0f;
- cloud[ 8].getVector3fMap () << 2.999953f, -6.000056f, 0.0f;
- cloud[ 9].getVector3fMap () << 2.691127f, -5.951136f, 0.0f;
- cloud[10].getVector3fMap () << 2.190892f, -5.587838f, 0.0f;
- cloud[11].getVector3fMap () << 2.048874f, -5.309052f, 0.0f;
- cloud[12].getVector3fMap () << 1.999990f, -5.000147f, 0.0f;
- cloud[13].getVector3fMap () << 2.049026f, -4.690918f, 0.0f;
- cloud[14].getVector3fMap () << 2.190956f, -4.412162f, 0.0f;
- cloud[15].getVector3fMap () << 2.412231f, -4.190918f, 0.0f;
- cloud[16].getVector3fMap () << 2.691027f, -4.049060f, 0.0f;
- cloud[17].getVector3fMap () << 2.000000f, -3.000000f, 0.0f;
+ cloud.resize(18);
+
+ cloud[0].getVector3fMap() << 3.587751f, -4.190982f, 0.0f;
+ cloud[1].getVector3fMap() << 3.808883f, -4.412265f, 0.0f;
+ cloud[2].getVector3fMap() << 3.587525f, -5.809143f, 0.0f;
+ cloud[3].getVector3fMap() << 2.999913f, -5.999980f, 0.0f;
+ cloud[4].getVector3fMap() << 2.412224f, -5.809090f, 0.0f;
+ cloud[5].getVector3fMap() << 2.191080f, -5.587682f, 0.0f;
+ cloud[6].getVector3fMap() << 2.048941f, -5.309003f, 0.0f;
+ cloud[7].getVector3fMap() << 2.000397f, -4.999944f, 0.0f;
+ cloud[8].getVector3fMap() << 2.999953f, -6.000056f, 0.0f;
+ cloud[9].getVector3fMap() << 2.691127f, -5.951136f, 0.0f;
+ cloud[10].getVector3fMap() << 2.190892f, -5.587838f, 0.0f;
+ cloud[11].getVector3fMap() << 2.048874f, -5.309052f, 0.0f;
+ cloud[12].getVector3fMap() << 1.999990f, -5.000147f, 0.0f;
+ cloud[13].getVector3fMap() << 2.049026f, -4.690918f, 0.0f;
+ cloud[14].getVector3fMap() << 2.190956f, -4.412162f, 0.0f;
+ cloud[15].getVector3fMap() << 2.412231f, -4.190918f, 0.0f;
+ cloud[16].getVector3fMap() << 2.691027f, -4.049060f, 0.0f;
+ cloud[17].getVector3fMap() << 2.000000f, -3.000000f, 0.0f;
// Create a shared 2d circle model pointer directly
- SampleConsensusModelCircle2DPtr model (new SampleConsensusModelCircle2D<PointXYZ> (cloud.makeShared ()));
+ SampleConsensusModelCircle2DPtr model(
+ new SampleConsensusModelCircle2D<PointXYZ>(cloud.makeShared()));
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (3, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(3, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (17, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(17, inliers.size());
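+ // 17 of the 18 points lie on the circle; cloud[17] at (2, -3) is the lone outlier.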
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (3, coeff.size ());
- EXPECT_NEAR ( 3, coeff[0], 1e-3);
- EXPECT_NEAR (-5, coeff[1], 1e-3);
- EXPECT_NEAR ( 1, coeff[2], 1e-3);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(3, coeff.size());
+ EXPECT_NEAR(3, coeff[0], 1e-3);
+ EXPECT_NEAR(-5, coeff[1], 1e-3);
+ EXPECT_NEAR(1, coeff[2], 1e-3);
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (3, coeff_refined.size ());
- EXPECT_NEAR ( 3, coeff_refined[0], 1e-3);
- EXPECT_NEAR (-5, coeff_refined[1], 1e-3);
- EXPECT_NEAR ( 1, coeff_refined[2], 1e-3);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(3, coeff_refined.size());
+ EXPECT_NEAR(3, coeff_refined[0], 1e-3);
+ EXPECT_NEAR(-5, coeff_refined[1], 1e-3);
+ EXPECT_NEAR(1, coeff_refined[2], 1e-3);
}
///////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelCircle2D, SampleValidationPointsValid)
+TEST(SampleConsensusModelCircle2D, SampleValidationPointsValid)
{
PointCloud<PointXYZ> cloud;
- cloud.resize (3);
+ cloud.resize(3);
- cloud[0].getVector3fMap () << 1.0f, 2.0f, 0.0f;
- cloud[1].getVector3fMap () << 0.0f, 1.0f, 0.0f;
- cloud[2].getVector3fMap () << 1.5f, 0.0f, 0.0f;
+ cloud[0].getVector3fMap() << 1.0f, 2.0f, 0.0f;
+ cloud[1].getVector3fMap() << 0.0f, 1.0f, 0.0f;
+ cloud[2].getVector3fMap() << 1.5f, 0.0f, 0.0f;
// Create a shared line model pointer directly
- SampleConsensusModelCircle2DPtr model (
- new SampleConsensusModelCircle2D<PointXYZ> (cloud.makeShared ()));
+ SampleConsensusModelCircle2DPtr model(
+ new SampleConsensusModelCircle2D<PointXYZ>(cloud.makeShared()));
// Algorithm tests
pcl::Indices samples;
int iterations = 0;
model->getSamples(iterations, samples);
- EXPECT_EQ (samples.size(), 3);
+ EXPECT_EQ(samples.size(), 3);
Eigen::VectorXf modelCoefficients;
- EXPECT_TRUE (model->computeModelCoefficients (samples, modelCoefficients));
+ EXPECT_TRUE(model->computeModelCoefficients(samples, modelCoefficients));
- EXPECT_NEAR (modelCoefficients[0], 1.05f, 1e-3); // center x
- EXPECT_NEAR (modelCoefficients[1], 0.95f, 1e-3); // center y
- EXPECT_NEAR (modelCoefficients[2], std::sqrt (1.105f), 1e-3); // radius
+ EXPECT_NEAR(modelCoefficients[0], 1.05f, 1e-3); // center x
+ EXPECT_NEAR(modelCoefficients[1], 0.95f, 1e-3); // center y
+ EXPECT_NEAR(modelCoefficients[2], std::sqrt(1.105f), 1e-3); // radius
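+ // Sanity check: the circumcircle of (1, 2), (0, 1) and (1.5, 0) is centered at (1.05, 0.95), and each point lies at squared distance 1.105 from that center.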
}
using PointVector = std::vector<PointXYZ>;
class SampleConsensusModelCircle2DCollinearTest
- : public testing::TestWithParam<PointVector> {
- protected:
- void SetUp() override {
- pointCloud_ = make_shared<PointCloud<PointXYZ>>();
- for(const auto& point : GetParam()) {
- pointCloud_->emplace_back(point);
- }
+: public testing::TestWithParam<PointVector> {
+protected:
+ void
+ SetUp() override
+ {
+ pointCloud_ = make_shared<PointCloud<PointXYZ>>();
+ for (const auto& point : GetParam()) {
+ pointCloud_->emplace_back(point);
}
+ }
- PointCloud<PointXYZ>::Ptr pointCloud_ = nullptr;
+ PointCloud<PointXYZ>::Ptr pointCloud_ = nullptr;
};
// Parametric tests clearly show the input for which they failed and provide
// clearer feedback if something is changed in the future.
-TEST_P (SampleConsensusModelCircle2DCollinearTest, SampleValidationPointsInvalid)
+TEST_P(SampleConsensusModelCircle2DCollinearTest, SampleValidationPointsInvalid)
{
- ASSERT_NE (pointCloud_, nullptr);
+ ASSERT_NE(pointCloud_, nullptr);
- SampleConsensusModelCircle2DPtr model (
- new SampleConsensusModelCircle2D<PointXYZ> (pointCloud_));
- ASSERT_GE (model->getInputCloud ()->size (), model->getSampleSize ());
+ SampleConsensusModelCircle2DPtr model(
+ new SampleConsensusModelCircle2D<PointXYZ>(pointCloud_));
+ ASSERT_GE(model->getInputCloud()->size(), model->getSampleSize());
// Algorithm tests
// (Maybe) TODO: try to implement the "cheat point" trick from
// Explicitly enforce sample order so they can act as designed
pcl::Indices forcedSamples = {0, 1, 2};
Eigen::VectorXf modelCoefficients;
- EXPECT_FALSE (model->computeModelCoefficients (forcedSamples, modelCoefficients));
+ EXPECT_FALSE(model->computeModelCoefficients(forcedSamples, modelCoefficients));
}
-INSTANTIATE_TEST_SUITE_P (CollinearInputs, SampleConsensusModelCircle2DCollinearTest,
- testing::Values( // Throw in some negative coordinates and "asymmetric" points to cover more cases
- PointVector {{ 0.0f, 0.0f, 0.0f}, { 1.0f, 0.0f, 0.0f}, { 2.0f, 0.0f, 0.0f}}, // collinear along x-axis
- PointVector {{-1.0f, 0.0f, 0.0f}, { 1.0f, 0.0f, 0.0f}, { 2.0f, 0.0f, 0.0f}},
- PointVector {{ 0.0f, -1.0f, 0.0f}, { 0.0f, 1.0f, 0.0f}, { 0.0f, 2.0f, 0.0f}}, // collinear along y-axis
- PointVector {{ 0.0f, -1.0f, 0.0f}, { 0.0f, 1.0f, 0.0f}, { 0.0f, 2.0f, 0.0f}},
- PointVector {{ 0.0f, 0.0f, 0.0f}, { 1.0f, 1.0f, 0.0f}, { 2.0f, 2.0f, 0.0f}}, // collinear along diagonal
- PointVector {{ 0.0f, 0.0f, 0.0f}, {-1.0f, -1.0f, 0.0f}, {-2.0f, -2.0f, 0.0f}},
- PointVector {{-3.0f, -6.5f, 0.0f}, {-2.0f, -0.5f, 0.0f}, {-1.5f, 2.5f, 0.0f}}, // other collinear input
- PointVector {{ 2.0f, 2.0f, 0.0f}, { 0.0f, 0.0f, 0.0f}, { 0.0f, 0.0f, 0.0f}}, // two points equal
- PointVector {{ 0.0f, 0.0f, 0.0f}, { 2.0f, 2.0f, 0.0f}, { 0.0f, 0.0f, 0.0f}},
- PointVector {{ 0.0f, 0.0f, 0.0f}, { 0.0f, 0.0f, 0.0f}, { 2.0f, 2.0f, 0.0f}},
- PointVector {{ 1.0f, 1.0f, 0.0f}, { 1.0f, 1.0f, 0.0f}, { 1.0f, 1.0f, 0.0f}} // all points equal
- )
-);
+INSTANTIATE_TEST_SUITE_P(
+ CollinearInputs,
+ SampleConsensusModelCircle2DCollinearTest,
+ testing::Values( // Throw in some negative coordinates and "asymmetric" points to
+ // cover more cases
+ PointVector{{0.0f, 0.0f, 0.0f},
+ {1.0f, 0.0f, 0.0f},
+ {2.0f, 0.0f, 0.0f}}, // collinear along x-axis
+ PointVector{{-1.0f, 0.0f, 0.0f}, {1.0f, 0.0f, 0.0f}, {2.0f, 0.0f, 0.0f}},
+ PointVector{{0.0f, -1.0f, 0.0f},
+ {0.0f, 1.0f, 0.0f},
+ {0.0f, 2.0f, 0.0f}}, // collinear along y-axis
+ PointVector{{0.0f, -1.0f, 0.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 2.0f, 0.0f}},
+ PointVector{{0.0f, 0.0f, 0.0f},
+ {1.0f, 1.0f, 0.0f},
+ {2.0f, 2.0f, 0.0f}}, // collinear along diagonal
+ PointVector{{0.0f, 0.0f, 0.0f}, {-1.0f, -1.0f, 0.0f}, {-2.0f, -2.0f, 0.0f}},
+ PointVector{{-3.0f, -6.5f, 0.0f},
+ {-2.0f, -0.5f, 0.0f},
+ {-1.5f, 2.5f, 0.0f}}, // other collinear input
+ PointVector{{2.0f, 2.0f, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ {0.0f, 0.0f, 0.0f}}, // two points equal
+ PointVector{{0.0f, 0.0f, 0.0f}, {2.0f, 2.0f, 0.0f}, {0.0f, 0.0f, 0.0f}},
+ PointVector{{0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f}, {2.0f, 2.0f, 0.0f}},
+ PointVector{{1.0f, 1.0f, 0.0f}, {1.0f, 1.0f, 0.0f}, {1.0f, 1.0f, 0.0f}}
+ // all points equal
+ ));
//////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT>
-class SampleConsensusModelCircle2DTest : private SampleConsensusModelCircle2D<PointT>
-{
- public:
- using SampleConsensusModelCircle2D<PointT>::SampleConsensusModelCircle2D;
- using SampleConsensusModelCircle2D<PointT>::countWithinDistanceStandard;
-#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
- using SampleConsensusModelCircle2D<PointT>::countWithinDistanceSSE;
+class SampleConsensusModelCircle2DTest : private SampleConsensusModelCircle2D<PointT> {
+public:
+ using SampleConsensusModelCircle2D<PointT>::SampleConsensusModelCircle2D;
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceStandard;
+#if defined(__SSE__) && defined(__SSE2__) && defined(__SSE4_1__)
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceSSE;
#endif
-#if defined (__AVX__) && defined (__AVX2__)
- using SampleConsensusModelCircle2D<PointT>::countWithinDistanceAVX;
+#if defined(__AVX__) && defined(__AVX2__)
+ using SampleConsensusModelCircle2D<PointT>::countWithinDistanceAVX;
#endif
};
-TEST (SampleConsensusModelCircle2D, SIMD_countWithinDistance) // Test if all countWithinDistance implementations return the same value
+TEST(SampleConsensusModelCircle2D,
+ SIMD_countWithinDistance) // Test if all countWithinDistance implementations return
+ // the same value
{
- const auto seed = static_cast<unsigned> (std::time (nullptr));
- srand (seed);
- for (size_t i=0; i<100; i++) // Run as often as you like
+ const auto seed = static_cast<unsigned>(std::time(nullptr));
+ srand(seed);
+ for (size_t i = 0; i < 100; i++) // Run as often as you like
{
// Generate a cloud with 1000 random points
PointCloud<PointXYZ> cloud;
pcl::Indices indices;
- cloud.resize (1000);
- for (std::size_t idx = 0; idx < cloud.size (); ++idx)
- {
- cloud[idx].x = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- cloud[idx].y = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- cloud[idx].z = 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0;
- if (rand () % 2 == 0)
- {
- indices.push_back (static_cast<int> (idx));
+ cloud.resize(1000);
+ for (std::size_t idx = 0; idx < cloud.size(); ++idx) {
+ cloud[idx].x = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ cloud[idx].y = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ cloud[idx].z = 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0;
+ if (rand() % 2 == 0) {
+ indices.push_back(static_cast<int>(idx));
}
}
- SampleConsensusModelCircle2DTest<PointXYZ> model (cloud.makeShared (), indices, true);
+ SampleConsensusModelCircle2DTest<PointXYZ> model(cloud.makeShared(), indices, true);
// Generate random circle model parameters
Eigen::VectorXf model_coefficients(3);
- model_coefficients << 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
- 2.0 * static_cast<float> (rand ()) / RAND_MAX - 1.0,
- 0.1 * static_cast<float> (rand ()) / RAND_MAX; // center and radius
+ model_coefficients << 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0,
+ 2.0 * static_cast<float>(rand()) / RAND_MAX - 1.0,
+ 0.1 * static_cast<float>(rand()) / RAND_MAX; // center and radius
- const double threshold = 0.1 * static_cast<double> (rand ()) / RAND_MAX; // threshold in [0; 0.1]
+ const double threshold =
+ 0.1 * static_cast<double>(rand()) / RAND_MAX; // threshold in [0; 0.1]
// The number of inliers is usually somewhere between 0 and 20
- const auto res_standard = model.countWithinDistanceStandard (model_coefficients, threshold); // Standard
- PCL_DEBUG ("seed=%lu, i=%lu, model=(%f, %f, %f), threshold=%f, res_standard=%lu\n", seed, i,
- model_coefficients(0), model_coefficients(1), model_coefficients(2), threshold, res_standard);
-#if defined (__SSE__) && defined (__SSE2__) && defined (__SSE4_1__)
- const auto res_sse = model.countWithinDistanceSSE (model_coefficients, threshold); // SSE
- ASSERT_EQ (res_standard, res_sse);
+ const auto res_standard =
+ model.countWithinDistanceStandard(model_coefficients, threshold); // Standard
+ PCL_DEBUG("seed=%lu, i=%lu, model=(%f, %f, %f), threshold=%f, res_standard=%lu\n",
+ seed,
+ i,
+ model_coefficients(0),
+ model_coefficients(1),
+ model_coefficients(2),
+ threshold,
+ res_standard);
+#if defined(__SSE__) && defined(__SSE2__) && defined(__SSE4_1__)
+ const auto res_sse =
+ model.countWithinDistanceSSE(model_coefficients, threshold); // SSE
+ ASSERT_EQ(res_standard, res_sse);
#endif
-#if defined (__AVX__) && defined (__AVX2__)
- const auto res_avx = model.countWithinDistanceAVX (model_coefficients, threshold); // AVX
- ASSERT_EQ (res_standard, res_avx);
+#if defined(__AVX__) && defined(__AVX2__)
+ const auto res_avx =
+ model.countWithinDistanceAVX(model_coefficients, threshold); // AVX
+ ASSERT_EQ(res_standard, res_avx);
#endif
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SampleConsensusModelCircle3D, RANSAC)
+TEST(SampleConsensusModelCircle3D, RANSAC)
{
- srand (0);
+ srand(0);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
- cloud.resize (20);
-
- cloud[ 0].getVector3fMap () << 1.00000000f, 5.0000000f, -2.9000001f;
- cloud[ 1].getVector3fMap () << 1.03420200f, 5.0000000f, -2.9060307f;
- cloud[ 2].getVector3fMap () << 1.06427870f, 5.0000000f, -2.9233956f;
- cloud[ 3].getVector3fMap () << 1.08660260f, 5.0000000f, -2.9500000f;
- cloud[ 4].getVector3fMap () << 1.09848080f, 5.0000000f, -2.9826353f;
- cloud[ 5].getVector3fMap () << 1.09848080f, 5.0000000f, -3.0173647f;
- cloud[ 6].getVector3fMap () << 1.08660260f, 5.0000000f, -3.0500000f;
- cloud[ 7].getVector3fMap () << 1.06427870f, 5.0000000f, -3.0766044f;
- cloud[ 8].getVector3fMap () << 1.03420200f, 5.0000000f, -3.0939693f;
- cloud[ 9].getVector3fMap () << 1.00000000f, 5.0000000f, -3.0999999f;
- cloud[10].getVector3fMap () << 0.96579796f, 5.0000000f, -3.0939693f;
- cloud[11].getVector3fMap () << 0.93572122f, 5.0000000f, -3.0766044f;
- cloud[12].getVector3fMap () << 0.91339743f, 5.0000000f, -3.0500000f;
- cloud[13].getVector3fMap () << 0.90151924f, 5.0000000f, -3.0173647f;
- cloud[14].getVector3fMap () << 0.90151924f, 5.0000000f, -2.9826353f;
- cloud[15].getVector3fMap () << 0.91339743f, 5.0000000f, -2.9500000f;
- cloud[16].getVector3fMap () << 0.93572122f, 5.0000000f, -2.9233956f;
- cloud[17].getVector3fMap () << 0.96579796f, 5.0000000f, -2.9060307f;
- cloud[18].getVector3fMap () << 0.85000002f, 4.8499999f, -3.1500001f;
- cloud[19].getVector3fMap () << 1.15000000f, 5.1500001f, -2.8499999f;
+ cloud.resize(20);
+
+ cloud[0].getVector3fMap() << 1.00000000f, 5.0000000f, -2.9000001f;
+ cloud[1].getVector3fMap() << 1.03420200f, 5.0000000f, -2.9060307f;
+ cloud[2].getVector3fMap() << 1.06427870f, 5.0000000f, -2.9233956f;
+ cloud[3].getVector3fMap() << 1.08660260f, 5.0000000f, -2.9500000f;
+ cloud[4].getVector3fMap() << 1.09848080f, 5.0000000f, -2.9826353f;
+ cloud[5].getVector3fMap() << 1.09848080f, 5.0000000f, -3.0173647f;
+ cloud[6].getVector3fMap() << 1.08660260f, 5.0000000f, -3.0500000f;
+ cloud[7].getVector3fMap() << 1.06427870f, 5.0000000f, -3.0766044f;
+ cloud[8].getVector3fMap() << 1.03420200f, 5.0000000f, -3.0939693f;
+ cloud[9].getVector3fMap() << 1.00000000f, 5.0000000f, -3.0999999f;
+ cloud[10].getVector3fMap() << 0.96579796f, 5.0000000f, -3.0939693f;
+ cloud[11].getVector3fMap() << 0.93572122f, 5.0000000f, -3.0766044f;
+ cloud[12].getVector3fMap() << 0.91339743f, 5.0000000f, -3.0500000f;
+ cloud[13].getVector3fMap() << 0.90151924f, 5.0000000f, -3.0173647f;
+ cloud[14].getVector3fMap() << 0.90151924f, 5.0000000f, -2.9826353f;
+ cloud[15].getVector3fMap() << 0.91339743f, 5.0000000f, -2.9500000f;
+ cloud[16].getVector3fMap() << 0.93572122f, 5.0000000f, -2.9233956f;
+ cloud[17].getVector3fMap() << 0.96579796f, 5.0000000f, -2.9060307f;
+ cloud[18].getVector3fMap() << 0.85000002f, 4.8499999f, -3.1500001f;
+ cloud[19].getVector3fMap() << 1.15000000f, 5.1500001f, -2.8499999f;
// Create a shared 3d circle model pointer directly
- SampleConsensusModelCircle3DPtr model (new SampleConsensusModelCircle3D<PointXYZ> (cloud.makeShared ()));
+ SampleConsensusModelCircle3DPtr model(
+ new SampleConsensusModelCircle3D<PointXYZ>(cloud.makeShared()));
// Create the RANSAC object
- RandomSampleConsensus<PointXYZ> sac (model, 0.03);
+ RandomSampleConsensus<PointXYZ> sac(model, 0.03);
// Algorithm tests
- bool result = sac.computeModel ();
- ASSERT_TRUE (result);
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
pcl::Indices sample;
- sac.getModel (sample);
- EXPECT_EQ (3, sample.size ());
+ sac.getModel(sample);
+ EXPECT_EQ(3, sample.size());
pcl::Indices inliers;
- sac.getInliers (inliers);
- EXPECT_EQ (18, inliers.size ());
+ sac.getInliers(inliers);
+ EXPECT_EQ(18, inliers.size());
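+ // cloud[18] and cloud[19] lie off the circle plane, so 18 of the 20 points are inliers.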
Eigen::VectorXf coeff;
- sac.getModelCoefficients (coeff);
- EXPECT_EQ (7, coeff.size ());
- EXPECT_NEAR ( 1.0, coeff[0], 1e-3);
- EXPECT_NEAR ( 5.0, coeff[1], 1e-3);
- EXPECT_NEAR (-3.0, coeff[2], 1e-3);
- EXPECT_NEAR ( 0.1, coeff[3], 1e-3);
- EXPECT_NEAR ( 0.0, coeff[4], 1e-3);
+ sac.getModelCoefficients(coeff);
+ EXPECT_EQ(7, coeff.size());
+ EXPECT_NEAR(1.0, coeff[0], 1e-3);
+ EXPECT_NEAR(5.0, coeff[1], 1e-3);
+ EXPECT_NEAR(-3.0, coeff[2], 1e-3);
+ EXPECT_NEAR(0.1, coeff[3], 1e-3);
+ EXPECT_NEAR(0.0, coeff[4], 1e-3);
// Use abs in y component because both variants are valid normal vectors
- EXPECT_NEAR ( 1.0, std::abs (coeff[5]), 1e-3);
- EXPECT_NEAR ( 0.0, coeff[6], 1e-3);
+ EXPECT_NEAR(1.0, std::abs(coeff[5]), 1e-3);
+ EXPECT_NEAR(0.0, coeff[6], 1e-3);
Eigen::VectorXf coeff_refined;
- model->optimizeModelCoefficients (inliers, coeff, coeff_refined);
- EXPECT_EQ (7, coeff_refined.size ());
- EXPECT_NEAR ( 1.0, coeff_refined[0], 1e-3);
- EXPECT_NEAR ( 5.0, coeff_refined[1], 1e-3);
- EXPECT_NEAR (-3.0, coeff_refined[2], 1e-3);
- EXPECT_NEAR ( 0.1, coeff_refined[3], 1e-3);
- EXPECT_NEAR ( 0.0, coeff_refined[4], 1e-3);
- EXPECT_NEAR ( 1.0, std::abs (coeff_refined[5]), 1e-3);
- EXPECT_NEAR ( 0.0, coeff_refined[6], 1e-3);
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(7, coeff_refined.size());
+ EXPECT_NEAR(1.0, coeff_refined[0], 1e-3);
+ EXPECT_NEAR(5.0, coeff_refined[1], 1e-3);
+ EXPECT_NEAR(-3.0, coeff_refined[2], 1e-3);
+ EXPECT_NEAR(0.1, coeff_refined[3], 1e-3);
+ EXPECT_NEAR(0.0, coeff_refined[4], 1e-3);
+ EXPECT_NEAR(1.0, std::abs(coeff_refined[5]), 1e-3);
+ EXPECT_NEAR(0.0, coeff_refined[6], 1e-3);
}
-TEST (SampleConsensusModelSphere, projectPoints)
+TEST(SampleConsensusModelSphere, projectPoints)
{
Eigen::VectorXf model_coefficients(4);
model_coefficients << -0.32, -0.89, 0.37, 0.12; // center and radius
pcl::PointCloud<pcl::PointXYZ>::Ptr input(new pcl::PointCloud<pcl::PointXYZ>);
- input->emplace_back(-0.259754, -0.950873, 0.318377); // inlier, dist from center=0.10
- input->emplace_back( 0.595892, 0.455094, 0.025545); // outlier, not projected
- input->emplace_back(-0.221871, -0.973718, 0.353817); // inlier, dist from center=0.13
- input->emplace_back(-0.332269, -0.848851, 0.437499); // inlier, dist from center=0.08
+ input->emplace_back(-0.259754, -0.950873, 0.318377); // inlier, dist from center=0.10
+ input->emplace_back(0.595892, 0.455094, 0.025545); // outlier, not projected
+ input->emplace_back(-0.221871, -0.973718, 0.353817); // inlier, dist from center=0.13
+ input->emplace_back(-0.332269, -0.848851, 0.437499); // inlier, dist from center=0.08
input->emplace_back(-0.242308, -0.561036, -0.365535); // outlier, not projected
- input->emplace_back(-0.327668, -0.800009, 0.290988); // inlier, dist from center=0.12
- input->emplace_back(-0.173948, -0.883831, 0.403625); // inlier, dist from center=0.15
- input->emplace_back(-0.033891, 0.624537, -0.606994); // outlier, not projected
+ input->emplace_back(-0.327668, -0.800009, 0.290988); // inlier, dist from center=0.12
+ input->emplace_back(-0.173948, -0.883831, 0.403625); // inlier, dist from center=0.15
+ input->emplace_back(-0.033891, 0.624537, -0.606994); // outlier, not projected
pcl::SampleConsensusModelSphere<pcl::PointXYZ> model(input);
pcl::Indices inliers = {0, 2, 3, 5, 6};
pcl::PointCloud<pcl::PointXYZ> projected_points;
model.projectPoints(inliers, model_coefficients, projected_points, false);
EXPECT_EQ(projected_points.size(), 5);
- for(int i=0; i<5; ++i)
+ for (int i = 0; i < 5; ++i)
EXPECT_XYZ_NEAR(projected_points[i], projected_truth[i], 1e-5);
pcl::PointCloud<pcl::PointXYZ> projected_points_all;
model.projectPoints(inliers, model_coefficients, projected_points_all, true);
EXPECT_EQ(projected_points_all.size(), 8);
EXPECT_XYZ_NEAR(projected_points_all[0], projected_truth[0], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[1], (*input)[1], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[1], (*input)[1], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[2], projected_truth[1], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[3], projected_truth[2], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[4], (*input)[4], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[4], (*input)[4], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[5], projected_truth[3], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[6], projected_truth[4], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[7], (*input)[7], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[7], (*input)[7], 1e-5);
}
TEST(SampleConsensusModelCylinder, projectPoints)
model.projectPoints(inliers, model_coefficients, projected_points_all, true);
EXPECT_EQ(projected_points_all.size(), 8);
EXPECT_XYZ_NEAR(projected_points_all[0], projected_truth[0], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[1], (*input)[1], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[1], (*input)[1], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[2], projected_truth[1], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[3], projected_truth[2], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[4], (*input)[4], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[4], (*input)[4], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[5], projected_truth[3], 1e-5);
EXPECT_XYZ_NEAR(projected_points_all[6], projected_truth[4], 1e-5);
- EXPECT_XYZ_NEAR(projected_points_all[7], (*input)[7], 1e-5);
+ EXPECT_XYZ_NEAR(projected_points_all[7], (*input)[7], 1e-5);
}
TEST(SampleConsensusModelEllipse3D, RANSAC)
PointCloud<PointXYZ> cloud;
cloud.resize(22);
- cloud[ 0].getVector3fMap() << 1.000000, 5.000000, 3.000000;
- cloud[ 1].getVector3fMap() << 0.690983, 5.000000, 2.902110;
- cloud[ 2].getVector3fMap() << 0.412215, 5.000000, 2.618030;
- cloud[ 3].getVector3fMap() << 0.190983, 5.000000, 2.175570;
- cloud[ 4].getVector3fMap() << 0.048944, 5.000000, 1.618030;
- cloud[ 5].getVector3fMap() << 0.000000, 5.000000, 1.000000;
- cloud[ 6].getVector3fMap() << 0.048944, 5.000000, 0.381966;
- cloud[ 7].getVector3fMap() << 0.190983, 5.000000, -0.175571;
- cloud[ 8].getVector3fMap() << 0.412215, 5.000000, -0.618034;
- cloud[ 9].getVector3fMap() << 0.690983, 5.000000, -0.902113;
+ cloud[0].getVector3fMap() << 1.000000, 5.000000, 3.000000;
+ cloud[1].getVector3fMap() << 0.690983, 5.000000, 2.902110;
+ cloud[2].getVector3fMap() << 0.412215, 5.000000, 2.618030;
+ cloud[3].getVector3fMap() << 0.190983, 5.000000, 2.175570;
+ cloud[4].getVector3fMap() << 0.048944, 5.000000, 1.618030;
+ cloud[5].getVector3fMap() << 0.000000, 5.000000, 1.000000;
+ cloud[6].getVector3fMap() << 0.048944, 5.000000, 0.381966;
+ cloud[7].getVector3fMap() << 0.190983, 5.000000, -0.175571;
+ cloud[8].getVector3fMap() << 0.412215, 5.000000, -0.618034;
+ cloud[9].getVector3fMap() << 0.690983, 5.000000, -0.902113;
cloud[10].getVector3fMap() << 1.000000, 5.000000, -1.000000;
cloud[11].getVector3fMap() << 1.309020, 5.000000, -0.902113;
cloud[12].getVector3fMap() << 1.587790, 5.000000, -0.618034;
cloud[21].getVector3fMap() << 1.15000000f, 5.1500001f, -2.8499999f;
// Create a shared 3d ellipse model pointer directly
- SampleConsensusModelEllipse3DPtr model( new SampleConsensusModelEllipse3D<PointXYZ>(cloud.makeShared()));
+ SampleConsensusModelEllipse3DPtr model(
+ new SampleConsensusModelEllipse3D<PointXYZ>(cloud.makeShared()));
// Create the RANSAC object
RandomSampleConsensus<PointXYZ> sac(model, 0.0011);
EXPECT_NEAR(1.0, std::abs(coeff_refined[10]), 1e-3);
}
-int
-main (int argc, char** argv)
+// Heavy occlusion: all points lie on a 30 degree segment of the major circle
+// and a 90 degree segment of the minor circle
+TEST(SampleConsensusModelTorusOclusion, RANSAC)
+{
+
+ srand(0);
+
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normals;
+
+ cloud.resize(50);
+ normals.resize(50);
+
+ cloud[0].getVector3fMap() << 1.4386461277601734, -1.0569588316537601,
+ 3.6109405500068648;
+ cloud[1].getVector3fMap() << 1.5892272151771987, -1.0107131751656608,
+ 3.7524801792294786;
+ cloud[2].getVector3fMap() << 0.779450850990528, -1.1095381992814901,
+ 2.1362474566704086;
+ cloud[3].getVector3fMap() << 2.2332764042301116, -1.0712898227325813,
+ 4.104289150491388;
+ cloud[4].getVector3fMap() << 1.4625240977021328, -1.0262438455563425,
+ 3.6408698702774327;
+ cloud[5].getVector3fMap() << 1.107288574597542, -1.038213986002474,
+ 3.2111695308737334;
+ cloud[6].getVector3fMap() << 1.634136176426644, -1.0586849054858922,
+ 3.7791937450863844;
+ cloud[7].getVector3fMap() << 2.8081039281494284, -1.124052124218725,
+ 4.208730485774282;
+ cloud[8].getVector3fMap() << 2.7325382847100004, -1.0968720291167913,
+ 4.214374570675481;
+ cloud[9].getVector3fMap() << 1.1897810394404515, -1.0861469920822655,
+ 3.3103205876091675;
+ cloud[10].getVector3fMap() << 0.8242772124713484, -1.0935505936537508,
+ 2.4973185149793924;
+ cloud[11].getVector3fMap() << 2.9589166075430366, -1.0656763334810868,
+ 4.240842449620676;
+ cloud[12].getVector3fMap() << 1.7930324882302902, -1.0629548347911097,
+ 3.8893227292858175;
+ cloud[13].getVector3fMap() << 0.7810401372209808, -1.0705732580035723,
+ 2.305065186549668;
+ cloud[14].getVector3fMap() << 0.9062603270739178, -1.0815748767074063,
+ 2.785726514647834;
+ cloud[15].getVector3fMap() << 1.3832157146170436, -1.0790593653653633,
+ 3.546265907749163;
+ cloud[16].getVector3fMap() << 2.040614544849421, -1.0918678466867353,
+ 4.015855816881193;
+ cloud[17].getVector3fMap() << 2.274098663746168, -1.0273778393320356,
+ 4.128098505334945;
+ cloud[18].getVector3fMap() << 1.2518457008499417, -1.0912889870169762,
+ 3.38890936771287;
+ cloud[19].getVector3fMap() << 0.8773148573186607, -1.026298817514791,
+ 2.7419351335271855;
+ cloud[20].getVector3fMap() << 2.460972277763233, -1.0874470683716413,
+ 4.168209147029958;
+ cloud[21].getVector3fMap() << 2.326091552875379, -1.0983984335719184,
+ 4.125546904328003;
+ cloud[22].getVector3fMap() << 2.0996991277329786, -1.0707210059905774,
+ 4.050880542671691;
+ cloud[23].getVector3fMap() << 0.95831333743683, -1.061687690479444,
+ 2.9269785200505813;
+ cloud[24].getVector3fMap() << 2.0588703194976024, -1.0025516869353353,
+ 4.043701622831673;
+ normals[0].getNormalVector3fMap() << -0.6776646502188018, -0.2278353266150415,
+ 0.6991864456566977;
+ normals[1].getNormalVector3fMap() << -0.6264981002776666, -0.04285270066264479,
+ 0.7782440339600377;
+ normals[2].getNormalVector3fMap() << -0.8972132050327152, -0.4381527971259608,
+ 0.05505080458648829;
+ normals[3].getNormalVector3fMap() << -0.32813125795357256, -0.2851592909303272,
+ 0.9005631884270638;
+ normals[4].getNormalVector3fMap() << -0.6799645795137096, -0.10497538222537117,
+ 0.7256916285402369;
+ normals[5].getNormalVector3fMap() << -0.8324065340358171, -0.15285594400989705,
+ 0.5326672718267207;
+ normals[6].getNormalVector3fMap() << -0.5919262688649901, -0.2347396219435707,
+ 0.7710516209161101;
+ normals[7].getNormalVector3fMap() << -0.07514704519204393, -0.4962084968749015,
+ 0.8649451134193752;
+ normals[8].getNormalVector3fMap() << -0.11054456443635059, -0.387488116467167,
+ 0.9152228465626854;
+ normals[9].getNormalVector3fMap() << -0.7604417087668234, -0.34458796832906313,
+ 0.5504430394242827;
+ normals[10].getNormalVector3fMap() << -0.9040312337559508, -0.3742023746150035,
+ 0.20664005232816324;
+ normals[11].getNormalVector3fMap() << -0.0176869745485768, -0.2627053339243488,
+ 0.964713987904713;
+ normals[12].getNormalVector3fMap() << -0.5210086952913671, -0.25181933916444066,
+ 0.8155592926658195;
+ normals[13].getNormalVector3fMap() << -0.950388588906301, -0.2822930320142894,
+ 0.13066053020277188;
+ normals[14].getNormalVector3fMap() << -0.8850007317473024, -0.32629950682962533,
+ 0.3321179559275326;
+ normals[15].getNormalVector3fMap() << -0.6856032449538655, -0.31623746146145426,
+ 0.65569967094482;
+ normals[16].getNormalVector3fMap() << -0.3996678191380136, -0.36747138674694285,
+ 0.8397799796778576;
+ normals[17].getNormalVector3fMap() << -0.3208968621888316, -0.10951135732814456,
+ 0.9407616416784378;
+ normals[18].getNormalVector3fMap() << -0.728898292732006, -0.36515594806790613,
+ 0.5791100175640165;
+ normals[19].getNormalVector3fMap() << -0.9387598943114375, -0.1051952700591645,
+ 0.3281216481574453;
+ normals[20].getNormalVector3fMap() << -0.22602052518599647, -0.34978827348656716,
+ 0.9091550395427244;
+ normals[21].getNormalVector3fMap() << -0.27783106746442193, -0.3935937342876755,
+ 0.8762955382067529;
+ normals[22].getNormalVector3fMap() << -0.38553965278262686, -0.2828840239623112,
+ 0.878257254521215;
+ normals[23].getNormalVector3fMap() << -0.8823896524250601, -0.24675076191777665,
+ 0.4006277109564169;
+ normals[24].getNormalVector3fMap() << -0.4182604905252856, -0.010206747741342384,
+ 0.908269775103241;
+
+ // The remaining 25 points (50%) are uniform noise in [-2, 2]
+ cloud[25].getVector3fMap() << 0.25023241635877147, 0.27654549396527894,
+ 1.07955881369387;
+ cloud[26].getVector3fMap() << 1.5449856383148206, -0.46768009897289264,
+ -2.062172100500517;
+ cloud[27].getVector3fMap() << 2.068709384697231, -0.8995244010670893,
+ 0.4472750119304405;
+ cloud[28].getVector3fMap() << -1.9703101501142217, 1.1677926799358453,
+ -1.0951161775500093;
+ cloud[29].getVector3fMap() << 1.5128012164196942, -0.3784790741317119,
+ 1.9953141538660382;
+ cloud[30].getVector3fMap() << -1.7035274240520712, -0.040343373432154106,
+ -0.13506114362465782;
+ cloud[31].getVector3fMap() << 1.390301434734198, -1.0836155740357354,
+ 1.3817400889837255;
+ cloud[32].getVector3fMap() << 0.6973526735174085, 1.4609265623041212,
+ 0.3991283042562106;
+ cloud[33].getVector3fMap() << 0.4585644490692351, 1.8056826118986748,
+ 1.1908087822224616;
+ cloud[34].getVector3fMap() << -1.899161354377058, -1.2250806902713103,
+ 1.5135509588271026;
+ cloud[35].getVector3fMap() << 0.05728241071603746, -1.3140082682155136,
+ -1.6804780212669348;
+ cloud[36].getVector3fMap() << -0.5371089158049953, -0.02542717526439331,
+ -0.6188539490393421;
+ cloud[37].getVector3fMap() << -0.21842672967261145, 0.6528285340670843,
+ 1.937369474575887;
+ cloud[38].getVector3fMap() << 1.6906916394191258, 1.6029527944840072,
+ 1.3312465637845015;
+ cloud[39].getVector3fMap() << 0.3871457304584722, -0.7014470556575774,
+ -1.3686189094260588;
+ cloud[40].getVector3fMap() << 1.1287360826333366, -1.8859435547052814,
+ -0.1392786225318703;
+ cloud[41].getVector3fMap() << -0.8284092960915028, 1.0112260700590863,
+ -1.1937340633604672;
+ cloud[42].getVector3fMap() << 1.8440270354564277, -0.3703200026464992,
+ -1.5917391524525757;
+ cloud[43].getVector3fMap() << 0.02671922592530418, 1.7827062803768543,
+ 0.22852714632858673;
+ cloud[44].getVector3fMap() << -1.5132468082647963, -1.3357890987499987,
+ -1.158617245205414;
+ cloud[45].getVector3fMap() << -1.1450583549521511, 1.45432498632732,
+ -2.095300144813141;
+ cloud[46].getVector3fMap() << 0.18809078359436793, -1.6008222007566066,
+ -1.9699784955663424;
+ cloud[47].getVector3fMap() << -1.1753993948548627, 1.5857927603987902,
+ 0.14497327864750886;
+ cloud[48].getVector3fMap() << 1.121788740853686, -0.27095183911320286,
+ -0.12199102154089814;
+ cloud[49].getVector3fMap() << 0.768999145889063, -2.0309651709863434,
+ 0.7930530394403963;
+ normals[25].getNormalVector3fMap() << -0.5835940349115277, 0.1757014210775822,
+ 0.7928095692201251;
+ normals[26].getNormalVector3fMap() << -0.6960838602866861, -0.42094642891496487,
+ -0.5816110069729798;
+ normals[27].getNormalVector3fMap() << 0.255914777841287, 0.6279839361250196,
+ -0.7349447614966528;
+ normals[28].getNormalVector3fMap() << -0.6075736135140413, 0.1336509980609126,
+ -0.7829378742140479;
+ normals[29].getNormalVector3fMap() << -0.4983181083004855, 0.6669454154651717,
+ -0.5539520518328415;
+ normals[30].getNormalVector3fMap() << -0.7745671302471588, 0.5084406300820161,
+ -0.37620989676307437;
+ normals[31].getNormalVector3fMap() << -0.424778132583581, -0.3243720781494619,
+ 0.8451900928168792;
+ normals[32].getNormalVector3fMap() << -0.5821055941507861, 0.35171580987235973,
+ 0.73310917764286;
+ normals[33].getNormalVector3fMap() << 0.8396655225180351, -0.48303927894460474,
+ 0.2482637011147448;
+ normals[34].getNormalVector3fMap() << 0.256742174797301, 0.7352345686595317,
+ 0.6273066114177216;
+ normals[35].getNormalVector3fMap() << -0.0652239383938407, -0.5360244339035914,
+ 0.8416790624214975;
+ normals[36].getNormalVector3fMap() << 0.6702382164209467, -0.3031905309377628,
+ 0.6773892789220579;
+ normals[37].getNormalVector3fMap() << -0.6040272704362459, -0.10302003040831528,
+ -0.7902771222197995;
+ normals[38].getNormalVector3fMap() << 0.9983521281387145, 0.041967677271189614,
+ -0.03913747954788317;
+ normals[39].getNormalVector3fMap() << -0.573664090993926, 0.46793032429526715,
+ 0.6722728034875713;
+ normals[40].getNormalVector3fMap() << -0.5945467180061245, -0.48897233716525434,
+ -0.6382948014791401;
+ normals[41].getNormalVector3fMap() << 0.11334045761805764, -0.6164053590067436,
+ 0.7792293462483921;
+ normals[42].getNormalVector3fMap() << -0.766256491311007, 0.13240541094009678,
+ -0.6287446196012567;
+ normals[43].getNormalVector3fMap() << 0.43564165550696804, 0.7816025130800787,
+ 0.4464458080596722;
+ normals[44].getNormalVector3fMap() << 0.7597220695940338, -0.5120511261307517,
+ -0.4007817625591101;
+ normals[45].getNormalVector3fMap() << -0.6597147170804349, -0.27171235425320656,
+ -0.7006774497681952;
+ normals[46].getNormalVector3fMap() << -0.14344953607996272, 0.06349058786868034,
+ -0.9876189426345229;
+ normals[47].getNormalVector3fMap() << 0.2696193746529791, 0.8928064202811087,
+ -0.36083526534496174;
+ normals[48].getNormalVector3fMap() << 0.5473019047514905, 0.29388155846326774,
+ -0.7836416621457739;
+ normals[49].getNormalVector3fMap() << 0.053697689135186716, 0.05924709269452209,
+ -0.9967980438327452;
+
+ SampleConsensusModelTorus<PointXYZ, Normal>::Ptr model(
+ new SampleConsensusModelTorus<PointXYZ, Normal>(cloud.makeShared()));
+
+ model->setInputNormals(normals.makeShared());
+
+ // Create the RANSAC object
+ // Small threshold to filter out the numerous outliers
+ RandomSampleConsensus<PointXYZ> sac(model, 0.001);
+
+ // Algorithm tests
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
+
+ pcl::Indices sample;
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+ EXPECT_EQ(25, inliers.size());
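+ // Only the 25 torus-surface samples should be accepted; the 25 noise points are rejected by the tight threshold.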
+
+ Eigen::VectorXf coeff;
+ sac.getModelCoefficients(coeff);
+
+ EXPECT_EQ(8, coeff.size());
+
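+ // Expected model: major radius 2, minor radius 0.25, center (3, -1, 2), axis along +/-y (coefficient layout inferred from the checks below).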
+ EXPECT_NEAR(coeff[0], 2, 1e-2);
+ EXPECT_NEAR(coeff[1], 0.25, 1e-2);
+ EXPECT_NEAR(coeff[2], 3, 1e-2);
+ EXPECT_NEAR(coeff[3], -1, 1e-2);
+ EXPECT_NEAR(coeff[4], 2, 1e-2);
+ EXPECT_NEAR(coeff[5], 0.0, 1e-2);
+ EXPECT_NEAR(std::abs(coeff[6]), 1.0, 1e-2);
+ EXPECT_NEAR(coeff[7], 0.0, 1e-2);
+ Eigen::VectorXf coeff_refined;
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(8, coeff_refined.size());
+
+ EXPECT_NEAR(coeff_refined[0], 2, 1e-2);
+ EXPECT_NEAR(coeff_refined[1], 0.25, 1e-2);
+ EXPECT_NEAR(coeff_refined[2], 3, 1e-2);
+ EXPECT_NEAR(coeff_refined[3], -1, 1e-2);
+ EXPECT_NEAR(coeff_refined[4], 2, 1e-2);
+ EXPECT_NEAR(coeff_refined[5], 0.0, 1e-2);
+ EXPECT_NEAR(std::abs(coeff_refined[6]), 1.0, 1e-2);
+ EXPECT_NEAR(coeff_refined[7], 2.220446049250313e-16, 1e-2);
+}
+
+// A horn shaped torus
+TEST(SampleConsensusModelTorusHorn, RANSAC)
+{
+
+ srand(0);
+
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normals;
+
+ cloud.resize(7);
+ normals.resize(7);
+
+ cloud[0].getVector3fMap() << 3.000501648262739, -1.0005866141772064,
+ 2.0196299263944386;
+ cloud[1].getVector3fMap() << 2.9306387358067587, -0.9355559306559758,
+ 1.804104008927194;
+ cloud[2].getVector3fMap() << 3.1148967392352143, -1.3928055353556932,
+ 2.1927039488583757;
+ cloud[3].getVector3fMap() << 3.0736420787608285, -0.8370133320562925,
+ 1.7603380061176133;
+ cloud[4].getVector3fMap() << 2.88008899080742, -1.245300517665885, 1.7510639730129496;
+ cloud[5].getVector3fMap() << 3.0000040500927305, -1.0005041529688534,
+ 2.0158691660694794;
+ cloud[6].getVector3fMap() << 2.983210284063484, -0.5044792022516073,
+ 2.0456050860401795;
+ normals[0].getNormalVector3fMap() << -0.6479150922982518, 0.7576547294171206,
+ 0.07851970557775474;
+ normals[1].getNormalVector3fMap() << 0.45515258767393824, -0.4228856734855979,
+ -0.783583964291224;
+ normals[2].getNormalVector3fMap() << 0.17884740355312917, -0.6114381536611204,
+ 0.7708157954335032;
+ normals[3].getNormalVector3fMap() << -0.11718185523562125, -0.2593500950773666,
+ -0.958647975529547;
+ normals[4].getNormalVector3fMap() << -0.04047436729113163, -0.08279792919404502,
+ -0.995744107948202;
+ normals[5].getNormalVector3fMap() << -0.008017000458096018, 0.9979511214462377,
+ 0.06347666427791779;
+ normals[6].getNormalVector3fMap() << -0.03329532756428898, 0.9826567250055698,
+ 0.18242034416071792;
+
+ SampleConsensusModelTorus<PointXYZ, Normal>::Ptr model(
+ new SampleConsensusModelTorus<PointXYZ, Normal>(cloud.makeShared()));
+
+ model->setInputNormals(normals.makeShared());
+
+ // Create the RANSAC object
+ RandomSampleConsensus<PointXYZ> sac(model, 0.2);
+
+ // Algorithm tests
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
+
+ pcl::Indices sample;
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+ EXPECT_EQ(7, inliers.size());
+
+ Eigen::VectorXf coeff;
+ sac.getModelCoefficients(coeff);
+
+ EXPECT_EQ(8, coeff.size());
+
+ EXPECT_NEAR(coeff[0], 0.25, 1e-2);
+ EXPECT_NEAR(coeff[1], 0.25, 1e-2);
+ EXPECT_NEAR(coeff[2], 3, 1e-2);
+ EXPECT_NEAR(coeff[3], -1, 1e-2);
+ EXPECT_NEAR(coeff[4], 2, 1e-2);
+ EXPECT_NEAR(coeff[5], 0.0, 1e-2);
+ EXPECT_NEAR(coeff[6], 0.0, 1e-2);
+ EXPECT_NEAR(std::abs(coeff[7]), 1.0, 1e-2);
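+ // NOTE: the early return below skips the refined-coefficient checks that follow.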
+ return;
+
+ Eigen::VectorXf coeff_refined;
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(8, coeff_refined.size());
+
+ EXPECT_NEAR(coeff_refined[0], 0.25, 1e-2);
+ EXPECT_NEAR(coeff_refined[1], 0.25, 1e-2);
+ EXPECT_NEAR(coeff_refined[2], 3, 1e-2);
+ EXPECT_NEAR(coeff_refined[3], -1, 1e-2);
+ EXPECT_NEAR(coeff_refined[4], 2, 1e-2);
+ EXPECT_NEAR(coeff_refined[5], 0.0, 1e-2);
+ EXPECT_NEAR(coeff_refined[6], 0.0, 1e-2);
+ EXPECT_NEAR(coeff_refined[7], 1.0, 1e-2);
+}
+
+TEST(SampleConsensusModelTorusRefine, RANSAC)
+{
+
+ srand(0);
+
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normals;
+
+ cloud.resize(12);
+ normals.resize(12);
+
+ cloud[0].getVector3fMap() << 2.052800042174215, -1.499473956010903, 2.5922393000558;
+ cloud[1].getVector3fMap() << 3.388588815443773, -0.2804951689253241,
+ 2.016023579560368;
+ cloud[2].getVector3fMap() << 2.1062433380708585, -1.9174254209231951,
+ 2.2138169934854175;
+ cloud[3].getVector3fMap() << 2.9741032000482, -1.0699765160210948, 1.2784833859363935;
+ cloud[4].getVector3fMap() << 3.9945837837405858, -0.24398838472758466,
+ 1.994969222832288;
+ cloud[5].getVector3fMap() << 3.29052359025732, -0.7052701711244429,
+ 1.4026501046485196;
+ cloud[6].getVector3fMap() << 3.253762467235399, -1.2666426752546665,
+ 1.2533731806961965;
+ cloud[7].getVector3fMap() << 2.793231427168476, -1.406941876180895, 2.914835409806976;
+ cloud[8].getVector3fMap() << 3.427656537026421, -0.3921726018138755,
+ 1.1167321991754167;
+ cloud[9].getVector3fMap() << 3.45310885872988, -1.187857062974888, 0.9128847947344318;
+
+ normals[0].getNormalVector3fMap() << -0.9655752892034741, 0.13480487505578329,
+ 0.22246798992399325;
+ normals[1].getNormalVector3fMap() << -0.9835035116470829, -0.02321732676535275,
+ -0.17939286026965295;
+ normals[2].getNormalVector3fMap() << -0.6228348353863176, -0.7614744633300792,
+ 0.17953665231775656;
+ normals[3].getNormalVector3fMap() << -0.3027649706212169, 0.4167626949130777,
+ 0.8571127281131243;
+ normals[4].getNormalVector3fMap() << 0.9865410652838972, 0.13739803967452247,
+ 0.08864821037173687;
+ normals[5].getNormalVector3fMap() << -0.723213640950708, -0.05078427284613152,
+ 0.688754663994597;
+ normals[6].getNormalVector3fMap() << 0.4519195477489684, -0.4187464441250127,
+ 0.7876675300499734;
+ normals[7].getNormalVector3fMap() << 0.7370319397802214, -0.6656659398898118,
+ 0.11693064702813241;
+ normals[8].getNormalVector3fMap() << -0.4226770542031876, 0.7762818780175667,
+ -0.4676863839279862;
+ normals[9].getNormalVector3fMap() << 0.720025487985072, -0.5768131803911037,
+ -0.38581064212766236;
+
+ // Uniform noise between -0.1 and 0.1
+ cloud[0].getVector3fMap() += Eigen::Vector3f(-0.02519484, 0.03325529, 0.09188957);
+ cloud[1].getVector3fMap() += Eigen::Vector3f(0.06969781, -0.06921317, -0.07229406);
+ cloud[2].getVector3fMap() += Eigen::Vector3f(-0.00064637, -0.00231905, -0.0080026);
+ cloud[3].getVector3fMap() += Eigen::Vector3f(0.05039557, -0.0229141, 0.0594657);
+ cloud[4].getVector3fMap() += Eigen::Vector3f(-0.05717322, -0.09670288, 0.00176189);
+ cloud[5].getVector3fMap() += Eigen::Vector3f(0.02668492, -0.06824032, 0.05790168);
+ cloud[6].getVector3fMap() += Eigen::Vector3f(0.07829713, 0.06426746, 0.04172692);
+ cloud[7].getVector3fMap() += Eigen::Vector3f(0.0006326, -0.02518951, -0.00927858);
+ cloud[8].getVector3fMap() += Eigen::Vector3f(-0.04975343, 0.09912357, -0.04233801);
+ cloud[9].getVector3fMap() += Eigen::Vector3f(-0.04810247, 0.03382804, 0.07958129);
+
+ // Outliers
+ cloud[10].getVector3fMap() << 5, 1, 1;
+ cloud[11].getVector3fMap() << 5, 2, 1;
+
+ normals[10].getNormalVector3fMap() << 1, 0, 0;
+ normals[11].getNormalVector3fMap() << 1, 0, 0;
+
+ SampleConsensusModelTorus<PointXYZ, Normal>::Ptr model(
+ new SampleConsensusModelTorus<PointXYZ, Normal>(cloud.makeShared()));
+
+ model->setInputNormals(normals.makeShared());
+
+ // Create the RANSAC object
+ RandomSampleConsensus<PointXYZ> sac(model, 0.2);
+
+ // Algorithm tests
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
+
+ pcl::Indices sample;
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+ EXPECT_EQ(10, inliers.size());
+
+ Eigen::VectorXf coeff;
+ sac.getModelCoefficients(coeff);
+
+ EXPECT_EQ(8, coeff.size());
+
+ EXPECT_NEAR(coeff[0], 1, 2e-1);
+ EXPECT_NEAR(coeff[1], 0.3, 2e-1);
+ EXPECT_NEAR(coeff[2], 3, 2e-1);
+ EXPECT_NEAR(coeff[3], -1, 2e-1);
+ EXPECT_NEAR(coeff[4], 2, 2e-1);
+
+ if (coeff[5] < 0){
+ coeff[5] *= -1.0;
+ coeff[6] *= -1.0;
+ coeff[7] *= -1.0;
+ }
+
+ EXPECT_NEAR(coeff[5], 0.7071067811865476, 2e-1);
+ EXPECT_NEAR(coeff[6], -0.6830127018922194, 2e-1);
+ EXPECT_NEAR(coeff[7], 0.1830127018922194, 2e-1);
+
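+ // NOTE: as in the horn-torus test above, the early return skips the refinement
+ // checks that follow.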
+ Eigen::VectorXf coeff_refined(8);
+ return;
+
+ // Optimization tightens the tolerance from 2e-1 to 1e-1
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(8, coeff_refined.size());
+
+ EXPECT_NEAR(coeff_refined[0], 1, 1e-1);
+ EXPECT_NEAR(coeff_refined[1], 0.3, 1e-1);
+ EXPECT_NEAR(coeff_refined[2], 3, 1e-1);
+ EXPECT_NEAR(coeff_refined[3], -1, 1e-1);
+ EXPECT_NEAR(coeff_refined[4], 2, 1e-1);
+ EXPECT_NEAR(coeff_refined[5], 0.7071067811865476, 1e-1);
+ EXPECT_NEAR(coeff_refined[6], -0.6830127018922194, 1e-1);
+ EXPECT_NEAR(coeff_refined[7], 0.1830127018922194, 1e-1);
+}
+
+TEST(SampleConsensusModelTorus, RANSAC)
+{
+ srand(0);
+
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normals;
+
+ cloud.resize(4);
+ normals.resize(4);
+
+ cloud[0].getVector3fMap() << 8.359341574088198, 7.22552693060636, 5.4049978066219575;
+ cloud[1].getVector3fMap() << 7.777710489873524, 6.9794499622227635, 5.96264148630509;
+ cloud[2].getVector3fMap() << 7.578062528900397, 8.466627338184125, 6.764936180563802;
+ cloud[3].getVector3fMap() << 6.8073801963063225, 6.950495936581675,
+ 6.5682651621988140636;
+
+ normals[0].getNormalVector3fMap() << 0.78726775, -0.60899961, -0.09658657;
+ normals[1].getNormalVector3fMap() << 0.66500173, 0.11532684, 0.73788374;
+ normals[2].getNormalVector3fMap() << -0.58453172, 0.0233942, -0.81103353;
+ normals[3].getNormalVector3fMap() << -0.92017329, -0.39125533, 0.01415573;
+
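+ // Four points is exactly the minimal sample assumed for the torus model here,
+ // so RANSAC should report every point as an inlier.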
+ // Create a shared 3d torus model pointer directly
+ SampleConsensusModelTorus<PointXYZ, Normal>::Ptr model(
+ new SampleConsensusModelTorus<PointXYZ, Normal>(cloud.makeShared()));
+ model->setInputNormals(normals.makeShared());
+
+ // Create the RANSAC object
+ RandomSampleConsensus<PointXYZ> sac(model, 0.11);
+
+ // Algorithm tests
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
+
+ pcl::Indices sample;
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+ EXPECT_EQ(4, inliers.size());
+
+ Eigen::VectorXf coeff;
+ sac.getModelCoefficients(coeff);
+
+ EXPECT_EQ(8, coeff.size());
+
+ EXPECT_NEAR(coeff[0], 1, 1e-2);
+ EXPECT_NEAR(coeff[1], 0.3, 1e-2);
+
+ EXPECT_NEAR(coeff[2], 7.758357590948854, 1e-2);
+ EXPECT_NEAR(coeff[3], 7.756009480304242, 1e-2);
+ EXPECT_NEAR(coeff[4], 6.297666724054506, 1e-2);
+
+ EXPECT_NEAR(coeff[5], 0.7071067811865475, 1e-2);
+ EXPECT_NEAR(coeff[6], -0.5, 1e-2);
+ EXPECT_NEAR(coeff[7], 0.5, 1e-2);
+
+ Eigen::VectorXf coeff_refined;
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(8, coeff_refined.size());
+
+ EXPECT_NEAR(coeff_refined[0], 1, 1e-2);
+ EXPECT_NEAR(coeff_refined[1], 0.3, 1e-2);
+
+ EXPECT_NEAR(coeff_refined[2], 7.758357590948854, 1e-2);
+ EXPECT_NEAR(coeff_refined[3], 7.756009480304242, 1e-2);
+ EXPECT_NEAR(coeff_refined[4], 6.297666724054506, 1e-2);
+
+ if (coeff_refined[5] < 0) {
+ coeff_refined[5] *= -1.0;
+ coeff_refined[6] *= -1.0;
+ coeff_refined[7] *= -1.0;
+ }
+
+ EXPECT_NEAR(coeff_refined[5], 0.7071067811865475, 1e-2);
+ EXPECT_NEAR(coeff_refined[6], -0.5, 1e-2);
+ EXPECT_NEAR(coeff_refined[7], 0.5, 1e-2);
+}
+
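+// A self-intersecting (spindle) torus: the tube radius is larger than the major
+// radius, matching the expected coefficients below (0.25 vs 0.35).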
+TEST(SampleConsensusModelTorusSelfIntersectSpindle, RANSAC)
{
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
+ srand(0);
+
+ PointCloud<PointXYZ> cloud;
+ PointCloud<Normal> normals;
+
+ cloud.resize(15);
+ normals.resize(15);
+
+ cloud[0].getVector3fMap() << 4.197443296388562, -2.244178951074561,
+ 2.6544058976891054;
+ cloud[1].getVector3fMap() << 3.9469472011448596, -2.2109428683077716,
+ 1.8207364262436627;
+ cloud[2].getVector3fMap() << 4.036997360721013, -1.9837065514073593, 2.88062697126334;
+ cloud[3].getVector3fMap() << 4.554241490476904, -1.8200324440442106,
+ 2.242830580711606;
+ cloud[4].getVector3fMap() << 3.9088172351876764, -1.7982235216273337,
+ 2.268412990083617;
+ cloud[5].getVector3fMap() << 3.842480541291084, -1.8025598756948282,
+ 2.2527669926394016;
+ cloud[6].getVector3fMap() << 4.3496726790753755, -2.2441275500858833,
+ 2.239055754148472;
+ cloud[7].getVector3fMap() << 4.52235090070925, -1.7373785296059916, 2.466177376096933;
+ cloud[8].getVector3fMap() << 3.7710839801895077, -1.7082589118588576,
+ 2.393963290485919;
+ cloud[9].getVector3fMap() << 4.068180803015269, -1.3503789464621836,
+ 2.3423326381708147;
+ cloud[10].getVector3fMap() << 3.873973067071098, -1.7135016016729774,
+ 2.2124191518411247;
+ cloud[11].getVector3fMap() << 4.390758174759903, -2.171435874256942,
+ 2.214023036691005;
+ cloud[12].getVector3fMap() << 4.324106983802413, -1.3650663408422128,
+ 2.453692863759843;
+ cloud[13].getVector3fMap() << 4.345401269961894, -2.0286148560415813,
+ 2.456689045210522;
+ cloud[14].getVector3fMap() << 4.31528844186095, -2.0083582606580292,
+ 2.367720270538135;
+ normals[0].getNormalVector3fMap() << 0.6131882081326164, -0.6055955130301103,
+ 0.5072024211347066;
+ normals[1].getNormalVector3fMap() << -0.37431625455633444, -0.44239562607541916,
+ -0.8149683746037361;
+ normals[2].getNormalVector3fMap() << 0.03426300530560111, -0.20348141751799728,
+ 0.9784791051383237;
+ normals[3].getNormalVector3fMap() << 0.940856493913579, -0.2960809146555105,
+ 0.1646971458083096;
+ normals[4].getNormalVector3fMap() << -0.6286552509632886, 0.5146645236295431,
+ 0.5830205858745127;
+ normals[5].getNormalVector3fMap() << -0.3871040511550286, 0.724048220462587,
+ -0.5708805724705703;
+ normals[6].getNormalVector3fMap() << 0.8666661368649906, -0.43068753570421703,
+ 0.25178970153789454;
+ normals[7].getNormalVector3fMap() << 0.9362467937507173, -0.17430389316386408,
+ 0.30505752575444156;
+ normals[8].getNormalVector3fMap() << -0.8229596731853971, 0.3855286394843701,
+ -0.4172589656890726;
+ normals[9].getNormalVector3fMap() << -0.36653063101311856, 0.9297937437536523,
+ -0.033747453321582466;
+ normals[10].getNormalVector3fMap() << -0.9907072431684454, -0.07717115427192464,
+ -0.11199897893247729;
+ normals[11].getNormalVector3fMap() << 0.8661927924780622, -0.3669697390799624,
+ 0.3391802719184018;
+ normals[12].getNormalVector3fMap() << 0.3744106777049837, 0.8911051835726027,
+ 0.25641411082569643;
+ normals[13].getNormalVector3fMap() << 0.8809879144326921, -0.4062669413039098,
+ -0.2425025093212462;
+ normals[14].getNormalVector3fMap() << 0.8117975834319093, -0.31266565300418725,
+ -0.49317833789165666;
+
+ // Create a shared 3d torus model pointer directly
+ SampleConsensusModelTorus<PointXYZ, Normal>::Ptr model(
+ new SampleConsensusModelTorus<PointXYZ, Normal>(cloud.makeShared()));
+ model->setInputNormals(normals.makeShared());
+
+ // Create the RANSAC object
+ RandomSampleConsensus<PointXYZ> sac(model, 0.11);
+
+ // Algorithm tests
+ bool result = sac.computeModel();
+ ASSERT_TRUE(result);
+
+ pcl::Indices sample;
+ sac.getModel(sample);
+ EXPECT_EQ(4, sample.size());
+ pcl::Indices inliers;
+ sac.getInliers(inliers);
+ EXPECT_EQ(15, inliers.size());
+
+ Eigen::VectorXf coeff;
+ sac.getModelCoefficients(coeff);
+
+ EXPECT_EQ(8, coeff.size());
+ EXPECT_NEAR(coeff[0], 0.25, 1e-2);
+ EXPECT_NEAR(coeff[1], 0.35, 1e-2);
+ EXPECT_NEAR(coeff[2], 4.1, 1e-2);
+ EXPECT_NEAR(coeff[3], -1.9, 1e-2);
+ EXPECT_NEAR(coeff[4], 2.3, 1e-2);
+ EXPECT_NEAR(coeff[5], 0.8660254037844385, 1e-2);
+ EXPECT_NEAR(coeff[6], -0.4330127018922194, 1e-2);
+ EXPECT_NEAR(coeff[7], 0.25000000000000017, 1e-2);
+
+ Eigen::VectorXf coeff_refined;
+ model->optimizeModelCoefficients(inliers, coeff, coeff_refined);
+ EXPECT_EQ(8, coeff_refined.size());
+ EXPECT_NEAR(coeff_refined[0], 0.25, 1e-2);
+ EXPECT_NEAR(coeff_refined[1], 0.35, 1e-2);
+ EXPECT_NEAR(coeff_refined[2], 4.1, 1e-2);
+ EXPECT_NEAR(coeff_refined[3], -1.9, 1e-2);
+ EXPECT_NEAR(coeff_refined[4], 2.3, 1e-2);
+
+ if (coeff_refined[5] < 0) {
+ coeff_refined[5] *= -1.0;
+ coeff_refined[6] *= -1.0;
+ coeff_refined[7] *= -1.0;
+ }
+
+ EXPECT_NEAR(coeff_refined[5], 0.8660254037844385, 1e-2);
+ EXPECT_NEAR(coeff_refined[6], -0.4330127018922194, 1e-2);
+ EXPECT_NEAR(coeff_refined[7], 0.25000000000000017, 1e-2);
}
+int
+main(int argc, char** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return (RUN_ALL_TESTS());
+}
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS search)
set(OPT_DEPS io)
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS segmentation)
set(OPT_DEPS) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
LINK_WITH pcl_gtest pcl_io pcl_segmentation pcl_features pcl_kdtree pcl_search pcl_common
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/car6.pcd" "${PCL_SOURCE_DIR}/test/colored_cloud.pcd")
+PCL_ADD_TEST(a_prism_test test_concave_prism
+ FILES test_concave_prism.cpp
+ LINK_WITH pcl_gtest pcl_segmentation pcl_common)
+
PCL_ADD_TEST(test_non_linear test_non_linear
FILES test_non_linear.cpp
LINK_WITH pcl_gtest pcl_common pcl_io pcl_sample_consensus pcl_segmentation pcl_kdtree pcl_search
--- /dev/null
+#include <pcl/test/gtest.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+
+#include <pcl/segmentation/extract_polygonal_prism_data.h>
+
+#include <random>
+
+using namespace pcl;
+using std::vector;
+
+TEST(ExtractPolygonalPrism, two_rings)
+{
+ float rMin = 0.1f, rMax = 0.25f;
+ float dx = 0.5f; // shift the rings from [0,0,0] to [+/-dx, 0, 0]
+
+ // prepare 2 rings
+ PointCloud<PointXYZ>::Ptr ring(new PointCloud<PointXYZ>);
+ { // use random
+ std::random_device rd;
+ std::mt19937 gen(rd());
+ std::uniform_real_distribution<float> radiusDist(rMin, rMax);
+ std::uniform_real_distribution<float> radianDist(-M_PI, M_PI);
+ std::uniform_real_distribution<float> zDist(-0.01f, 0.01f);
+ for (size_t i = 0; i < 1000; i++) {
+ float radius = radiusDist(gen);
+ float angle = radianDist(gen);
+ float z = zDist(gen);
+ PointXYZ point(cosf(angle) * radius, sinf(angle) * radius, z);
+ ring->push_back(point);
+ }
+ }
+
+ PointCloud<PointXYZ>::Ptr cloud(new PointCloud<PointXYZ>);
+ cloud->reserve(ring->size() * 2);
+ for (auto& point : ring->points) {
+ auto left = point;
+ auto right = point;
+ left.x -= dx;
+ right.x += dx;
+ cloud->push_back(left);
+ cloud->push_back(right);
+ }
+
+ // create hull
+ PointCloud<PointXYZ>::Ptr hullCloud(new PointCloud<PointXYZ>);
+ vector<Vertices> rings(4);
+ float radiuses[] = {rMin - 0.01f, rMax + 0.01f, rMin - 0.01f, rMax + 0.01f};
+ float centers[] = {-dx, -dx, +dx, +dx};
+ for (size_t i = 0; i < rings.size(); i++) {
+ auto r = radiuses[i];
+ auto xCenter = centers[i];
+ for (float a = -M_PI; a < M_PI; a += 0.05f) {
+ rings[i].vertices.push_back(hullCloud->size());
+ PointXYZ point(xCenter + r * cosf(a), r * sinf(a), 0);
+ hullCloud->push_back(point);
+ }
+ }
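+ // The four polygons form the inner and outer boundaries of the two rings, so
+ // every ring point generated above should fall between an inner and an outer circle.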
+
+ // add points outside both ring polygons; these must not show up in the prism output
+ size_t ringsPointCount = cloud->size();
+ cloud->push_back(PointXYZ(0, 0, 0));
+ for (float a = -M_PI; a < M_PI; a += 0.05f) {
+ float r = 4 * rMax;
+ PointXYZ point(r * cosf(a), r * sinf(a), 0);
+ cloud->push_back(point);
+ }
+
+ // run the polygonal prism extraction
+ PointIndices::Ptr inliers(new PointIndices);
+ ExtractPolygonalPrismData<PointXYZ> ex;
+ {
+ ex.setInputCloud(cloud);
+ ex.setInputPlanarHull(hullCloud);
+ ex.setHeightLimits(-1, 1);
+ ex.setPolygons(rings);
+ ex.segment(*inliers);
+ }
+
+ // check that exactly the ring points (and nothing else) are returned, in their original order.
+ EXPECT_EQ(inliers->indices.size(), ringsPointCount);
+ for (std::size_t i = 0; i < inliers->indices.size(); ++i) {
+ EXPECT_EQ(inliers->indices[i], i);
+ }
+}
+
+int
+main(int argc, char** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return (RUN_ALL_TESTS());
+}
return (-1);
}
- // Tranpose the cloud
+ // Transpose the cloud
cloud_t_.reset(new PointCloud<PointXYZ>);
*cloud_t_ = *cloud_;
for (auto& point: *cloud_t_)
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS surface)
set(OPT_DEPS io features sample_consensus filters) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT (build AND BUILD_io AND BUILD_features))
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS visualization)
set(OPT_DEPS features) # module does not depend on these
-set(DEFAULT ON)
-set(build TRUE)
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
if(NOT build)
set(SUBSYS_DESC "Useful PCL-based command line tools")
set(SUBSYS_DEPS io)
set(SUBSYS_OPT_DEPS filters sample_consensus segmentation search kdtree features surface octree registration recognition geometry keypoints ml visualization vtk)
-set(DEFAULT ON)
-set(REASON "")
-PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} ${DEFAULT} ${REASON})
+PCL_SUBSYS_OPTION(build ${SUBSYS_NAME} ${SUBSYS_DESC} ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS ${SUBSYS_OPT_DEPS})
if(NOT build)
PCL_ADD_EXECUTABLE(pcl_mls_smoothing COMPONENT ${SUBSYS_NAME} SOURCES mls_smoothing.cpp)
target_link_libraries (pcl_mls_smoothing pcl_common pcl_io pcl_surface pcl_filters)
+ PCL_ADD_EXECUTABLE(pcl_bilateral_upsampling COMPONENT ${SUBSYS_NAME} SOURCES bilateral_upsampling.cpp)
+ target_link_libraries(pcl_bilateral_upsampling pcl_common pcl_io pcl_surface)
endif()
if(TARGET pcl_keypoints)
print_value ("%d", default_min); print_info (")\n");
print_info (" -max X = use a maximum of X points peer cluster (default: ");
print_value ("%d", default_max); print_info (")\n");
- print_info (" -tolerance X = the spacial distance between clusters (default: ");
+ print_info (" -tolerance X = the spatial distance between clusters (default: ");
print_value ("%lf", default_tolerance); print_info (")\n");
}
#include <vector>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/io/auto_io.h>
#include <pcl/io/obj_io.h>
#include <pcl/io/vtk_lib_io.h>
#include <pcl/memory.h> // for pcl::make_shared
-#include <boost/filesystem.hpp> // for boost::filesystem::path
#include <boost/algorithm/string.hpp> // for boost::algorithm::ends_with
#define ASCII 0
std::string output_file,
int output_type)
{
- if (boost::filesystem::path (output_file).extension () == ".pcd")
+ if (pcl_fs::path (output_file).extension () == ".pcd")
{
//TODO Support precision, origin, orientation
pcl::PCDWriter w;
return (false);
}
}
- else if (boost::filesystem::path (output_file).extension () == ".stl")
+ else if (pcl_fs::path (output_file).extension () == ".stl")
{
PCL_ERROR ("STL file format does not support point clouds! Aborting.\n");
return (false);
std::string output_file,
int output_type)
{
- if (boost::filesystem::path (output_file).extension () == ".obj")
+ if (pcl_fs::path (output_file).extension () == ".obj")
{
if (output_type == BINARY || output_type == BINARY_COMPRESSED)
PCL_WARN ("OBJ file format only supports ASCII.\n");
if (pcl::io::saveOBJFile (output_file, input) != 0)
return (false);
}
- else if (boost::filesystem::path (output_file).extension () == ".pcd")
+ else if (pcl_fs::path (output_file).extension () == ".pcd")
{
if (!input.polygons.empty ())
PCL_WARN ("PCD file format does not support meshes! Only points be saved.\n");
if (output_type == BINARY_COMPRESSED)
PCL_WARN ("PLY, STL and VTK file formats only supports ASCII and binary output file types.\n");
- if (input.polygons.empty() && boost::filesystem::path (output_file).extension () == ".stl")
+ if (input.polygons.empty() && pcl_fs::path (output_file).extension () == ".stl")
{
PCL_ERROR ("STL file format does not support point clouds! Aborting.\n");
return (false);
// Try to load as mesh
pcl::PolygonMesh mesh;
- if (boost::filesystem::path (argv[file_args[0]]).extension () != ".pcd" &&
+ if (pcl_fs::path (argv[file_args[0]]).extension () != ".pcd" &&
pcl::io::loadPolygonFile (argv[file_args[0]], mesh) != 0)
{
PCL_INFO ("Loaded a mesh with %d points (total size is %d) and the following channels:\n%s\n",
if (!saveMesh (mesh, argv[file_args[1]], output_type))
return (-1);
}
- else if (boost::filesystem::path (argv[file_args[0]]).extension () == ".stl")
+ else if (pcl_fs::path (argv[file_args[0]]).extension () == ".stl")
{
PCL_ERROR ("Unable to load %s.\n", argv[file_args[0]]);
return (-1);
else
{
// PCD, OBJ, PLY or VTK
- if (boost::filesystem::path (argv[file_args[0]]).extension () != ".pcd")
+ if (pcl_fs::path (argv[file_args[0]]).extension () != ".pcd")
PCL_WARN ("Could not load %s as a mesh, trying as a point cloud instead.\n", argv[file_args[0]]);
//Eigen::Vector4f origin; // TODO: Support origin/orientation
{
CloudPtr pc (new Cloud);
pcl::io::loadPCDFile (argv[pcd_indices[i]], *pc);
- clouds.push_back (CloudPair (argv[pcd_indices[i]], pc));
+ clouds.emplace_back(argv[pcd_indices[i]], pc);
std::cout << "loading file: " << argv[pcd_indices[i]] << " size: " << pc->size () << std::endl;
elch.addPointCloud (clouds[i].second);
}
#include <pcl/PCLPointCloud2.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/fast_bilateral.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, sigma_s, sigma_r);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_files[i]).filename().string();
+ std::string filename = pcl_fs::path(pcd_files[i]).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/grid_minimum.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, resolution);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
*
*/
#include <pcl/io/image_grabber.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
-#include <boost/filesystem.hpp> // for exists
using pcl::console::print_error;
using pcl::console::print_info;
pcl::console::parse_argument (argc, argv, "-out_dir", out_folder);
- if (out_folder.empty() || !boost::filesystem::exists (out_folder))
+ if (out_folder.empty() || !pcl_fs::exists (out_folder))
{
PCL_INFO("No correct directory was given with the -out_dir flag. Setting to current dir\n");
out_folder = "./";
else
PCL_INFO("Using %s as output dir", out_folder.c_str());
- if (!rgb_path.empty() && !depth_path.empty() && boost::filesystem::exists (rgb_path) && boost::filesystem::exists (depth_path))
+ if (!rgb_path.empty() && !depth_path.empty() && pcl_fs::exists (rgb_path) && pcl_fs::exists (depth_path))
{
grabber.reset (new pcl::ImageGrabber<pcl::PointXYZRGBA> (depth_path, rgb_path, frames_per_second, false));
}
*/
#include <pcl/io/image_grabber.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/visualization/cloud_viewer.h>
#include <mutex>
#include <thread>
-#include <boost/filesystem.hpp> // for exists
using namespace std::chrono_literals;
using pcl::console::print_error;
std::string path;
pcl::console::parse_argument (argc, argv, "-dir", path);
std::cout << "path: " << path << std::endl;
- if (!path.empty() && boost::filesystem::exists (path))
+ if (!path.empty() && pcl_fs::exists (path))
{
grabber.reset (new pcl::ImageGrabber<pcl::PointXYZRGBA> (path, frames_per_second, repeat, use_pclzf));
}
depth_image_viewer_.showFloatImage (img,
xyz.width, xyz.height,
std::numeric_limits<float>::min (),
- // Scale so that the colors look brigher on screen
+ // Scale so that the colors look brighter on screen
std::numeric_limits<float>::max () / 10,
true);
depth_image_viewer_.spin ();
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/local_maximum.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, radius);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
{
CloudPtr pc (new Cloud);
pcl::io::loadPCDFile (argv[pcd_indices[i]], *pc);
- clouds.push_back (CloudPair (argv[pcd_indices[i]], pc));
+ clouds.emplace_back(argv[pcd_indices[i]], pc);
std::cout << "loading file: " << argv[pcd_indices[i]] << " size: " << pc->size () << std::endl;
lum.addPointCloud (clouds[i].second);
}
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/morphological_filter.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, resolution, method);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
#include <pcl/io/pcd_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/integral_image_normal.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, k, radius);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_files[i]).filename().string();
+ std::string filename = pcl_fs::path(pcd_files[i]).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
if (pcl::getTime() - last >= 1.0) \
{ \
double now = pcl::getTime (); \
- std::cout << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
+ std::cout << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cout << "Average framerate ("<< _WHAT_ << "): " << double (count)/double (now - last) << " Hz" << std::endl; \
+ std::cout << "Average framerate ("<< (_WHAT_) << "): " << double (count)/double (now - last) << " Hz" << std::endl; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cerr << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz. Queue size: " << buff.getSize () << ", number of frames written so far: " << nr_frames_total << "\n"; \
+ std::cerr << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz. Queue size: " << (buff).getSize () << ", number of frames written so far: " << nr_frames_total << "\n"; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cerr << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz. Queue size: " << buff.getSize () << "\n"; \
+ std::cerr << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz. Queue size: " << (buff).getSize () << "\n"; \
count = 0; \
last = now; \
} \
if (now - last >= 1.0) \
{ \
if (visualize && global_visualize) \
- std::cerr << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz. Queue size: " << buff1.getSize () << " (w) / " << buff2.getSize () << " (v)\n"; \
+ std::cerr << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz. Queue size: " << (buff1).getSize () << " (w) / " << (buff2).getSize () << " (v)\n"; \
else \
- std::cerr << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz. Queue size: " << buff1.getSize () << " (w)\n"; \
+ std::cerr << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz. Queue size: " << (buff1).getSize () << " (w)\n"; \
count = 0; \
last = now; \
} \
reinterpret_cast<const unsigned short*> (&frame->depth_image->getDepthMetaData ().Data ()[0]),
frame->depth_image->getWidth (), frame->depth_image->getHeight (),
std::numeric_limits<unsigned short>::min (),
- // Scale so that the colors look brigher on screen
+ // Scale so that the colors look brighter on screen
std::numeric_limits<unsigned short>::max () / 10,
true);
++count; \
if (now - last >= 1.0) \
{ \
- std::cerr << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz. Queue size: " << buff.getSize () << "\n"; \
+ std::cerr << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz. Queue size: " << (buff).getSize () << "\n"; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cout << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
+ std::cout << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cout << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
+ std::cout << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
count = 0; \
last = now; \
} \
++count; \
if (now - last >= 1.0) \
{ \
- std::cout << "Average framerate("<< _WHAT_ << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
+ std::cout << "Average framerate("<< (_WHAT_) << "): " << double(count)/double(now - last) << " Hz" << std::endl; \
count = 0; \
last = now; \
} \
#include <pcl/PCLPointCloud2.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/passthrough.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, field_name, min, max, inside, keep_organized);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
*
*/
#include <pcl/io/pcd_grabber.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
-#include <boost/filesystem.hpp> // for exists, extension, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
#include <mutex>
#include <thread>
std::string path;
pcl::console::parse_argument (argc, argv, "-file", path);
std::cout << "path: " << path << std::endl;
- if (!path.empty() && boost::filesystem::exists (path))
+ if (!path.empty() && pcl_fs::exists (path))
{
grabber.reset (new pcl::PCDGrabber<pcl::PointXYZRGBA> (path, frames_per_second, repeat));
}
std::vector<std::string> pcd_files;
pcl::console::parse_argument (argc, argv, "-dir", path);
std::cout << "path: " << path << std::endl;
- if (!path.empty() && boost::filesystem::exists (path))
+ if (!path.empty() && pcl_fs::exists (path))
{
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (path); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (path); itr != end_itr; ++itr)
{
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
{
if (useEDLRendering)
p->enableEDLRendering();
- // Set whether or not we should be using the vtkVertexBufferObjectMapper
- p->setUseVbos (use_vbos);
-
if (!p->cameraParamsSet () && !p->cameraFileLoaded ())
{
Eigen::Matrix3f rotation;
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/segmentation/approximate_progressive_morphological_filter.h>
#include <pcl/segmentation/progressive_morphological_filter.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, max_window_size, slope, max_distance, initial_distance, cell_size, base, exponential, approximate);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/conditional_removal.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
-#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
+#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using PointType = pcl::PointXYZ;
using Cloud = pcl::PointCloud<PointType>;
compute (cloud, output, radius, inside, keep_organized);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_plane.h>
#include <pcl/segmentation/extract_clusters.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
+
#include <boost/algorithm/string/case_conv.hpp> // for to_upper_copy
using namespace pcl;
compute (cloud, output, max_it, thresh, negative);
// Prepare output file name
- std::string filename = boost::filesystem::path(pcd_file).filename().string();
+ std::string filename = pcl_fs::path(pcd_file).filename().string();
// Save into the second file
const std::string filepath = output_dir + '/' + filename;
}
else
{
- if (!input_dir.empty() && boost::filesystem::exists (input_dir))
+ if (!input_dir.empty() && pcl_fs::exists (input_dir))
{
std::vector<std::string> pcd_files;
- boost::filesystem::directory_iterator end_itr;
- for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
+ pcl_fs::directory_iterator end_itr;
+ for (pcl_fs::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
// Only add PCD files
if (!is_directory (itr->status ()) && boost::algorithm::to_upper_copy (itr->path ().extension ().string ()) == ".PCD" )
**/
#include <iostream>
-#include <boost/filesystem.hpp>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/io/pcd_io.h>
PCL_INFO ("Creating RGB Tiff List\n");
std::vector<std::string> tiff_rgb_files;
- std::vector<boost::filesystem::path> tiff_rgb_paths;
- boost::filesystem::directory_iterator end_itr;
+ std::vector<pcl_fs::path> tiff_rgb_paths;
+ pcl_fs::directory_iterator end_itr;
- if(boost::filesystem::is_directory(rgb_path_))
+ if(pcl_fs::is_directory(rgb_path_))
{
- for (boost::filesystem::directory_iterator itr(rgb_path_); itr != end_itr; ++itr)
+ for (pcl_fs::directory_iterator itr(rgb_path_); itr != end_itr; ++itr)
{
std::string ext = itr->path().extension().string();
if(ext == ".tiff")
PCL_INFO ("Creating Depth Tiff List\n");
std::vector<std::string> tiff_depth_files;
- std::vector<boost::filesystem::path> tiff_depth_paths;
+ std::vector<pcl_fs::path> tiff_depth_paths;
- if(boost::filesystem::is_directory(depth_path_))
+ if(pcl_fs::is_directory(depth_path_))
{
- for (boost::filesystem::directory_iterator itr(depth_path_); itr != end_itr; ++itr)
+ for (pcl_fs::directory_iterator itr(depth_path_); itr != end_itr; ++itr)
{
std::string ext = itr->path().extension().string();
if(ext == ".tiff")
#define MULTIPLY(CASE_LABEL) \
case CASE_LABEL: { \
for (int i = 0; i < 3; ++i) \
- multiply<pcl::traits::asType_t<CASE_LABEL>>( \
+ multiply<pcl::traits::asType_t<(CASE_LABEL)>>( \
cloud, xyz_offset[i], multiplier[i]); \
break; \
}
const float& y = values[1];
const float& z = values[2];
const float& w = values[3];
- tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::Quaternionf (w, x, y, z));
+ tform.topLeftCorner<3, 3> () = Eigen::Matrix3f (Eigen::Quaternionf (w, x, y, z));
}
else
{
const float& ay = values[1];
const float& az = values[2];
const float& theta = values[3];
- tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::AngleAxisf (theta, Eigen::Vector3f (ax, ay, az)));
+ tform.topLeftCorner<3, 3> () = Eigen::Matrix3f (Eigen::AngleAxisf (theta, Eigen::Vector3f (ax, ay, az)));
}
else
{
#include <pcl/io/pcd_io.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/filters/filter.h> // for removeNaNFromPointCloud
#include <pcl/segmentation/unary_classifier.h>
-#include <boost/filesystem.hpp> // for path, exists, ...
using namespace pcl;
using namespace pcl::io;
bool
loadTrainedFeatures (std::vector<FeatureT::Ptr> &out,
- const boost::filesystem::path &base_dir)
+ const pcl_fs::path &base_dir)
{
- if (!boost::filesystem::exists (base_dir) && !boost::filesystem::is_directory (base_dir))
+ if (!pcl_fs::exists (base_dir) && !pcl_fs::is_directory (base_dir))
return false;
- for (boost::filesystem::directory_iterator it (base_dir); it != boost::filesystem::directory_iterator (); ++it)
+ for (pcl_fs::directory_iterator it (base_dir); it != pcl_fs::directory_iterator (); ++it)
{
- if (!boost::filesystem::is_directory (it->status ()) &&
+ if (!pcl_fs::is_directory (it->status ()) &&
it->path ().extension ().string () == ".pcd")
{
const std::string path = it->path ().string ();
#include <pcl/PCLPointCloud2.h>
#include <pcl/io/auto_io.h>
#include <pcl/filters/uniform_sampling.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include <boost/filesystem.hpp>
+
#include <algorithm>
#include <string>
PCDWriter w_pcd;
PLYWriter w_ply;
- std::string output_ext = boost::filesystem::path (filename).extension ().string ();
+ std::string output_ext = pcl_fs::path (filename).extension ().string ();
std::transform (output_ext.begin (), output_ext.end (), output_ext.begin (), ::tolower);
if (output_ext == ".pcd")
#include <pcl/filters/voxel_grid.h>
#include <pcl/memory.h> // for pcl::make_shared
#include <pcl/point_types.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/parse.h>
#include <vtkGeneralTransform.h>
#include <vtkMath.h>
#include <boost/algorithm/string.hpp> // for boost::is_any_of, boost::split, boost::token_compress_on, boost::trim
-#include <boost/filesystem.hpp> // for boost::filesystem::create_directories, boost::filesystem::exists, boost::filesystem::path, boost::filesystem::path::extension
using namespace pcl;
vtkPolyData*
loadDataSet (const char* file_name)
{
- std::string extension = boost::filesystem::path (file_name).extension ().string ();
+ std::string extension = pcl_fs::path (file_name).extension ().string ();
if (extension == ".ply")
{
vtkPLYReader* reader = vtkPLYReader::New ();
const std::string output_dir = st.at (st.size () - 1) + "_output";
- boost::filesystem::path outpath (output_dir);
- if (!boost::filesystem::exists (outpath))
+ pcl_fs::path outpath (output_dir);
+ if (!pcl_fs::exists (outpath))
{
- if (!boost::filesystem::create_directories (outpath))
+ if (!pcl_fs::create_directories (outpath))
{
PCL_ERROR ("Error creating directory %s.\n", output_dir.c_str ());
return (-1);
set(SUBSYS_DESC "Point cloud tracking library")
set(SUBSYS_DEPS common search kdtree filters octree)
-set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} OPT_DEPS OpenMP)
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
-include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY(${LIB_NAME} COMPONENT ${SUBSYS_NAME} SOURCES ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}" pcl_common pcl_kdtree pcl_search pcl_filters pcl_octree)
PCL_MAKE_PKGCONFIG(${LIB_NAME} COMPONENT ${SUBSYS_NAME} DESC ${SUBSYS_DESC} PCL_DEPS ${SUBSYS_DEPS})
* \ingroup tracking
*/
template <typename PointInT>
-class NearestPairPointCloudCoherence : public PointCloudCoherence<PointInT> {
+class PCL_EXPORTS NearestPairPointCloudCoherence
+: public PointCloudCoherence<PointInT> {
public:
using PointCloudCoherence<PointInT>::getClassName;
using PointCloudCoherence<PointInT>::coherence_name_;
/** \brief Resampling phase of particle filter method. Sampling the particles
* according to the weights calculated in weight method. In particular,
- * "sample with replacement" is archieved by walker's alias method.
+ * "sample with replacement" is achieved by walker's alias method.
*/
virtual void
resample();
/** Pyramidal Kanade Lucas Tomasi tracker.
* This is an implementation of the Pyramidal Kanade Lucas Tomasi tracker that
* operates on organized 3D keypoints with color/intensity information (this is
- * the default behaviour but you can alterate it by providing another operator
+ * the default behaviour but you can alter it by providing another operator
* as second template argument). It is an affine tracker that iteratively
* computes the optical flow to find the best guess for a point p at t given its
* location at t-1. User is advised to respect the Tomasi condition: the
return (keypoints_);
};
- /** \return the status of points to track.
- * Status == 0 --> points successfully tracked;
- * Status < 0 --> point is lost;
- * Status == -1 --> point is out of bond;
- * Status == -2 --> optical flow can not be computed for this point.
- */
- PCL_DEPRECATED(1, 15, "use getStatusOfPointsToTrack instead")
- inline pcl::PointIndicesConstPtr
- getPointsToTrackStatus() const
- {
- pcl::PointIndicesPtr res(new pcl::PointIndices);
- res->indices.insert(
- res->indices.end(), keypoints_status_->begin(), keypoints_status_->end());
- return (res);
- }
-
/** \return the status of points to track.
* Status == 0 --> points successfully tracked;
* Status < 0 --> point is lost;
#include <pcl/point_types.h>
// clang-format off
-PCL_INSTANTIATE(ApproxNearestPairPointCloudCoherence, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(DistanceCoherence, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(HSVColorCoherence,
(pcl::PointXYZRGB)
(pcl::PointXYZRGBNormal)
(pcl::PointXYZRGBA))
PCL_INSTANTIATE(NearestPairPointCloudCoherence, PCL_XYZ_POINT_TYPES)
+// NearestPairPointCloudCoherence is the parent class of ApproxNearestPairPointCloudCoherence.
+// They must be instantiated in this order, otherwise visibility attributes of the former are not applied correctly.
+PCL_INSTANTIATE(ApproxNearestPairPointCloudCoherence, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(NormalCoherence, PCL_NORMAL_POINT_TYPES)
// clang-format on
#endif // PCL_NO_PRECOMPILE
set(SUBSYS_DESC "Point cloud visualization library")
set(SUBSYS_DEPS common io kdtree geometry search octree)
-if(NOT VTK_FOUND)
- set(DEFAULT FALSE)
- set(REASON "VTK was not found.")
-else()
- set(DEFAULT TRUE)
- set(REASON)
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
-endif()
-
-PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
PCL_SUBSYS_DEPEND(build NAME ${SUBSYS_NAME} DEPS ${SUBSYS_DEPS} EXT_DEPS vtk OPT_DEPS openni openni2 ensenso davidSDK dssdk rssdk)
if(ANDROID)
- set(build FALSE)
message("VTK was found, but cannot be compiled for Android. Please use VES instead.")
+ return()
endif()
if(OPENGL_FOUND)
- if(OPENGL_INCLUDE_DIR)
- include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
- endif()
if(OPENGL_DEFINITIONS)
add_definitions("${OPENGL_DEFINITIONS}")
endif()
endif()
set(incs
- "include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/cloud_viewer.h"
"include/pcl/${SUBSYS_NAME}/histogram_visualizer.h"
"include/pcl/${SUBSYS_NAME}/mouse_event.h"
"include/pcl/${SUBSYS_NAME}/window.h"
"include/pcl/${SUBSYS_NAME}/range_image_visualizer.h"
- "include/pcl/${SUBSYS_NAME}/vtk.h"
"include/pcl/${SUBSYS_NAME}/simple_buffer_visualizer.h"
"include/pcl/${SUBSYS_NAME}/pcl_plotter.h"
"include/pcl/${SUBSYS_NAME}/qvtk_compatibility.h"
target_link_libraries("${LIB_NAME}" "-framework Cocoa")
endif()
-target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_kdtree ${OPENGL_LIBRARIES})
+target_link_libraries("${LIB_NAME}" pcl_common pcl_io pcl_kdtree pcl_geometry pcl_search ${OPENGL_LIBRARIES})
if(${VTK_VERSION} VERSION_GREATER_EQUAL 9.0)
#Some libs are referenced through depending on IO
getCloudNames () const
{
std::vector<std::string> names;
+ names.reserve(cloud_indices_.size());
+
for (const auto& i : cloud_indices_)
names.push_back (i.first);
return names;
#pragma once
+#include <pcl/pcl_exports.h>
+
#include <map>
#include <string>
{
namespace visualization
{
- class RenWinInteract
+ class PCL_EXPORTS RenWinInteract
{
public:
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: lmeds.h 1370 2011-06-19 01:06:01Z jspricke $
- *
- */
-
-#pragma once
-
-#if defined __GNUC__
-# pragma GCC system_header
-#endif
-PCL_DEPRECATED_HEADER(1, 15, "Please include the needed eigen headers directly.")
-
-#include <Eigen/Geometry>
-#include <Eigen/Dense>
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!std::isfinite ((*cloud)[i].x) ||
- !std::isfinite ((*cloud)[i].y) ||
- !std::isfinite ((*cloud)[i].z))
+ if (!pcl::isXYZFinite((*cloud)[i]))
continue;
std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
const std::string &id, int viewport)
{
- return (!addPolygon<PointT> (cloud, 0.5, 0.5, 0.5, id, viewport));
+ return (addPolygon<PointT> (cloud, 0.5, 0.5, 0.5, id, viewport));
}
////////////////////////////////////////////////////////////////////////////////////////////
template <typename P1, typename P2> bool
pcl::visualization::PCLVisualizer::addLine (const P1 &pt1, const P2 &pt2, const std::string &id, int viewport)
{
- return (!addLine (pt1, pt2, 0.5, 0.5, 0.5, id, viewport));
+ return (addLine (pt1, pt2, 0.5, 0.5, 0.5, id, viewport));
}
////////////////////////////////////////////////////////////////////////////////////////////
lines->InsertCellPoint (2 * cell_count + 1);
cell_count ++;
}
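+ // non-finite points or normals may have been skipped above, so record how many
+ // normals were actually added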
+ nr_normals = cell_count;
}
else
{
nr_normals = (cloud->size () - 1) / level + 1 ;
pts = new float[2 * nr_normals * 3];
- for (vtkIdType i = 0, j = 0; (j < nr_normals) && (i < static_cast<vtkIdType>(cloud->size())); i += level)
+ vtkIdType j = 0;
+ for (vtkIdType i = 0; (j < nr_normals) && (i < static_cast<vtkIdType>(cloud->size())); i += level)
{
if (!pcl::isFinite((*cloud)[i]) || !pcl::isNormalFinite((*normals)[i]))
continue;
lines->InsertCellPoint (2 * j + 1);
++j;
}
+ nr_normals = j;
}
data->SetArray (&pts[0], 2 * nr_normals * 3, 0, vtkFloatArray::VTK_DATA_ARRAY_DELETE);
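
The hunk above makes nr_normals reflect the normals actually inserted when non-finite points or normals are skipped, so the VTK array size passed to SetArray matches the cell data. For reference, a sketch of the API this implementation backs (the decimation level, scale and id are illustrative):

#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>

inline void
showNormals (pcl::visualization::PCLVisualizer &viewer,
             const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud,
             const pcl::PointCloud<pcl::Normal>::ConstPtr &normals)
{
  // Display every 10th normal as a line of length 0.05; entries with
  // non-finite points or normals are now skipped consistently.
  viewer.addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud, normals, 10, 0.05, "normals");
}
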
Eigen::Affine3f source_transformation;
source_transformation.linear () = source_points->sensor_orientation_.matrix ();
- source_transformation.translation () = source_points->sensor_origin_.head (3);
+ source_transformation.translation () = source_points->sensor_origin_.template head<3> ();
Eigen::Affine3f target_transformation;
target_transformation.linear () = target_points->sensor_orientation_.matrix ();
- target_transformation.translation () = target_points->sensor_origin_.head (3);
+ target_transformation.translation () = target_points->sensor_origin_.template head<3> ();
int j = 0;
// Draw lines between the best corresponding points
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite ((*cloud_)[cp].x) ||
- !std::isfinite ((*cloud_)[cp].y) ||
- !std::isfinite ((*cloud_)[cp].z))
+ if (!pcl::isXYZFinite((*cloud_)[cp]))
continue;
memcpy (&rgb, (reinterpret_cast<const char *> (&(*cloud_)[cp])) + rgba_offset, sizeof (pcl::RGB));
colors[j ] = rgb.r;
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite ((*cloud_)[cp].x) ||
- !std::isfinite ((*cloud_)[cp].y) ||
- !std::isfinite ((*cloud_)[cp].z))
+ if (!pcl::isXYZFinite((*cloud_)[cp]))
continue;
///@todo do this with the point_types_conversion in common, first template it!
{
PointCloudColorHandler<PointT>::setInputCloud (cloud);
field_idx_ = pcl::getFieldIndex<PointT> (field_name_, fields_);
- if (field_idx_ != -1)
- capable_ = true;
- else
+ if (field_idx_ == -1) {
capable_ = false;
+ return;
+ }
+ if (fields_[field_idx_].datatype != pcl::PCLPointField::PointFieldTypes::FLOAT32) {
+ capable_ = false;
+ PCL_ERROR("[pcl::PointCloudColorHandlerGenericField::setInputCloud] This currently only works with float32 fields, but field %s has a different type.\n", field_name_.c_str());
+ return;
+ }
+ capable_ = true;
}
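
The handler now rejects non-FLOAT32 fields explicitly instead of silently producing wrong colors. Typical usage with a float32 field such as "intensity" (the handler setup and id are illustrative):

#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/point_cloud_color_handlers.h>

inline void
colorByIntensity (pcl::visualization::PCLVisualizer &viewer,
                  const pcl::PointCloud<pcl::PointXYZI>::ConstPtr &cloud)
{
  // "intensity" is a float32 field of PointXYZI, so the handler stays capable.
  pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZI> handler (cloud, "intensity");
  if (handler.isCapable ())
    viewer.addPointCloud<pcl::PointXYZI> (cloud, handler, "intensity_cloud");
}
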
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite ((*cloud_)[cp].x) || !std::isfinite ((*cloud_)[cp].y) || !std::isfinite ((*cloud_)[cp].z))
+ if (!pcl::isXYZFinite((*cloud_)[cp]))
continue;
const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud_)[cp]);
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite ((*cloud_)[cp].x) ||
- !std::isfinite ((*cloud_)[cp].y) ||
- !std::isfinite ((*cloud_)[cp].z))
+ if (!pcl::isXYZFinite((*cloud_)[cp]))
continue;
colors[j ] = (*cloud_)[cp].r;
* buffer objects by default, transparently for the user.
* \param[in] use_vbos set to true to use VBOs
*/
+ PCL_DEPRECATED(1, 18, "this function has no effect")
inline void
setUseVbos (const bool use_vbos) { use_vbos_ = use_vbos; }
PCLVisualizer (const std::string &name = "", const bool create_interactor = true);
/** \brief PCL Visualizer constructor. It looks through the passed argv arguments to find the "-cam *.cam" argument.
- * If the search failed, the name for cam file is calculated with boost uuid. If there is no such file, camera is not initilalized.
+ * If the search fails, the cam file name is derived from the input file paths via boost uuid's sha1. If no such file exists, the camera is not initialized.
* \param[in] argc
* \param[in] argv
* \param[in] name the window name (empty by default)
std::string
getCameraFile () const;
- /** \brief Update camera parameters and render. */
- PCL_DEPRECATED(1,15,"updateCamera will be removed, as it does nothing.")
- inline void
- updateCamera () {};
-
/** \brief Reset camera parameters and render. */
void
resetCamera ();
* buffer objects by default, transparently for the user.
* \param[in] use_vbos set to true to use VBOs
*/
+ PCL_DEPRECATED(1, 18, "this function has no effect")
void
setUseVbos (bool use_vbos);
*
* All rights reserved
*/
+#pragma once
+
#include <pcl/pcl_macros.h>
#include <pcl/pcl_config.h>
+++ /dev/null
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2020-, Open Perception
- *
- * All rights reserved
- */
-
-PCL_DEPRECATED_HEADER(1, 15, "Use required vtk includes instead.")
#pragma once
#include <vtkRenderWindowInteractor.h>
+#include <pcl/pcl_exports.h>
-vtkRenderWindowInteractor* vtkRenderWindowInteractorFixNew ();
+PCL_EXPORTS vtkRenderWindowInteractor* vtkRenderWindowInteractorFixNew ();
#include <fstream>
#include <list>
#include <pcl/common/angles.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/visualization/common/io.h>
#include <pcl/visualization/interactor_style.h>
#include <vtkVersion.h>
#include <boost/algorithm/string/classification.hpp> // for is_any_of
#include <boost/algorithm/string/split.hpp> // for split
-#include <boost/filesystem.hpp> // for exists
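
pcl_fs is the filesystem namespace alias provided by <pcl/common/pcl_filesystem.h>; it currently targets boost::filesystem (an assumption about the configured backend), so call sites use the same API surface as before. A minimal sketch (helper name illustrative):

#include <string>
#include <pcl/common/pcl_filesystem.h>
#include <pcl/console/print.h>

// Check that a camera-parameter file exists before trying to load it.
inline bool
cameraFileExists (const std::string &camera_file)
{
  if (!pcl_fs::exists (camera_file) || !pcl_fs::is_regular_file (camera_file))
  {
    PCL_WARN ("Camera file '%s' not found.\n", camera_file.c_str ());
    return false;
  }
  return true;
}
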
#define ORIENT_MODE 0
#define SELECT_MODE 1
Eigen::Vector3f pos_vec = extrinsics.block<3, 1> (0, 3);
// Rotate the view vector
- Eigen::Matrix3f rotation = extrinsics.block<3, 3> (0, 0);
+ Eigen::Matrix3f rotation = extrinsics.topLeftCorner<3, 3> ();
Eigen::Vector3f y_axis (0.f, 1.f, 0.f);
Eigen::Vector3f up_vec (rotation * y_axis);
}
else
{
- if (boost::filesystem::exists (camera_file_))
+ if (pcl_fs::exists (camera_file_))
{
if (loadCameraParameters (camera_file_))
{
#endif
#include <pcl/common/time.h>
+#include <pcl/common/pcl_filesystem.h>
#include <pcl/visualization/common/shapes.h>
#include <pcl/visualization/pcl_visualizer.h>
#else
#include <boost/uuid/sha1.hpp>
#endif
-#include <boost/filesystem.hpp>
+
#include <boost/algorithm/string.hpp> // for split
#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/console/parse.h>
style_->setCloudActorMap (cloud_actor_map_);
style_->setShapeActorMap (shape_actor_map_);
style_->UseTimersOn ();
- style_->setUseVbos (use_vbos_);
}
/////////////////////////////////////////////////////////////////////////////////////////////
std::string camera_file = getUniqueCameraFile (argc, argv);
if (!camera_file.empty ())
{
- if (boost::filesystem::exists (camera_file) && style_->loadCameraParameters (camera_file))
+ if (pcl_fs::exists (camera_file) && style_->loadCameraParameters (camera_file))
{
camera_file_loaded_ = true;
}
}
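
The camera file located via the "-cam" argument (or the sha1-derived default name built further below) is now checked with pcl_fs::exists before loading. Camera parameters can also be restored explicitly; a hedged sketch with an illustrative file name:

#include <pcl/visualization/pcl_visualizer.h>

inline void
restoreCamera (pcl::visualization::PCLVisualizer &viewer)
{
  // loadCameraParameters returns false if the file is missing or malformed;
  // "viewer.cam" is an illustrative name matching what saveCameraParameters writes.
  if (!viewer.loadCameraParameters ("viewer.cam"))
    viewer.resetCamera ();
}
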
// Get the actor pointer
vtkActor* actor = vtkActor::SafeDownCast (am_it->second);
- if (!actor)
+ if (!actor && property != PCL_VISUALIZER_FONT_SIZE) // vtkTextActor is not a subclass of vtkActor
return (false);
switch (property)
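
The extra condition lets PCL_VISUALIZER_FONT_SIZE reach text shapes, whose actors are vtkTextActor (a vtkActor2D) rather than vtkActor. A hedged sketch of changing a text shape's font size (id, text and size are illustrative):

#include <pcl/visualization/pcl_visualizer.h>

inline void
addLabel (pcl::visualization::PCLVisualizer &viewer)
{
  viewer.addText ("distance: 1.25 m", 20, 20, "label_1");  // 2D text shape
  // Without the vtkTextActor exception above, this call would bail out early
  // because the text actor cannot be downcast to vtkActor.
  viewer.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_FONT_SIZE, 14, "label_1");
}
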
pcl::visualization::PCLVisualizer::resetCameraViewpoint (const std::string &id)
{
vtkSmartPointer<vtkMatrix4x4> camera_pose;
- const CloudActorMap::iterator it = cloud_actor_map_->find(id);
+ const auto it = cloud_actor_map_->find(id);
if (it != cloud_actor_map_->end ())
camera_pose = it->second.viewpoint_transformation_;
else
trans_view (x, y) = static_cast<float> (view_transform->GetElement (x, y));
//NOTE: vtk view coordinate system is different than the standard camera coordinates (z forward, y down, x right)
- //thus, the fliping in y and z
+ //thus, the flipping in y and z
for (auto &point : cloud->points)
{
point.getVector4fMap () = trans_view * point.getVector4fMap ();
transOCtoCC->Concatenate (cam_tmp->GetViewTransformMatrix ());
//NOTE: vtk view coordinate system is different than the standard camera coordinates (z forward, y down, x right)
- //thus, the fliping in y and z
+ //thus, the flipping in y and z
vtkSmartPointer<vtkMatrix4x4> cameraSTD = vtkSmartPointer<vtkMatrix4x4>::New ();
cameraSTD->Identity ();
cameraSTD->SetElement (0, 0, 1);
float w3 = 1.0f / world_coords[3];
world_coords[0] *= w3;
- // vtk view coordinate system is different than the standard camera coordinates (z forward, y down, x right), thus, the fliping in y and z
+ // vtk view coordinate system is different than the standard camera coordinates (z forward, y down, x right), thus, the flipping in y and z
world_coords[1] *= -w3;
world_coords[2] *= -w3;
Eigen::Matrix4f &transformation)
{
transformation.setIdentity ();
- transformation.block<3, 3> (0, 0) = orientation.toRotationMatrix ();
- transformation.block<3, 1> (0, 3) = origin.head (3);
+ transformation.topLeftCorner<3, 3> () = orientation.toRotationMatrix ();
+ transformation.block<3, 1> (0, 3) = origin.head<3> ();
}
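
topLeftCorner<3, 3>() is shorthand for block<3, 3>(0, 0), and head<3>() is the compile-time-sized form of head(3). Building the same sensor pose in user code (a minimal Eigen sketch; the helper name is illustrative):

#include <Eigen/Geometry>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

// Compose a 4x4 sensor pose from a cloud's stored origin and orientation,
// mirroring the fixed-size Eigen accessors used above.
inline Eigen::Matrix4f
sensorPose (const pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  Eigen::Matrix4f pose = Eigen::Matrix4f::Identity ();
  pose.topLeftCorner<3, 3> () = cloud.sensor_orientation_.toRotationMatrix ();
  pose.block<3, 1> (0, 3) = cloud.sensor_origin_.head<3> ();
  return pose;
}
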
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::visualization::PCLVisualizer::getGeometryHandlerIndex (const std::string &id)
{
auto am_it = style_->getCloudActorMap ()->find (id);
- if (am_it != cloud_actor_map_->end ())
+ if (am_it == cloud_actor_map_->end ())
return (-1);
return (am_it->second.geometry_handler_index_);
return (-1);
}
- boost::filesystem::path full_path (tex_mat.tex_file.c_str ());
- if (!boost::filesystem::exists (full_path))
+ pcl_fs::path full_path (tex_mat.tex_file.c_str ());
+ if (!pcl_fs::exists (full_path))
{
- boost::filesystem::path parent_dir = full_path.parent_path ();
+ pcl_fs::path parent_dir = full_path.parent_path ();
std::string upper_filename = tex_mat.tex_file;
boost::to_upper (upper_filename);
std::string real_name;
try
{
- if (!boost::filesystem::exists (parent_dir))
+ if (!pcl_fs::exists (parent_dir))
{
PCL_WARN ("[PCLVisualizer::textureFromTexMaterial] Parent directory '%s' doesn't exist!\n",
parent_dir.string ().c_str ());
return (-1);
}
- if (!boost::filesystem::is_directory (parent_dir))
+ if (!pcl_fs::is_directory (parent_dir))
{
PCL_WARN ("[PCLVisualizer::textureFromTexMaterial] Parent '%s' is not a directory !\n",
parent_dir.string ().c_str ());
return (-1);
}
- using paths_vector = std::vector<boost::filesystem::path>;
+ using paths_vector = std::vector<pcl_fs::path>;
paths_vector paths;
- std::copy (boost::filesystem::directory_iterator (parent_dir),
- boost::filesystem::directory_iterator (),
+ std::copy (pcl_fs::directory_iterator (parent_dir),
+ pcl_fs::directory_iterator (),
back_inserter (paths));
for (const auto& path : paths)
{
- if (boost::filesystem::is_regular_file (path))
+ if (pcl_fs::is_regular_file (path))
{
std::string name = path.string ();
boost::to_upper (name);
return (-1);
}
}
- catch (const boost::filesystem::filesystem_error& ex)
+ catch (const pcl_fs::filesystem_error& ex)
{
PCL_WARN ("[PCLVisualizer::textureFromTexMaterial] Error %s when looking for file %s\n!",
// Calculate sha1 using canonical paths
for (const int &p_file_index : p_file_indices)
{
- boost::filesystem::path path (argv[p_file_index]);
- if (boost::filesystem::exists (path))
+ pcl_fs::path path (argv[p_file_index]);
+ if (pcl_fs::exists (path))
{
- path = boost::filesystem::canonical (path);
+ path = pcl_fs::canonical (path);
const auto pathStr = path.string ();
sha1.process_bytes (pathStr.c_str(), pathStr.size());
valid = true;
// Build camera filename
if (valid)
{
- unsigned int digest[5];
+ boost::uuids::detail::sha1::digest_type digest;
sha1.get_digest (digest);
sstream << ".";
- sstream << std::hex << digest[0] << digest[1] << digest[2] << digest[3] << digest[4];
+ for (int i = 0; i < static_cast<int>(sizeof(digest)/sizeof(unsigned int)); ++i) {
+ sstream << std::hex << *(reinterpret_cast<unsigned int*>(&digest[0]) + i);
+ }
sstream << ".cam";
}
}
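
Boost 1.86 changed boost::uuids::detail::sha1::digest_type from unsigned int[5] to unsigned char[20]; the loop above sizes its iteration from the digest itself, so it keeps working on both layouts. A minimal sketch assuming the newer byte-array layout (the helper name is illustrative, and this is an internal Boost API):

#include <boost/uuid/detail/sha1.hpp>  // older Boost ships it as <boost/uuid/sha1.hpp>
#include <iomanip>
#include <sstream>
#include <string>

// Hex-encode the SHA-1 of a string, assuming digest_type is unsigned char[20]
// (Boost >= 1.86); PCL's own code also handles the older int-based layout.
inline std::string
sha1Hex (const std::string &input)
{
  boost::uuids::detail::sha1 sha1;
  sha1.process_bytes (input.c_str (), input.size ());
  boost::uuids::detail::sha1::digest_type digest;
  sha1.get_digest (digest);
  std::ostringstream oss;
  for (unsigned char b : digest)
    oss << std::hex << std::setw (2) << std::setfill ('0') << static_cast<int> (b);
  return oss.str ();
}
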
{
field_idx_ = pcl::getFieldIndex (*cloud, field_name);
capable_ = field_idx_ != -1;
+ if (field_idx_ != -1 && cloud_->fields[field_idx_].datatype != pcl::PCLPointField::PointFieldTypes::FLOAT32) {
+ capable_ = false;
+ PCL_ERROR("[pcl::PointCloudColorHandlerGenericField] This currently only works with float32 fields, but field %s has a different type.\n", field_name.c_str());
+ }
}
///////////////////////////////////////////////////////////////////////////////////////////
field_idx_ = pcl::getFieldIndex (*cloud, "label");
capable_ = field_idx_ != -1;
static_mapping_ = static_mapping;
+ if (field_idx_ != -1 && cloud_->fields[field_idx_].datatype != pcl::PCLPointField::PointFieldTypes::UINT32) {
+ capable_ = false;
+ PCL_ERROR("[pcl::PointCloudColorHandlerLabelField] This currently only works with uint32 fields, but label field has a different type.\n");
+ }
}
///////////////////////////////////////////////////////////////////////////////////////////
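
Like the generic field handler, the label handler now verifies the field type (uint32) before declaring itself capable. Typical usage with pcl::PointXYZL (cloud setup and id are illustrative):

#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/point_cloud_color_handlers.h>

inline void
colorByLabel (pcl::visualization::PCLVisualizer &viewer,
              const pcl::PointCloud<pcl::PointXYZL>::ConstPtr &cloud)
{
  // PointXYZL stores "label" as uint32, so the handler stays capable;
  // static_mapping = true keeps the label-to-color assignment stable across clouds.
  pcl::visualization::PointCloudColorHandlerLabelField<pcl::PointXYZL> handler (cloud, true);
  if (handler.isCapable ())
    viewer.addPointCloud<pcl::PointXYZL> (cloud, handler, "labeled_cloud");
}
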