--- /dev/null
+:warning: This is an issue tracker; please use our mailing list for questions: www.pcl-users.org. :warning:
+
+<!--- Provide a general summary of the issue in the Title above -->
+
+## Your Environment
+<!--- Include as many relevant details about the environment you experienced the bug in -->
+* Operating System and version:
+* Compiler:
+* PCL Version:
+
+## Expected Behavior
+<!--- If you're describing a bug, tell us what should happen -->
+<!--- If you're suggesting a change/improvement, tell us how it should work -->
+
+## Current Behavior
+<!--- If describing a bug, tell us what happens instead of the expected behavior -->
+<!--- If suggesting a change/improvement, explain the difference from current behavior -->
+
+## Possible Solution
+<!--- Not obligatory, but suggest a fix/reason for the bug, -->
+<!--- or ideas how to implement the addition or change -->
+
+## Code to Reproduce
+<!--- Provide a link to a live example, or an unambiguous set of steps to -->
+<!--- reproduce this bug. Include code to reproduce, if relevant -->
+
+## Context
+<!--- How has this issue affected you? What are you trying to accomplish? -->
+<!--- Providing context helps us come up with a solution that is most useful in the real world -->
+
CMAKE_C_FLAGS="-Wall -Wextra -Wabi -O2"
CMAKE_CXX_FLAGS="-Wall -Wextra -Wabi -O2"
-DOWNLOAD_DIR=$HOME/download
+if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+ if [ "$CC" == "clang" ]; then
+ CMAKE_C_FLAGS="$CMAKE_C_FLAGS -Qunused-arguments"
+ CMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS -Qunused-arguments"
+ fi
+fi
-export FLANN_ROOT=$HOME/flann
-export VTK_DIR=$HOME/vtk
-export QHULL_ROOT=$HOME/qhull
-export DOXYGEN_DIR=$HOME/doxygen
+function before_install ()
+{
+ if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+ if [ "$CC" == "clang" ]; then
+ sudo ln -s ../../bin/ccache /usr/lib/ccache/clang
+ sudo ln -s ../../bin/ccache /usr/lib/ccache/clang++
+ fi
+ fi
+}
function build ()
{
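+  # gcc jobs run the reduced pcl_common-only build; clang jobs run the full library build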
case $CC in
- clang ) build_clang;;
- gcc ) build_gcc;;
+ clang ) build_lib;;
+ gcc ) build_lib_core;;
esac
}
-function build_clang ()
+function build_lib ()
{
# A complete build
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS=$CMAKE_C_FLAGS -DCMAKE_CXX_FLAGS=$CMAKE_CXX_FLAGS \
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_QT_VERSION=4 \
+ -DBUILD_simulation=ON \
-DBUILD_global_tests=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_tools=OFF \
+ -DBUILD_apps=OFF \
+ -DBUILD_apps_3d_rec_framework=OFF \
+ -DBUILD_apps_cloud_composer=OFF \
+ -DBUILD_apps_in_hand_scanner=OFF \
+ -DBUILD_apps_modeler=OFF \
+ -DBUILD_apps_optronic_viewer=OFF \
+ -DBUILD_apps_point_cloud_editor=OFF \
$PCL_DIR
# Build
make -j2
}
-function build_gcc ()
+function build_examples ()
+{
+  # A build with the examples enabled
+ # Configure
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_QT_VERSION=4 \
+ -DBUILD_simulation=ON \
+ -DBUILD_global_tests=OFF \
+ -DBUILD_examples=ON \
+ -DBUILD_tools=OFF \
+ -DBUILD_apps=OFF \
+ $PCL_DIR
+ # Build
+ make -j2
+}
+
+function build_tools ()
+{
+  # A build with the tools enabled
+ # Configure
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_QT_VERSION=4 \
+ -DBUILD_simulation=ON \
+ -DBUILD_global_tests=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_tools=ON \
+ -DBUILD_apps=OFF \
+ $PCL_DIR
+ # Build
+ make -j2
+}
+
+function build_apps ()
+{
+  # A build with the apps enabled
+ # Configure
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_QT_VERSION=4 \
+ -DBUILD_simulation=OFF \
+ -DBUILD_outofcore=OFF \
+ -DBUILD_people=OFF \
+ -DBUILD_global_tests=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_tools=OFF \
+ -DBUILD_apps=ON \
+ -DBUILD_apps_3d_rec_framework=ON \
+ -DBUILD_apps_cloud_composer=ON \
+ -DBUILD_apps_in_hand_scanner=ON \
+ -DBUILD_apps_modeler=ON \
+ -DBUILD_apps_optronic_viewer=OFF \
+ -DBUILD_apps_point_cloud_editor=ON \
+ $PCL_DIR
+ # Build
+ make -j2
+}
+
+function build_lib_core ()
{
# A reduced build, only pcl_common
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS=$CMAKE_C_FLAGS -DCMAKE_CXX_FLAGS=$CMAKE_CXX_FLAGS \
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
-DBUILD_2d=OFF \
-DBUILD_features=OFF \
make -j2
}
-function test ()
+function test_core ()
{
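+  # First of three test subsets; the remaining modules and tests are covered by test_ext_1 and test_ext_2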
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS=$CMAKE_C_FLAGS \
- -DCMAKE_CXX_FLAGS=$CMAKE_CXX_FLAGS \
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_NO_PRECOMPILE=ON \
+ -DBUILD_tools=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_apps=OFF \
+ -DBUILD_2d=ON \
+ -DBUILD_features=ON \
+ -DBUILD_filters=ON \
+ -DBUILD_geometry=ON \
+ -DBUILD_io=ON \
+ -DBUILD_kdtree=ON \
+ -DBUILD_keypoints=ON \
+ -DBUILD_ml=OFF \
+ -DBUILD_octree=ON \
+ -DBUILD_outofcore=OFF \
+ -DBUILD_people=OFF \
+ -DBUILD_recognition=OFF \
+ -DBUILD_registration=OFF \
+ -DBUILD_sample_consensus=ON \
+ -DBUILD_search=ON \
+ -DBUILD_segmentation=OFF \
+ -DBUILD_simulation=OFF \
+ -DBUILD_stereo=OFF \
+ -DBUILD_surface=OFF \
+ -DBUILD_tracking=OFF \
+ -DBUILD_visualization=OFF \
-DBUILD_global_tests=ON \
+ -DBUILD_tests_2d=ON \
+ -DBUILD_tests_common=ON \
+ -DBUILD_tests_features=ON \
+ -DBUILD_tests_filters=OFF \
+ -DBUILD_tests_geometry=ON \
+ -DBUILD_tests_io=OFF \
+ -DBUILD_tests_kdtree=ON \
+ -DBUILD_tests_keypoints=ON \
+ -DBUILD_tests_octree=ON \
+ -DBUILD_tests_outofcore=OFF \
+ -DBUILD_tests_people=OFF \
+ -DBUILD_tests_recognition=OFF \
+ -DBUILD_tests_registration=OFF \
+ -DBUILD_tests_sample_consensus=ON \
+ -DBUILD_tests_search=ON \
+ -DBUILD_tests_segmentation=OFF \
+ -DBUILD_tests_surface=OFF \
+ -DBUILD_tests_visualization=OFF \
+ $PCL_DIR
+ # Build and run tests
+ make -j2 tests
+}
+
+function test_ext_1 ()
+{
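+  # Second test subset: io, outofcore, registration, surface and visualization tests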
+ # Configure
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_NO_PRECOMPILE=ON \
+ -DBUILD_tools=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_apps=OFF \
+ -DBUILD_2d=ON \
+ -DBUILD_features=ON \
+ -DBUILD_filters=ON \
+ -DBUILD_geometry=ON \
+ -DBUILD_io=ON \
+ -DBUILD_kdtree=ON \
+ -DBUILD_keypoints=OFF \
+ -DBUILD_ml=OFF \
+ -DBUILD_octree=ON \
+ -DBUILD_outofcore=ON \
+ -DBUILD_people=OFF \
+ -DBUILD_recognition=OFF \
+ -DBUILD_registration=ON \
+ -DBUILD_sample_consensus=ON \
+ -DBUILD_search=ON \
+ -DBUILD_segmentation=OFF \
+ -DBUILD_simulation=OFF \
+ -DBUILD_stereo=OFF \
+ -DBUILD_surface=ON \
+ -DBUILD_tracking=OFF \
+ -DBUILD_visualization=ON \
+ -DBUILD_global_tests=ON \
+ -DBUILD_tests_2d=OFF \
+ -DBUILD_tests_common=OFF \
+ -DBUILD_tests_features=OFF \
+ -DBUILD_tests_filters=OFF \
+ -DBUILD_tests_geometry=OFF \
+ -DBUILD_tests_io=ON \
+ -DBUILD_tests_kdtree=OFF \
+ -DBUILD_tests_keypoints=OFF \
+ -DBUILD_tests_octree=OFF \
+ -DBUILD_tests_outofcore=ON \
+ -DBUILD_tests_people=OFF \
+ -DBUILD_tests_recognition=OFF \
+ -DBUILD_tests_registration=ON \
+ -DBUILD_tests_sample_consensus=OFF \
+ -DBUILD_tests_search=OFF \
+ -DBUILD_tests_segmentation=OFF \
+ -DBUILD_tests_surface=ON \
+ -DBUILD_tests_visualization=ON \
+ $PCL_DIR
+ # Build and run tests
+ make -j2 tests
+}
+
+function test_ext_2 ()
+{
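+  # Third test subset: filters, people, recognition and segmentation tests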
+ # Configure
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
-DPCL_NO_PRECOMPILE=ON \
+ -DBUILD_tools=OFF \
+ -DBUILD_examples=OFF \
+ -DBUILD_apps=OFF \
+ -DBUILD_2d=ON \
+ -DBUILD_features=ON \
+ -DBUILD_filters=ON \
+ -DBUILD_geometry=ON \
+ -DBUILD_io=ON \
+ -DBUILD_kdtree=ON \
+ -DBUILD_keypoints=OFF \
+ -DBUILD_ml=ON \
+ -DBUILD_octree=ON \
+ -DBUILD_outofcore=OFF \
+ -DBUILD_people=ON \
+ -DBUILD_recognition=ON \
+ -DBUILD_registration=ON \
+ -DBUILD_sample_consensus=ON \
+ -DBUILD_search=ON \
+ -DBUILD_segmentation=ON \
+ -DBUILD_simulation=OFF \
+ -DBUILD_stereo=OFF \
+ -DBUILD_surface=OFF \
+ -DBUILD_tracking=OFF \
+ -DBUILD_visualization=ON \
+ -DBUILD_global_tests=ON \
+ -DBUILD_tests_2d=OFF \
+ -DBUILD_tests_common=OFF \
+ -DBUILD_tests_features=OFF \
+ -DBUILD_tests_filters=ON \
+ -DBUILD_tests_geometry=OFF \
+ -DBUILD_tests_io=OFF \
+ -DBUILD_tests_kdtree=OFF \
+ -DBUILD_tests_keypoints=OFF \
+ -DBUILD_tests_octree=OFF \
+ -DBUILD_tests_outofcore=OFF \
+ -DBUILD_tests_people=ON \
+ -DBUILD_tests_recognition=ON \
+ -DBUILD_tests_registration=OFF \
+ -DBUILD_tests_sample_consensus=OFF \
+ -DBUILD_tests_search=OFF \
+ -DBUILD_tests_segmentation=ON \
+ -DBUILD_tests_surface=OFF \
+ -DBUILD_tests_visualization=OFF \
$PCL_DIR
# Build and run tests
- make tests
+ make -j2 tests
}
function doc ()
{
# Do not generate documentation for pull requests
if [[ $TRAVIS_PULL_REQUEST != 'false' ]]; then exit; fi
- # Add installed doxygen to path and install sphinx
- export PATH=$DOXYGEN_DIR/bin:$PATH
- pip install --user sphinx sphinxcontrib-doxylink
+ # Install sphinx
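+  # pyparsing is pinned as a workaround for a regression in sphinxcontrib-doxylink with newer pyparsing releases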
+ pip install --user sphinx pyparsing==2.1.9 sphinxcontrib-doxylink
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
cmake -DDOXYGEN_USE_SHORT_NAMES=OFF \
# Commit and push
cd $DOC_DIR
git add --all
- git commit --amend -m "Documentation for commit $TRAVIS_COMMIT" -q
+ git commit --amend --reset-author -m "Documentation for commit $TRAVIS_COMMIT" -q
git push --force
else
exit 2
fi
}
-function install_flann()
-{
- local pkg_ver=1.8.4
- local pkg_file="flann-${pkg_ver}-src"
- local pkg_url="http://people.cs.ubc.ca/~mariusm/uploads/FLANN/${pkg_file}.zip"
- local pkg_md5sum="a0ecd46be2ee11a68d2a7d9c6b4ce701"
- local FLANN_DIR=$HOME/flann
- local config=$FLANN_DIR/include/flann/config.h
- echo "Installing FLANN ${pkg_ver}"
- if [[ -d $FLANN_DIR ]]; then
- if [[ -e ${config} ]]; then
- local version=`grep -Po "(?<=FLANN_VERSION_ \").*(?=\")" ${config}`
- if [[ "$version" = "$pkg_ver" ]]; then
- local modified=`stat -c %y ${config} | cut -d ' ' -f1`
- echo " > Found cached installation of FLANN"
- echo " > Version ${pkg_ver}, built on ${modified}"
- return 0
- fi
- fi
- fi
- download ${pkg_url} ${pkg_md5sum}
- if [[ $? -ne 0 ]]; then
- return $?
- fi
- unzip -qq pkg
- cd ${pkg_file}
- mkdir build && cd build
- cmake .. \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_INSTALL_PREFIX=$FLANN_DIR \
- -DBUILD_MATLAB_BINDINGS=OFF \
- -DBUILD_PYTHON_BINDINGS=OFF \
- -DBUILD_CUDA_LIB=OFF \
- -DBUILD_C_BINDINGS=OFF \
- -DUSE_OPENMP=OFF
- make -j2 && make install && touch ${config}
- return $?
-}
-
-function install_vtk()
-{
- local pkg_ver=5.10.1
- local pkg_file="vtk-${pkg_ver}"
- local pkg_url="http://www.vtk.org/files/release/${pkg_ver:0:4}/${pkg_file}.tar.gz"
- local pkg_md5sum="264b0052e65bd6571a84727113508789"
- local VTK_DIR=$HOME/vtk
- local config=$VTK_DIR/include/vtk-${pkg_ver:0:4}/vtkConfigure.h
- echo "Installing VTK ${pkg_ver}"
- if [[ -d $VTK_DIR ]]; then
- if [[ -e ${config} ]]; then
- local version=`grep -Po "(?<=VTK_VERSION \").*(?=\")" ${config}`
- if [[ "$version" = "$pkg_ver" ]]; then
- local modified=`stat -c %y ${config} | cut -d ' ' -f1`
- echo " > Found cached installation of VTK"
- echo " > Version ${pkg_ver}, built on ${modified}"
- return 0
- fi
- fi
- fi
- download ${pkg_url} ${pkg_md5sum}
- if [[ $? -ne 0 ]]; then
- return $?
- fi
- tar xzf pkg
- cd "VTK${pkg_ver}"
- mkdir build && cd build
- cmake .. \
- -Wno-dev \
- -DCMAKE_BUILD_TYPE=Release \
- -DBUILD_SHARED_LIBS=ON \
- -DCMAKE_INSTALL_PREFIX=$VTK_DIR \
- -DBUILD_DOCUMENTATION=OFF \
- -DBUILD_EXAMPLES=OFF \
- -DBUILD_TESTING=OFF \
- -DVTK_USE_BOOST=ON \
- -DVTK_USE_CHARTS=ON \
- -DVTK_USE_VIEWS=ON \
- -DVTK_USE_RENDERING=ON \
- -DVTK_USE_CHEMISTRY=OFF \
- -DVTK_USE_HYBRID=OFF \
- -DVTK_USE_PARALLEL=OFF \
- -DVTK_USE_PATENTED=OFF \
- -DVTK_USE_INFOVIS=ON \
- -DVTK_USE_GL2PS=OFF \
- -DVTK_USE_MYSQL=OFF \
- -DVTK_USE_FFMPEG_ENCODER=OFF \
- -DVTK_USE_TEXT_ANALYSIS=OFF \
- -DVTK_WRAP_JAVA=OFF \
- -DVTK_WRAP_PYTHON=OFF \
- -DVTK_WRAP_TCL=OFF \
- -DVTK_USE_QT=OFF \
- -DVTK_USE_GUISUPPORT=OFF \
- -DVTK_USE_SYSTEM_ZLIB=ON \
- -DCMAKE_CXX_FLAGS="-D__STDC_CONSTANT_MACROS"
- make -j2 && make install && touch ${config}
- return $?
-}
-
-function install_qhull()
-{
- local pkg_ver=2012.1
- local pkg_file="qhull-${pkg_ver}"
- local pkg_url="http://www.qhull.org/download/${pkg_file}-src.tgz"
- local pkg_md5sum="d0f978c0d8dfb2e919caefa56ea2953c"
- local QHULL_DIR=$HOME/qhull
- local announce=$QHULL_DIR/share/doc/qhull/Announce.txt
- echo "Installing QHull ${pkg_ver}"
- if [[ -d $QHULL_DIR ]]; then
- if [[ -e ${announce} ]]; then
- local version=`grep -Po "(?<=Qhull )[0-9.]*(?= )" ${announce}`
- if [[ "$version" = "$pkg_ver" ]]; then
- local modified=`stat -c %y ${announce} | cut -d ' ' -f1`
- echo " > Found cached installation of QHull"
- echo " > Version ${pkg_ver}, built on ${modified}"
- return 0
- fi
- fi
- fi
- download ${pkg_url} ${pkg_md5sum}
- if [[ $? -ne 0 ]]; then
- return $?
- fi
- tar xzf pkg
- cd ${pkg_file}
- mkdir -p build && cd build
- cmake .. \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_CXX_FLAGS=-fPIC \
- -DCMAKE_C_FLAGS=-fPIC \
- -DCMAKE_INSTALL_PREFIX=$QHULL_DIR
- make -j2 && make install && touch ${announce}
- return $?
-}
-
-function install_doxygen()
-{
- local pkg_ver=1.8.9.1
- local pkg_file="doxygen-${pkg_ver}"
- local pkg_url="http://ftp.stack.nl/pub/users/dimitri/${pkg_file}.src.tar.gz"
- local pkg_md5sum="3d1a5c26bef358c10a3894f356a69fbc"
- local DOXYGEN_EXE=$DOXYGEN_DIR/bin/doxygen
- echo "Installing Doxygen ${pkg_ver}"
- if [[ -d $DOXYGEN_DIR ]]; then
- if [[ -e $DOXYGEN_EXE ]]; then
- local version=`$DOXYGEN_EXE --version`
- if [[ "$version" = "$pkg_ver" ]]; then
- local modified=`stat -c %y $DOXYGEN_EXE | cut -d ' ' -f1`
- echo " > Found cached installation of Doxygen"
- echo " > Version ${pkg_ver}, built on ${modified}"
- return 0
- fi
- fi
- fi
- download ${pkg_url} ${pkg_md5sum}
- if [[ $? -ne 0 ]]; then
- return $?
- fi
- tar xzf pkg
- cd ${pkg_file}
- ./configure --prefix $DOXYGEN_DIR
- make -j2 && make install
- return $?
-}
-
-function install_dependencies()
-{
- install_flann
- install_vtk
- install_qhull
- install_doxygen
-}
-
-function download()
-{
- mkdir -p $DOWNLOAD_DIR && cd $DOWNLOAD_DIR && rm -rf *
- wget --output-document=pkg $1
- if [[ $? -ne 0 ]]; then
- return $?
- fi
- if [[ $# -ge 2 ]]; then
- echo "$2 pkg" > "md5"
- md5sum -c "md5" --quiet --status
- if [[ $? -ne 0 ]]; then
- echo "MD5 mismatch"
- return 1
- fi
- fi
- return 0
-}
-
case $1 in
- install ) install_dependencies;;
+ before-install ) before_install;;
build ) build;;
- test ) test;;
+ build-examples ) build_examples;;
+ build-tools ) build_tools;;
+ build-apps ) build_apps;;
+ test-core ) test_core;;
+ test-ext-1 ) test_ext_1;;
+ test-ext-2 ) test_ext_2;;
doc ) doc;;
esac
-sudo: false
+sudo: required
language: cpp
-compiler:
- - gcc
- - clang
+cache:
+ ccache: true
+addons:
+ apt:
+ sources:
+ - kalakris-cmake
+ - boost-latest
+ - kubuntu-backports
+ - sourceline: 'ppa:kedazo/doxygen-updates-precise'
+ - sourceline: 'ppa:v-launchpad-jochen-sprickerhof-de/pcl'
+ packages:
+ - cmake
+ - libboost1.55-all-dev
+ - libeigen3-dev
+ - libgtest-dev
+ - doxygen-latex
+ - dvipng
+ - libusb-1.0-0-dev
+ - libqhull-dev
+ - libvtk5-dev
+ - libflann-dev
+ - doxygen
+ - libqt4-dev
+ - libqt4-opengl-dev
+ - libvtk5-qt4-dev
+ - libglew-dev
+ - libopenni-dev
+before_install:
+ - bash .travis.sh before-install
+
env:
- matrix:
- - TASK="build"
global:
- secure: XQw5SBf/7b1SHFR+kKklBWhWVgNvm4vIi+wwyajFSbDLOPpsAqtnDKeA2DV9ciaQJ3CVAvBoyxYgzAvpbsb5k95jadbvu9aSlo/AQnAbz+8DhkJL25DwJAn8G4s4zD1MFi7P4fxJHZsv/l9UcdW4BzjEhh0VidWCO4hP6I9BAQc=
- secure: dRKTSeQI2Jad+/K9XCkNZxuu8exPi2wGzf6D0ogd1Nb2ZIUsOtnHSME4DO+xv7F5ZYrythHTrfezQl5hhcK+cr7A12okxlvmF/gVFuGCBPkUbyWPOrxx/Ic5pqdVnmrMFG1hFmr1KmOxCVx0F48JfGNd4ZgtUBAmnIomRp8sXRI=
- secure: WTZ238yAEfXRyll1n8yau3FUW9HTvq6scKIl9AmNZrnzTr9dktupWrBVV6CtvaufT1mSmDigZ7VGC6T71HkyRIyb2qfVTrnjnxE96Wtcci6PfkuQc2L2puuZYo8dXaBRoOgJKGHFo/uKVKWnp7t55dp3lBJJmclHhon+K2hMSJw=
- secure: LNsNoBvqY/jYDoBjWCE5cM+f1H8xOwSBc/tbWZo6E/jPRjUOLzXSicMMUMrlVto+bFzSUT8OVajV3XmoRx+Qntzv6bDSAGjdycvHd2YZQPn8BYrsFtR4So7SsJkF9FlxzbiOXaiSRpwGn7TP/DO7Neubrr4IS2ef4nWowGrnCE8=
- secure: PZivWbaCWFA2BFFY8n3UMxdEWjz7rBh568u9LF5LH3HgWADnfiwWzNriACqX9fhe7tSmDru5Bk978s+xPPAY9v24cfiDEX5a5MQ/XVr2rP48n3vlUDWERDhIodJ73F9F9GGZXToGdNz0MBUAHgiv7Lb0GYUfmOYzUJjWghngLBw=
-matrix:
+
+jobs:
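+  # Jobs within a stage run in parallel; stages run sequentially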
include:
+ - stage: Core Build
+ env: TASK="build"
+ compiler: gcc
+ script: bash .travis.sh $TASK
+ - env: TASK="build"
+ compiler: clang
+ script: bash .travis.sh $TASK
+ - stage: Extended Build and Tests
+ compiler: clang
+ env: TASK="build-examples"
+ script: bash .travis.sh $TASK
+ - compiler: clang
+ env: TASK="build-tools"
+ script: bash .travis.sh $TASK
+ # - compiler: clang
+ # env: TASK="build-apps"
+ # script: bash .travis.sh $TASK
- compiler: gcc
- env: TASK="test"
- - env: TASK="doc"
-addons:
- apt:
- sources:
- - kalakris-cmake
- - boost-latest
- - kubuntu-backports
- packages:
- - cmake
- - libboost1.55-all-dev
- - libeigen3-dev
- - libgtest-dev
- - doxygen-latex
- - dvipng
- - libusb-1.0-0-dev
-cache:
- directories:
- - $HOME/flann
- - $HOME/vtk
- - $HOME/qhull
- - $HOME/doxygen
-before_install:
- - bash .travis.sh install
-script:
- - bash .travis.sh $TASK
+ env: TASK="doc"
+ script: bash .travis.sh $TASK
+ - compiler: gcc
+ env: TASK="test-core"
+ script: bash .travis.sh $TASK
+ - compiler: gcc
+ env: TASK="test-ext-1"
+ script: bash .travis.sh $TASK
+ - compiler: gcc
+ env: TASK="test-ext-2"
+ script: bash .travis.sh $TASK
"include/pcl/${SUBSYS_NAME}/impl/morphology.hpp"
)
- set(LIB_NAME "pcl_${SUBSYS_NAME}")
-
if(${VTK_FOUND})
set(VTK_USE_FILE "${VTK_USE_FILE}" CACHE INTERNAL "VTK_USE_FILE")
include("${VTK_USE_FILE}")
endif(${VTK_FOUND})
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" ${VTK_INCLUDE_DIRECTORIES})
- PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${impl_incs})
- link_directories(${VTK_LINK_DIRECTORIES})
- target_link_libraries("${LIB_NAME}" ${VTK_LIBRARIES} pcl_io)
- PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "${SUBSYS_DEPS}" "" "" "" "")
#Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
output[i].magnitude_x = (*magnitude_x)[i].intensity;
output[i].magnitude_y = (*magnitude_y)[i].intensity;
output[i].magnitude =
- sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
- (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
+ std::sqrt ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
+ (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
output[i].direction =
atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
}
output[i].magnitude_x = (*magnitude_x)[i].intensity;
output[i].magnitude_y = (*magnitude_y)[i].intensity;
output[i].magnitude =
- sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
- (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
+ std::sqrt ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
+ (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
output[i].direction =
atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
}
output[i].magnitude_x = (*magnitude_x)[i].intensity;
output[i].magnitude_y = (*magnitude_y)[i].intensity;
output[i].magnitude =
- sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
- (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
+ std::sqrt ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
+ (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
output[i].direction =
atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
}
output[i].magnitude_x = (*magnitude_x)[i].intensity;
output[i].magnitude_y = (*magnitude_y)[i].intensity;
output[i].magnitude =
- sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
- (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
+ std::sqrt ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
+ (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
output[i].direction =
atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
}
# ChangeList
+## *= 1.8.1 (08.08.2017) =*
+
+* Replaced `make_shared` invocations on aligned-allocated variables
+ [[#1405]](https://github.com/PointCloudLibrary/pcl/pull/1405)
+* Created an issue template for bug reporting
+ [[#1637]](https://github.com/PointCloudLibrary/pcl/pull/1637)
+* PCL logo image is now locally available
+ [[#1677]](https://github.com/PointCloudLibrary/pcl/pull/1677)
+* Updated the Windows all in one installer for MSVC15
+ [[#1762]](https://github.com/PointCloudLibrary/pcl/pull/1762)
+* Added compile support for VTK 7.1
+ [[#1770]](https://github.com/PointCloudLibrary/pcl/pull/1770)
+* Fixed badges markup in README.md
+ [[#1873]](https://github.com/PointCloudLibrary/pcl/pull/1873)
+* Replaced C-style `sqrtf` with `std::sqrt`
+ [[#1901]](https://github.com/PointCloudLibrary/pcl/pull/1901)
+
+### `CMake:`
+
+* Tweaks to PCL_DEFINITIONS behavior (to be **deprecated** in future
+ versions)
+ [[#1478]](https://github.com/PointCloudLibrary/pcl/pull/1478)
+* VTK directory can now be manually specified during configuration
+ [[#1605]](https://github.com/PointCloudLibrary/pcl/pull/1605)
+* Updated the find Boost CMake macro to support the latest versions; exported
+  definitions now give priority to finding the same Boost version PCL was
+  compiled with.
+ [[#1630]](https://github.com/PointCloudLibrary/pcl/pull/1630)
+* Corrected PCL_ROOT in PCLConfig.cmake
+ [[#1678]](https://github.com/PointCloudLibrary/pcl/pull/1678)
+* Removed automatic override of VTK_LIBRARIES
+ [[#1760]](https://github.com/PointCloudLibrary/pcl/pull/1760)
+* Updated find boost versions
+ [[#1788]](https://github.com/PointCloudLibrary/pcl/pull/1788)
+ [[#1855]](https://github.com/PointCloudLibrary/pcl/pull/1855)
+ [[#1856]](https://github.com/PointCloudLibrary/pcl/pull/1856)
+* Updated CUDA compute capabilities
+ [[#1789]](https://github.com/PointCloudLibrary/pcl/pull/1789)
+* Extended linking of `delayimp.lib` to all MSVC versions
+ [[#1823]](https://github.com/PointCloudLibrary/pcl/pull/1823)
+* Removal of `MSVCxx` variables
+ [[#1830]](https://github.com/PointCloudLibrary/pcl/pull/1830)
+* Fixed the paths of the documentation links in the Windows Start Menu
+ [[#1857]](https://github.com/PointCloudLibrary/pcl/pull/1857)
+* Fixed CPack for Documents
+ [[#1858]](https://github.com/PointCloudLibrary/pcl/pull/1858)
+* Fixed bug present when Ensenso SDK path included spaces
+ [[#1875]](https://github.com/PointCloudLibrary/pcl/pull/1875)
+* `-D_FORCE_INLINES` definition added for CUDA targets to prevent
+ issues between old versions of the CUDA Toolkit and new versions
+ of gcc
+ [[#1900]](https://github.com/PointCloudLibrary/pcl/pull/1900)
+* Implemented new versioning scheme for PCL, employing the suffix
+  `-dev` in between releases.
+  [[#1905]](https://github.com/PointCloudLibrary/pcl/pull/1905)
+* Corrected search paths for Eigen on Windows
+ [[#1912]](https://github.com/PointCloudLibrary/pcl/pull/1912)
+* SSE definitions are now exported and Eigen's definitions were
+  cleaned up
+ [[#1917]](https://github.com/PointCloudLibrary/pcl/pull/1917)
+* Added support for dynamic linking against FLANN on Windows
+ [[#1919]](https://github.com/PointCloudLibrary/pcl/pull/1919)
+* Added a new search path for GTest to the finder script
+ [[#1920]](https://github.com/PointCloudLibrary/pcl/pull/1920)
+* Fixed discovery of PCL deployed outside the install path
+ [[#1923]](https://github.com/PointCloudLibrary/pcl/pull/1923)
+
+
+### `libpcl_2d:`
+
+* Removed the non-free lena-grayscale-png image :(
+ [[#1676]](https://github.com/PointCloudLibrary/pcl/pull/1676)
+* 2d library is no longer generated since it contained no symbols
+ [[#1679]](https://github.com/PointCloudLibrary/pcl/pull/1679)
+
+### `libpcl_common:`
+
+* Changed default alpha value to 255 on all RGB(A) point types
+ [[#1385]](https://github.com/PointCloudLibrary/pcl/pull/1385)
+* Fixed an issue preventing aligned memory allocation on 32-bit Windows
+ systems
+ [[#1665]](https://github.com/PointCloudLibrary/pcl/pull/1665)
+* Fixed compile error on test_common on MSVC
+ [[#1689]](https://github.com/PointCloudLibrary/pcl/pull/1689)
+* Fixed parallel plane test condition on `pcl::planeWithPlaneIntersection`
+ [[#1698]](https://github.com/PointCloudLibrary/pcl/pull/1698)
+* Fixed endless loop condition in `compute3DCentroid`
+ [[#1704]](https://github.com/PointCloudLibrary/pcl/pull/1704)
+* `toPCLPointCloud2` is now resilient to an empty point cloud input
+ [[#1723]](https://github.com/PointCloudLibrary/pcl/pull/1723)
+* Normal accumulator `normalized()` is now resilient to a zero-filled vector
+ [[#1728]](https://github.com/PointCloudLibrary/pcl/pull/1728)
+* Defined additional types in `PointCloud` to ensure STL container
+ compatibility
+ [[#1741]](https://github.com/PointCloudLibrary/pcl/pull/1741)
+* Aligned malloc now works on Android as well
+ [[#1774]](https://github.com/PointCloudLibrary/pcl/pull/1774)
+* Added missing include to boost shared_ptr in vertices
+ [[#1790]](https://github.com/PointCloudLibrary/pcl/pull/1790)
+* Prevent incorrect copy of adjacent point in `fromPCLPointCloud2()`
+ [[#1813]](https://github.com/PointCloudLibrary/pcl/pull/1813)
+* Restored `Eigen::umeyama` for Eigen 3.3+
+ [[#1820]](https://github.com/PointCloudLibrary/pcl/pull/1820)
+ [[#1887]](https://github.com/PointCloudLibrary/pcl/pull/1887)
+* Fixed type in deprecation messages
+ [[#1878]](https://github.com/PointCloudLibrary/pcl/pull/1878)
+* Improved support for mingw aligned allocation
+ [[#1904]](https://github.com/PointCloudLibrary/pcl/pull/1904)
+* Added test for macro `_USE_MATH_DEFINES` to avoid warnings
+ [[#1956]](https://github.com/PointCloudLibrary/pcl/pull/1956)
+
+### `libpcl_cuda:`
+
+* Fixed macro definitions for the Windows platform
+ [[#1568]](https://github.com/PointCloudLibrary/pcl/pull/1568)
+
+### `libpcl_features:`
+
+* NormalEstimation[OMP] and FPFHEstimation[OMP] are now instantiated for
+ the same types as the non OMP variants.
+ [[#1642]](https://github.com/PointCloudLibrary/pcl/pull/1642)
+* Prevention of the addition of duplicate keys in `PFHEstimation`
+ [[#1701]](https://github.com/PointCloudLibrary/pcl/pull/1701)
+* Bug fixes in OUR-CVFH
+ [[#1827]](https://github.com/PointCloudLibrary/pcl/pull/1827)
+* Fixed incorrect initialization of SHOT
+ [[#1859]](https://github.com/PointCloudLibrary/pcl/pull/1859)
+ [[#1876]](https://github.com/PointCloudLibrary/pcl/pull/1876)
+
+### `libpcl_filters:`
+
+* ExtractIndices filter now aborts prematurely and prints an error message
+  when it detects an index that exceeds the size of the input data
+ [[#1670]](https://github.com/PointCloudLibrary/pcl/pull/1670)
+* Potential reduction of computational time of `ModelOutlierRemoval`
+ [[#1735]](https://github.com/PointCloudLibrary/pcl/pull/1735)
+* Improved code readability in CropBox
+ [[#1817]](https://github.com/PointCloudLibrary/pcl/pull/1817)
+
+### `libpcl_gpu:`
+
+* Added support for NVidia Pascal GPUs
+ [[#1824]](https://github.com/PointCloudLibrary/pcl/pull/1824)
+* Fixed compilation error in KinfuLS
+ [[#1872]](https://github.com/PointCloudLibrary/pcl/pull/1872)
+* Fixed CUDA architecture check
+ [[#1872]](https://github.com/PointCloudLibrary/pcl/pull/1872)
+
+### `libpcl_io:`
+
+* RGB values are now always saved as uint32 in PCD files
+ [[#1385]](https://github.com/PointCloudLibrary/pcl/pull/1385)
+* Fixed find RealSense macro and compilation error with RealSenseGrabber
+ on Windows
+ [[#1560]](https://github.com/PointCloudLibrary/pcl/pull/1560)
+* Unified verbose output in OctreePointCloudCompression
+ [[#1569]](https://github.com/PointCloudLibrary/pcl/pull/1569)
+* Improved performance on saving PLY, OBJ and VTK files
+ [[#1580]](https://github.com/PointCloudLibrary/pcl/pull/1580)
+* Added support for the transparency property `Tr` in pcl::MTLReader
+  and fixed an issue with parsing of the material's properties.
+ [[#1599]](https://github.com/PointCloudLibrary/pcl/pull/1599)
+* Fixed function signature mismatch in auto_io
+ [[#1625]](https://github.com/PointCloudLibrary/pcl/pull/1625)
+* Fixed the `ASCIIReader::setInputFields` interface
+ [[#1690]](https://github.com/PointCloudLibrary/pcl/pull/1690)
+* Adopted pcl_isnan in test_buffers to prevent compilation problems on
+ MSVC12
+ [[#1694]](https://github.com/PointCloudLibrary/pcl/pull/1694)
+* Fixed incorrect laser number test condition in VLP Grabber
+ [[#1697]](https://github.com/PointCloudLibrary/pcl/pull/1697)
+* Fixed bug in verbose output of compression statistics
+ [[#1749]](https://github.com/PointCloudLibrary/pcl/pull/1749)
+* Fixed a bug in the parsing of PLY headers
+ [[#1750]](https://github.com/PointCloudLibrary/pcl/pull/1750)
+* Replacement of `boost::math::isnan` by `pcl_isnan`
+ [[#1766]](https://github.com/PointCloudLibrary/pcl/pull/1766)
+* Binary files written by `PCDWriter` now have the same permissions
+ as the ASCII ones
+ [[#1779]](https://github.com/PointCloudLibrary/pcl/pull/1779)
+* Fixed ODR violation when compiling with both OpenNI and OpenNI2
+ [[#1818]](https://github.com/PointCloudLibrary/pcl/pull/1818)
+* PLYReader now also accepts the property `vertex_index`
+ [[#1847]](https://github.com/PointCloudLibrary/pcl/pull/1847)
+* Fixed bug in return value of `pcl_converter`
+  [[#1903]](https://github.com/PointCloudLibrary/pcl/pull/1903)
+
+
+### `libpcl_keypoints:`
+
+* Fixed memory leak in `ISSKeypoint3D`
+ [[#1815]](https://github.com/PointCloudLibrary/pcl/pull/1815)
+
+### `libpcl_octree:`
+
+* Fixed unexpected reduction of octree boundaries
+ [[#1532]](https://github.com/PointCloudLibrary/pcl/pull/1532)
+ [[#1906]](https://github.com/PointCloudLibrary/pcl/pull/1906)
+* Fixed octree precompilation mechanism
+ [[#1639]](https://github.com/PointCloudLibrary/pcl/pull/1639)
+ [[#1916]](https://github.com/PointCloudLibrary/pcl/pull/1916)
+* Fixed invalid cast in `OctreePointCloudVoxelCentroid`
+ [[#1700]](https://github.com/PointCloudLibrary/pcl/pull/1700)
+
+### `libpcl_recognition:`
+
+* LineMOD bug fixes
+ [[#1835]](https://github.com/PointCloudLibrary/pcl/pull/1835)
+* Removed redundant definition of point types
+ [[#1836]](https://github.com/PointCloudLibrary/pcl/pull/1836)
+
+### `libpcl_registration:`
+
+* Fixed GICP behavior when a guess is provided
+ [[#989]](https://github.com/PointCloudLibrary/pcl/pull/989)
+* Fixed compilation issues in NDT 2D with Eigen 3.3
+ [[#1821]](https://github.com/PointCloudLibrary/pcl/pull/1821)
+* NDT 2D state is now properly initialized
+ [[#1731]](https://github.com/PointCloudLibrary/pcl/pull/1731)
+
+### `libpcl_sample_consensus:`
+
+* Improved error reporting in
+ `SampleConsensusModelPlane::optimizeModelCoefficient`
+ [[#1811]](https://github.com/PointCloudLibrary/pcl/pull/1811)
+
+### `libpcl_segmentation:`
+
+* Fixed bug in organized multiplane segmentation refine function where label
+ indices were not being updated correctly
+ [[#1502]](https://github.com/PointCloudLibrary/pcl/pull/1502)
+* Corrected function signature in lccp segmentation
+ [[#1761]](https://github.com/PointCloudLibrary/pcl/pull/1761)
+* Fixed bug in boundary checking in Organized Connected Component
+ Segmentation
+ [[#1800]](https://github.com/PointCloudLibrary/pcl/pull/1800)
+* Clarified documentation in Super Voxel Clustering
+ [[#1804]](https://github.com/PointCloudLibrary/pcl/pull/1804)
+* Fixed bug causing unnecessary computation in Region Growing
+ [[#1882]](https://github.com/PointCloudLibrary/pcl/pull/1882)
+
+### `libpcl_surface:`
+
+* Double-pass mean and covariance estimation is now employed in
+ `ConcaveHull::reconstruct`
+ [[#1567]](https://github.com/PointCloudLibrary/pcl/pull/1567)
+* GP3 bug fixes
+ [[#1850]](https://github.com/PointCloudLibrary/pcl/pull/1850)
+ [[#1879]](https://github.com/PointCloudLibrary/pcl/pull/1879)
+* Fixed buggy index cast in bilateral upsampling
+ [[#1914]](https://github.com/PointCloudLibrary/pcl/pull/1914)
+
+
+### `libpcl_visualization:`
+
+* Fixed bug in addPointCloudNormals which was ignoring viewpoint information
+ [[#1504]](https://github.com/PointCloudLibrary/pcl/pull/1504)
+* Fixed bug in camera FOV computation in PCLVisualizerInteractorStyle
+ [[#1611]](https://github.com/PointCloudLibrary/pcl/pull/1611)
+* Fixed a MSVC compilation error with the colormap LUT
+ [[#1635]](https://github.com/PointCloudLibrary/pcl/pull/1635)
+* Abort prematurely when the camera file cannot be opened on
+ `PCLVisualizerInteractorStyle`
+ [[#1776]](https://github.com/PointCloudLibrary/pcl/pull/1776)
+* Fix to `addText3D`
+ [[#1805]](https://github.com/PointCloudLibrary/pcl/pull/1805)
+* Added some exception guards in OpenNI and OpenNI2 Viewer tools
+ [[#1862]](https://github.com/PointCloudLibrary/pcl/pull/1862)
+
+### `PCL Apps:`
+
+* Fixed bug in the point cloud editor app which allowed points behind
+  the camera to be selected
+ [[#1539]](https://github.com/PointCloudLibrary/pcl/pull/1539)
+* Explicitly define OpenGL headers to fix build on Ubuntu arm64
+ [[#1715]](https://github.com/PointCloudLibrary/pcl/pull/1715)
+* Replaced the use of the `slots` and `signals` keywords in Qt apps with
+  their `Q_*` counterparts to prevent name clashes with Boost Signals
+ [[#1898]](https://github.com/PointCloudLibrary/pcl/pull/1898)
+
+### `PCL Docs:`
+
+* Fix docs generation on Windows
+ [[#1717]](https://github.com/PointCloudLibrary/pcl/pull/1717)
+
+### `PCL Tests:`
+
+* Modularized the build of unit tests.
+ [[#1768]](https://github.com/PointCloudLibrary/pcl/pull/1768)
+* Removed invalid test condition on test_common_io
+ [[#1884]](https://github.com/PointCloudLibrary/pcl/pull/1884)
+
+### `PCL Tools:`
+
+* `mesh2pcd` now has an option to explicitly disable visualization
+ [[#1768]](https://github.com/PointCloudLibrary/pcl/pull/1768)
+* `mesh_sampling` now has an option to explicitly disable visualization
+ [[#1769]](https://github.com/PointCloudLibrary/pcl/pull/1769)
+* Mesh sampling now has an option to include normal information
+ [[#1795]](https://github.com/PointCloudLibrary/pcl/pull/1795)
+* Fixed incorrect return value in pcl_converter
+ [[#1903]](https://github.com/PointCloudLibrary/pcl/pull/1903)
+
+### `PCL Tutorials:`
+
+* Fixed a crash in the pcl_visualizer tutorial triggered in interactive
+ mode
+ [[#1631]](https://github.com/PointCloudLibrary/pcl/pull/1631)
+* Fixed hyperlink in narf keypoint extraction
+ [[#1777]](https://github.com/PointCloudLibrary/pcl/pull/1777)
+* Typo corrections in random sample consensus
+ [[#1865]](https://github.com/PointCloudLibrary/pcl/pull/1865)
+* Updated matrix transform tutorial and added cube.ply mesh
+ [[#1894]](https://github.com/PointCloudLibrary/pcl/pull/1894)
+ [[#1897]](https://github.com/PointCloudLibrary/pcl/pull/1897)
+* Updated Ensenso tutorial for Ensenso X devices
+ [[#1933]](https://github.com/PointCloudLibrary/pcl/pull/1933)
+
+### `CI:`
+
+* Applied a workaround to a regression bug introduced by doxylink
+ in the docs build job
+ [[#1784]](https://github.com/PointCloudLibrary/pcl/pull/1784)
+* Build jobs refactoring
+ [[#1768]](https://github.com/PointCloudLibrary/pcl/pull/1768)
+* Enable ccache to speed up builds in CI
+ [[#1892]](https://github.com/PointCloudLibrary/pcl/pull/1892)
+
## *= 1.8.0 (14.06.2016) =*
* Added missing `Eigen::aligned_allocator` in vectors and maps that contain
endif()
include("${PCL_SOURCE_DIR}/cmake/pcl_utils.cmake")
-set(PCL_VERSION 1.8.0 CACHE STRING "PCL version")
+set(PCL_VERSION "1.8.1" CACHE STRING "PCL version")
DISSECT_VERSION()
GET_OS_INFO()
SET_INSTALL_DIRS()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
message (STATUS "Found OpenMP")
if(MSVC)
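+  # Select the OpenMP runtime DLL matching the compiler version so it can be delay-loaded below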
- if(MSVC90)
+ if(MSVC_VERSION EQUAL 1500)
set(OPENMP_DLL VCOMP90)
- elseif(MSVC10)
+ elseif(MSVC_VERSION EQUAL 1600)
set(OPENMP_DLL VCOMP100)
- elseif(MSVC11)
+ elseif(MSVC_VERSION EQUAL 1700)
set(OPENMP_DLL VCOMP110)
- elseif(MSVC12)
+ elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL VCOMP120)
- elseif(MSVC14)
+ elseif(MSVC_VERSION EQUAL 1900)
set(OPENMP_DLL VCOMP140)
- endif(MSVC90)
+ elseif(MSVC_VERSION EQUAL 1910)
+ set(OPENMP_DLL VCOMP140)
+ endif()
if(OPENMP_DLL)
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /DELAYLOAD:${OPENMP_DLL}D.dll")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DELAYLOAD:${OPENMP_DLL}.dll")
# Eigen (required)
find_package(Eigen REQUIRED)
include_directories(SYSTEM ${EIGEN_INCLUDE_DIRS})
-add_definitions(-DEIGEN_USE_NEW_STDVECTOR
- -DEIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET)
+
# FLANN (required)
-if(NOT PCL_SHARED_LIBS OR (WIN32 AND NOT MINGW))
+if(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32))
set(FLANN_USE_STATIC ON)
-endif(NOT PCL_SHARED_LIBS OR (WIN32 AND NOT MINGW))
+endif(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32))
find_package(FLANN 1.7.0 REQUIRED)
include_directories(${FLANN_INCLUDE_DIRS})
### ---[ Finish up
PCL_WRITE_STATUS_REPORT()
PCL_RESET_MAPS()
-
set(Boost_USE_MULTITHREAD @Boost_USE_MULTITHREAD@)
endif(WIN32)
if(${CMAKE_VERSION} VERSION_LESS 2.8.5)
- SET(Boost_ADDITIONAL_VERSIONS "1.43" "1.43.0" "1.44" "1.44.0" "1.45" "1.45.0" "1.46.1" "1.46.0" "1.46" "1.47" "1.47.0")
+ set(Boost_ADDITIONAL_VERSIONS
+ "1.47.0" "1.47" "1.46.1"
+ "1.46.0" "1.46" "1.45.0" "1.45" "1.44.0" "1.44" "1.43.0" "1.43")
else(${CMAKE_VERSION} VERSION_LESS 2.8.5)
- SET(Boost_ADDITIONAL_VERSIONS "1.47" "1.47.0")
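+  # List the Boost version PCL was built with first so it takes priority over other installed versions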
+ set(Boost_ADDITIONAL_VERSIONS
+ "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@.@Boost_SUBMINOR_VERSION@" "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@"
+ "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
+ "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55"
+ "1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51"
+ "1.50.0" "1.50" "1.49.0" "1.49" "1.48.0" "1.48" "1.47.0" "1.47")
endif(${CMAKE_VERSION} VERSION_LESS 2.8.5)
# Disable the config mode of find_package(Boost)
set(Boost_NO_BOOST_CMAKE ON)
find_path(EIGEN_INCLUDE_DIRS Eigen/Core
HINTS ${PC_EIGEN_INCLUDEDIR} ${PC_EIGEN_INCLUDE_DIRS}
"${EIGEN_ROOT}" "$ENV{EIGEN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/Eigen 3.0.0" "$ENV{PROGRAMW6432}/Eigen 3.0.0"
+ PATHS "$ENV{PROGRAMFILES}/Eigen3" "$ENV{PROGRAMW6432}/Eigen3"
"$ENV{PROGRAMFILES}/Eigen" "$ENV{PROGRAMW6432}/Eigen"
PATH_SUFFIXES eigen3 include/eigen3 include)
find_package_handle_standard_args(eigen DEFAULT_MSG EIGEN_INCLUDE_DIRS)
- set(EIGEN_DEFINITIONS ${EIGEN_DEFINITIONS} -DEIGEN_USE_NEW_STDVECTOR
- -DEIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET)
+ set(EIGEN_DEFINITIONS ${EIGEN_DEFINITIONS})
endmacro(find_eigen)
#remove this as soon as qhull is shipped with FindQhull.cmake
endif()
if(NOT ENSENSO_ROOT AND ("@HAVE_ENSENSO@" STREQUAL "TRUE"))
- get_filename_component(ENSENSO_ABI_HINT @ENSENSO_INCLUDE_DIR@ PATH)
+ get_filename_component(ENSENSO_ABI_HINT "@ENSENSO_INCLUDE_DIR@" PATH)
endif()
find_path(ENSENSO_INCLUDE_DIR nxLib.h
set(_RSSDK_INCLUDE_DIRS ${RSSDK_DIR}/include)
set(RSSDK_RELEASE_NAME libpxc.lib)
set(RSSDK_DEBUG_NAME libpxc_d.lib)
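+  # Search only the lib subdirectory that matches the target architecture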
+ set(RSSDK_SUFFIX Win32)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ set(RSSDK_SUFFIX x64)
+ endif()
find_library(RSSDK_LIBRARY
NAMES ${RSSDK_RELEASE_NAME}
PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES x64 Win32)
+ PATH_SUFFIXES ${RSSDK_SUFFIX})
find_library(RSSDK_LIBRARY_DEBUG
NAMES ${RSSDK_DEBUG_NAME} ${RSSDK_RELEASE_NAME}
PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES x64 Win32)
+ PATH_SUFFIXES ${RSSDK_SUFFIX})
if(NOT RSSDK_LIBRARY_DEBUG)
set(RSSDK_LIBRARY_DEBUG ${RSSDK_LIBRARY})
endif()
macro(find_VTK)
if(PCL_ALL_IN_ONE_INSTALLER AND NOT ANDROID)
if(EXISTS "${PCL_ROOT}/3rdParty/VTK/lib/cmake")
- set(VTK_DIR "${PCL_ROOT}/3rdParty/VTK/lib/cmake/vtk-@VTK_MAJOR_VERSION@.@VTK_MINOR_VERSION@")
+ set(VTK_DIR "${PCL_ROOT}/3rdParty/VTK/lib/cmake/vtk-@VTK_MAJOR_VERSION@.@VTK_MINOR_VERSION@" CACHE PATH "The directory containing VTKConfig.cmake")
else()
- set(VTK_DIR "${PCL_ROOT}/3rdParty/VTK/lib/vtk-@VTK_MAJOR_VERSION@.@VTK_MINOR_VERSION@")
+ set(VTK_DIR "${PCL_ROOT}/3rdParty/VTK/lib/vtk-@VTK_MAJOR_VERSION@.@VTK_MINOR_VERSION@" CACHE PATH "The directory containing VTKConfig.cmake")
endif()
elseif(NOT VTK_DIR AND NOT ANDROID)
- set(VTK_DIR "@VTK_DIR@")
+ set(VTK_DIR "@VTK_DIR@" CACHE PATH "The directory containing VTKConfig.cmake")
endif(PCL_ALL_IN_ONE_INSTALLER AND NOT ANDROID)
if(NOT ANDROID)
find_package(VTK ${QUIET_})
- if (VTK_FOUND)
- set(VTK_LIBRARIES "@VTK_LIBRARIES@")
- endif(VTK_FOUND)
endif()
endmacro(find_VTK)
MESSAGE(STATUS "Found Glew: ${GLEW_LIBRARIES}")
ENDIF(NOT GLEW_FIND_QUIETLY)
IF(GLEW_GLEW_LIBRARY MATCHES glew32s)
- ADD_DEFINITIONS(-DGLEW_STATIC)
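+    # Record the define in PCL_DEFINITIONS so projects using PCL inherit it as well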
+ list(APPEND PCL_DEFINITIONS "-DGLEW_STATIC")
ENDIF(GLEW_GLEW_LIBRARY MATCHES glew32s)
ELSE(GLEW_FOUND)
IF(GLEW_FIND_REQUIRED)
endif(${LIB}_DEFINITIONS AND NOT ${LIB} STREQUAL "VTK")
else(${LIB}_FOUND)
if("${_is_optional}" STREQUAL "OPTIONAL")
- add_definitions("-DDISABLE_${LIB}")
+ list(APPEND PCL_${COMPONENT}_DEFINITIONS "-DDISABLE_${LIB}")
pcl_message("** WARNING ** ${_component} features related to ${_lib} will be disabled")
elseif("${_is_optional}" STREQUAL "REQUIRED")
if((NOT PCL_FIND_ALL) OR (PCL_FIND_ALL EQUAL 1))
get_filename_component(PCL_ROOT "${PCL_DIR}" PATH)
else(WIN32 AND NOT MINGW)
# PCLConfig.cmake is installed to PCL_ROOT/share/pcl-x.y
- get_filename_component(PCL_ROOT "${PCL_DIR}" PATH)
- get_filename_component(PCL_ROOT "${PCL_ROOT}" PATH)
+ get_filename_component(PCL_ROOT "${CMAKE_CURRENT_LIST_DIR}/../.." ABSOLUTE)
endif(WIN32 AND NOT MINGW)
# check whether PCLConfig.cmake is found into a PCL installation or in a build tree
set(PCL_DEBUG_SUFFIX "@CMAKE_DEBUG_POSTFIX@")
set(PCL_RELEASE_SUFFIX "@CMAKE_RELEASE_POSTFIX@")
+# Set the SSE flags used when compiling PCL
+list(APPEND PCL_DEFINITIONS "@PCLCONFIG_SSE_DEFINITIONS@")
+
set(pcl_all_components @PCLCONFIG_AVAILABLE_COMPONENTS@ )
list(LENGTH pcl_all_components PCL_NB_COMPONENTS)
# Check whether the requested PACKAGE_FIND_VERSION is compatible
-set(PACKAGE_VERSION @PCL_VERSION@)
+set(PACKAGE_VERSION @PCL_VERSION_PLAIN@)
# Check whether the requested PACKAGE_FIND_VERSION is compatible
if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
set(PACKAGE_VERSION_EXACT TRUE)
endif()
-endif()
\ No newline at end of file
+endif()
Continuous integration
----------------------
-[ ![Release] [release-image] ] [releases]
-[ ![License] [license-image] ] [license]
+[![Release][release-image]][releases]
+[![License][license-image]][license]
[release-image]: https://img.shields.io/badge/release-1.8.0-green.svg?style=flat
[releases]: https://github.com/PointCloudLibrary/pcl/releases
float avg_dist_neighbours = 0.0;
for (size_t j = 1; j < nn_indices.size (); j++)
- avg_dist_neighbours += sqrtf (nn_distances[j]);
+ avg_dist_neighbours += std::sqrt (nn_distances[j]);
avg_dist_neighbours /= static_cast<float> (nn_indices.size ());
{
float r1 = static_cast<float> (uniform_deviate (rand ()));
float r2 = static_cast<float> (uniform_deviate (rand ()));
- float r1sqr = sqrtf (r1);
+ float r1sqr = std::sqrt (r1);
float OneMinR1Sqr = (1 - r1sqr);
float OneMinR2 = (1 - r2);
a1 *= OneMinR1Sqr;
explicit ComposerMainWindow (QWidget *parent = 0);
~ComposerMainWindow ();
- signals:
+ Q_SIGNALS:
/** \brief Signal emitted when the active project is switched - ie a different project tab is selected */
void
activeProjectChanged (ProjectModel* new_model, ProjectModel* previous_model);
void
saveSelectedCloudToFile ();
- public slots:
+ public Q_SLOTS:
//Slots for File Menu Actions
void
on_action_new_project__triggered (/*QString name = "unsaved project"*/);
void
setInteractorStyle (interactor_styles::INTERACTOR_STYLES style);
- public slots:
+ public Q_SLOTS:
void
refresh ();
void
dataChanged ( const QModelIndex & topLeft, const QModelIndex & bottomRight );
- protected slots:
+ protected Q_SLOTS:
/** \brief Slot called when an item in the model changes
* \param topLeft
* \param bottomRight
virtual ~CloudViewer();
ProjectModel* getModel () const;
- public slots:
+ public Q_SLOTS:
void
addModel (ProjectModel* new_model);
void
addNewProject (ProjectModel* new_model);
- signals:
+ Q_SIGNALS:
void
newModelSelected (ProjectModel *new_model);
}
pcl::ExtractIndices<PointT> filter;
- typename PointCloud<PointT>::Ptr merged_cloud = boost::make_shared<PointCloud<PointT> > ();
+ typename PointCloud<PointT>::Ptr merged_cloud = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
foreach (const CloudItem* input_cloud_item, selected_item_index_map_.keys ())
{
qDebug () << "Extracting "<<selected_item_index_map_.value(input_cloud_item)->indices.size() << " points out of "<<input_cloud->width;
filter.setInputCloud (input_cloud);
filter.setIndices (selected_item_index_map_.value (input_cloud_item));
- typename PointCloud<PointT>::Ptr original_minus_indices = boost::make_shared<PointCloud<PointT> > ();
+ typename PointCloud<PointT>::Ptr original_minus_indices = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
filter.setNegative (true);
filter.filter (*original_minus_indices);
filter.setNegative (false);
- typename PointCloud<PointT>::Ptr selected_points = boost::make_shared<PointCloud<PointT> > ();
+ typename PointCloud<PointT>::Ptr selected_points = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
filter.filter (*selected_points);
qDebug () << "Original minus indices is "<<original_minus_indices->width;
- //Eigen::Vector4f source_origin = input_cloud_item->data (ItemDataRole::ORIGIN).value<Eigen::Vector4f> ();
- //Eigen::Quaternionf source_orientation = input_cloud_item->data (ItemDataRole::ORIENTATION).value<Eigen::Quaternionf> ();
- //pcl::PCLPointCloud2::Ptr cloud_blob = boost::make_shared <pcl::PCLPointCloud2> ();;
- //toPCLPointCloud2 (*original_minus_indices, *cloud_blob);
- //CloudItem* new_cloud_item = new CloudItem (input_cloud_item->text ()
- //, cloud_blob
- // , source_origin
- // , source_orientation);
-
CloudItem* new_cloud_item = CloudItem::createCloudItemFromTemplate<PointT>(input_cloud_item->text (),original_minus_indices);
output.append (new_cloud_item);
-#endif //IMPL_MERGE_SELECTION_H_
\ No newline at end of file
+#endif //IMPL_MERGE_SELECTION_H_
else
pcl::visualization::PCLVisualizer::convertToEigenMatrix (transform_map_.value (input_item->getId ()), transform);
- typename PointCloud<PointT>::Ptr transformed_cloud = boost::make_shared<PointCloud<PointT> > ();
+ typename PointCloud<PointT>::Ptr transformed_cloud = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
transformPointCloud<PointT> (*input_cloud, *transformed_cloud, transform);
CloudItem* new_cloud_item = CloudItem::createCloudItemFromTemplate<PointT>(input_item->text (),transformed_cloud);
-#endif //IMPL_TRANSFORM_CLOUDS_HPP_
\ No newline at end of file
+#endif //IMPL_TRANSFORM_CLOUDS_HPP_
ItemInspector (QWidget* parent = 0);
virtual ~ItemInspector();
- public slots:
+ public Q_SLOTS:
void
setModel (ProjectModel* new_model);
void
/** \brief This is invoked to perform the manipulations specified on the model */
void
manipulateClouds (boost::shared_ptr<ManipulationEvent> manip_event);
- public slots:
+ public Q_SLOTS:
void
commandCompleted (CloudCommand* command);
/** \brief Selects all items in the model */
void
selectAllItems (QStandardItem* item = 0 );
- signals:
+ Q_SIGNALS:
void
enqueueNewAction (AbstractTool* tool, ConstItemList data);
void
copyProperties (const PropertiesModel* to_copy);
- public slots:
+ public Q_SLOTS:
void
propertyChanged (QStandardItem* property_item);
- signals:
+ Q_SIGNALS:
void
propertyChanged (const QStandardItem* property_item, const CloudComposerItem* parent_item_);
QObject
*currentObject () const { return object; }
- public slots:
+ public Q_SLOTS:
/**
Sets the current object the signals that are managed by the
SignalMultiplexer instance should be connected to. Any connections
void
setCurrentObject (QObject *newObject);
- signals:
+ Q_SIGNALS:
/**
Emitted when a new object is set to receive the signals managed by
this SignalMultiplexer instance.
-#endif // SIGNAL_MULTIPLEXER_H_
\ No newline at end of file
+#endif // SIGNAL_MULTIPLEXER_H_
void
enableAllTools ();
- public slots:
+ public Q_SLOTS:
void
activeProjectChanged (ProjectModel* new_model, ProjectModel* previous_model);
/** \brief This slot is called whenever the current project model emits layoutChanged, and calls updateEnabledTools */
void
modelChanged ();
- signals:
+ Q_SIGNALS:
void
enqueueToolAction (AbstractTool* tool);
{
if (euclidean_label_indices[i].indices.size () >= min_cluster_size)
{
- typename PointCloud<PointT>::Ptr cluster = boost::make_shared <PointCloud<PointT> >();
+ typename PointCloud<PointT>::Ptr cluster = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
pcl::copyPointCloud (*input_cloud,euclidean_label_indices[i].indices,*cluster);
qDebug () << "Found cluster with size " << cluster->width;
QString name = input_item->text () + tr ("- Clstr %1").arg (i);
{
if (label_indices[i].indices.size () >= min_plane_size)
{
- typename PointCloud<PointT>::Ptr plane = boost::make_shared <PointCloud<PointT> >();
+ typename PointCloud<PointT>::Ptr plane = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
pcl::copyPointCloud (*input_cloud,label_indices[i].indices,*plane);
qDebug () << "Found plane with size " << plane->width;
QString name = input_item->text () + tr ("- Plane %1").arg (i);
}
}
- typename PointCloud<PointT>::Ptr leftovers = boost::make_shared <PointCloud<PointT> >();
+ typename PointCloud<PointT>::Ptr leftovers = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
if (extracted_indices->size () == 0)
pcl::copyPointCloud (*input_cloud,*leftovers);
else
- #endif //IMPL_TRANSFORM_CLOUDS_HPP_
\ No newline at end of file
+ #endif //IMPL_TRANSFORM_CLOUDS_HPP_
public:
WorkQueue (QObject* parent = 0);
virtual ~WorkQueue();
- public slots:
+ public Q_SLOTS:
void
enqueueNewAction (AbstractTool* new_tool, ConstItemList input_data);
void
checkQueue ();
- signals:
+ Q_SIGNALS:
void
commandProgress (QString command_text, double progress);
{
case (PointTypeFlags::XYZ):
{
- pcl::PointCloud <PointXYZ>::Ptr cloud_ptr = boost::make_shared<pcl::PointCloud <PointXYZ> >();
+ pcl::PointCloud <PointXYZ>::Ptr cloud_ptr = boost::shared_ptr<pcl::PointCloud <PointXYZ> > (new pcl::PointCloud <PointXYZ> );
pcl::fromPCLPointCloud2 (*cloud_blob_ptr_, *cloud_ptr);
cloud_pointer_variant = QVariant::fromValue (cloud_ptr);
//Initialize the search kd-tree for this cloud
}
case (PointTypeFlags::XYZ | PointTypeFlags::RGB):
{
- pcl::PointCloud <PointXYZRGB>::Ptr cloud_ptr = boost::make_shared<pcl::PointCloud <PointXYZRGB> >();
+ pcl::PointCloud <PointXYZRGB>::Ptr cloud_ptr = boost::shared_ptr<pcl::PointCloud <PointXYZRGB> > (new pcl::PointCloud <PointXYZRGB>);
pcl::fromPCLPointCloud2 (*cloud_blob_ptr_, *cloud_ptr);
cloud_pointer_variant = QVariant::fromValue (cloud_ptr);
pcl::search::KdTree<PointXYZRGB>::Ptr kd_search = boost::make_shared<search::KdTree<PointXYZRGB> >();
}
case (PointTypeFlags::XYZ | PointTypeFlags::RGBA):
{
- pcl::PointCloud <PointXYZRGBA>::Ptr cloud_ptr = boost::make_shared<pcl::PointCloud <PointXYZRGBA> >();
+ pcl::PointCloud <PointXYZRGBA>::Ptr cloud_ptr = boost::shared_ptr<pcl::PointCloud <PointXYZRGBA> > (new pcl::PointCloud <PointXYZRGBA>);
pcl::fromPCLPointCloud2 (*cloud_blob_ptr_, *cloud_ptr);
cloud_pointer_variant = QVariant::fromValue (cloud_ptr);
pcl::search::KdTree<PointXYZRGBA>::Ptr kd_search = boost::make_shared<search::KdTree<PointXYZRGBA> >();
return;
}
qDebug () << "Images loaded, making cloud";
- PointCloud<PointXYZRGB>::Ptr cloud = boost::make_shared <PointCloud<PointXYZRGB> >();
+ PointCloud<PointXYZRGB>::Ptr cloud = boost::shared_ptr<PointCloud<PointXYZRGB> > (new PointCloud<PointXYZRGB>);
cloud->points.reserve (depth_dims[0] * depth_dims[1]);
cloud->width = depth_dims[0];
cloud->height = depth_dims[1];
inline Integration&
getIntegration () {return (*integration_);}
- signals:
+ Q_SIGNALS:
/** \brief Emitted when the running mode changes. */
void
runningModeChanged (RunningMode new_running_mode) const;
- public slots:
+ public Q_SLOTS:
/** \brief Start the grabber (enables the scanning pipeline). */
void
explicit MainWindow (QWidget* parent = 0);
~MainWindow ();
- public slots:
+ public Q_SLOTS:
void showHelp ();
void saveAs ();
/** \brief Destructor. */
~OfflineIntegration ();
- public slots:
+ public Q_SLOTS:
/** \brief Start the procedure from a path. */
void
start ();
- private slots:
+ private Q_SLOTS:
/** \brief Loads in new data. */
void
void
setScalingFactor (const double scale);
- public slots:
+ public Q_SLOTS:
/** \brief Requests the scene to be re-drawn (called periodically from a timer). */
void
Eigen::Matrix4f transform_;
- public slots:
+ public Q_SLOTS:
void
confirmSrcPointPressed();
void
void
safePressed();
- private slots:
+ private Q_SLOTS:
void
timeoutSlot();
{
result->first.push_back (classes_[it - sqr_distances->begin ()]);
// TODO leave it squared, and relate param to sigma...
- result->second.push_back (expf (-sqrtf (*it) / gaussian_param));
+ result->second.push_back (expf (-std::sqrt (*it) / gaussian_param));
}
// Return label/score list pair
Ui::MainWindow *ui_;
QTimer *vis_timer_;
- public slots:
+ public Q_SLOTS:
void
adjustPassThroughValues (int new_value)
{
PCL_INFO ("Changed passthrough maximum value to: %f\n", float (new_value) / 10.0f);
}
- private slots:
+ private Q_SLOTS:
void
timeoutSlot ();
- signals:
+ Q_SIGNALS:
void
valueChanged (int new_value);
};
pcl::EdgeAwarePlaneComparator<PointT, pcl::Normal>::Ptr edge_aware_comparator_;
pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label>::Ptr euclidean_cluster_comparator_;
- public slots:
+ public Q_SLOTS:
void toggleCapturePressed()
{
capture_ = !capture_;
}
- private slots:
+ private Q_SLOTS:
void
timeoutSlot();
/** \brief Fixes the speed in steps of 5ms, default 5, gives 5+1 * 5ms = 30ms = 33,3 Hz playback speed */
unsigned int speed_value_;
- public slots:
+ public Q_SLOTS:
void
playButtonPressed ()
{ play_mode_ = true; }
void
indexSliderValueChanged (int value);
- private slots:
+ private Q_SLOTS:
void
timeoutSlot ();
int
exec();
- public slots:
+ public Q_SLOTS:
void
process();
- signals:
+ Q_SIGNALS:
void
dataUpdated(CloudMeshItem* cloud_mesh_item);
CloudMeshItemUpdater (CloudMeshItem* cloud_mesh_item);
~CloudMeshItemUpdater ();
- public slots:
+ public Q_SLOTS:
void
updateCloudMeshItem();
RenderWindowItem*
createRenderWindow();
- public slots:
+ public Q_SLOTS:
// slots for file menu
void
slotOpenProject();
void
saveGlobalSettings();
- private slots:
+ private Q_SLOTS:
void
slotOpenRecentPointCloud();
void
std::map<std::string, Parameter*> name_parameter_map_;
ParameterModel* parameter_model_;
- protected slots:
+ protected Q_SLOTS:
void
reset();
};
void
addTopLevelItem(RenderWindowItem* render_window_item);
- public slots:
+ public Q_SLOTS:
// slots for file menu
void
slotOpenPointCloud();
void
slotCloseRenderWindow();
- signals:
+ Q_SIGNALS:
void
fileOpened(const QString& filename);
virtual bool
dropMimeData(QTreeWidgetItem * parent, int index, const QMimeData * data, Qt::DropAction action);
- private slots:
+ private Q_SLOTS:
void
slotUpdateOnSelectionChange(const QItemSelection& selected, const QItemSelection& deselected);
bool
runWorker(AbstractWorker* worker);
- signals:
+ Q_SIGNALS:
void
prepared();
- private slots:
+ private Q_SLOTS:
void
slotOnCloudMeshItemUpdate(CloudMeshItem* cloud_mesh_item);
};
std::vector<CloudFilter*> & filter_list);
virtual ~FilterWindow ();
- public slots:
+ public Q_SLOTS:
/** \brief Called if a different item in the filter list is selected. */
virtual void itemSelected (int id);
/** \brief Called when the 'finish' button is pressed. */
/** \brief Called when the 'next' button is pressed. */
virtual void next ();
- signals:
+ Q_SIGNALS:
/** \brief Ommitted when a filter is created. */
void filterCreated ();
return theSingleton;
}
- public slots:
+ public Q_SLOTS:
void selectedSensorChanged (int index);
void cloud_callback (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud);
void refresh ();
// find connected devices
void findConnectedDevices ();
- private slots:
+ private Q_SLOTS:
void addFilter ();
void updateFilter (QListWidgetItem*);
void filterSelectionChanged ();
/** \brief Callback that is used to get cloud data from the grabber. */
void cloudCallback (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr & cloud);
- signals:
+ Q_SIGNALS:
/** \brief Omitted when a new cloud is received. */
void cloudReceived (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud);
#include <QtGui/QColor>
#include <pcl/apps/point_cloud_editor/localTypes.h>
#include <pcl/apps/point_cloud_editor/statistics.h>
+#ifdef OPENGL_IS_A_FRAMEWORK
+# include <OpenGL/gl.h>
+# include <OpenGL/glu.h>
+#else
+# include <GL/gl.h>
+# include <GL/glu.h>
+#endif
/// @brief A wrapper which allows using any cloud implementation provided by
/// a third-party library.
void
loadFile(const std::string &filename);
- public slots:
+ public Q_SLOTS:
/// @brief Loads a new cloud.
void
load ();
void
showStat ();
- //signals:
-
protected:
/// initializes GL
void
return (ok_);
}
- private slots:
+ private Q_SLOTS:
/// @brief Accepts and stores the current user inputs, and turns off the
/// dialog box.
void
int
getSelectedSpinBoxValue ();
- private slots:
+ private Q_SLOTS:
void
about ();
/// @brief Destructor
~StatisticsDialog ();
- public slots:
+ public Q_SLOTS:
/// @brief update the dialog box.
void update ();
- private slots:
+ private Q_SLOTS:
void accept ();
private:
float max_x = std::max(final_x_, origin_x_)/(viewport[2]*0.5) - 1.0;
float max_y = (viewport[3] - std::min(origin_y_, final_y_))/(viewport[3]*0.5) - 1.0;
float min_y = (viewport[3] - std::max(origin_y_, final_y_))/(viewport[3]*0.5) - 1.0;
+ // Ignore the points behind the camera
+ if (w < 0)
+ return (false);
// Check the left and right sides
if ((x < min_x) || (x > max_x))
return (false);
#include <pcl/io/openni_grabber.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/console/parse.h>
find_path(EIGEN_INCLUDE_DIR Eigen/Core
HINTS ${PC_EIGEN_INCLUDEDIR} ${PC_EIGEN_INCLUDE_DIRS} "${EIGEN_ROOT}" "$ENV{EIGEN_ROOT}"
PATHS "$ENV{PROGRAMFILES}/Eigen" "$ENV{PROGRAMW6432}/Eigen"
- "$ENV{PROGRAMFILES}/Eigen 3.0.0" "$ENV{PROGRAMW6432}/Eigen 3.0.0"
+ "$ENV{PROGRAMFILES}/Eigen3" "$ENV{PROGRAMW6432}/Eigen3"
PATH_SUFFIXES eigen3 include/eigen3 include)
if(EIGEN_INCLUDE_DIR)
HINTS "${GTEST_ROOT}" "$ENV{GTEST_ROOT}"
PATHS "$ENV{PROGRAMFILES}/gtest" "$ENV{PROGRAMW6432}/gtest"
PATHS "$ENV{PROGRAMFILES}/gtest-1.7.0" "$ENV{PROGRAMW6432}/gtest-1.7.0"
+ PATH /usr/src/googletest/googletest
PATH /usr/src/gtest
PATH_SUFFIXES gtest usr/src/gtest)
# Libraries
set(RSSDK_RELEASE_NAME libpxc.lib)
set(RSSDK_DEBUG_NAME libpxc_d.lib)
+ set(RSSDK_SUFFIX Win32)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ set(RSSDK_SUFFIX x64)
+ endif()
find_library(RSSDK_LIBRARY
NAMES ${RSSDK_RELEASE_NAME}
PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES x64 Win32)
+ PATH_SUFFIXES ${RSSDK_SUFFIX})
find_library(RSSDK_LIBRARY_DEBUG
NAMES ${RSSDK_DEBUG_NAME} ${RSSDK_RELEASE_NAME}
PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES x64 Win32)
+ PATH_SUFFIXES ${RSSDK_SUFFIX})
if(NOT RSSDK_LIBRARY_DEBUG)
set(RSSDK_LIBRARY_DEBUG ${RSSDK_LIBRARY})
endif()
set(CPACK_NSIS_MODIFY_PATH ON)
set(CPACK_PACKAGE_EXECUTABLES @PCL_EXECUTABLES@)
set(CPACK_NSIS_MENU_LINKS
- "share/doc/pcl/tutorials/html/index.html" "Tutorials"
- "share/doc/pcl/tutorials/html/sources" "Tutorials sources"
- "share/doc/pcl/html/pcl-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@.chm" "Documentation"
+ "share/doc/pcl-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@/tutorials/html/index.html" "Tutorials"
+ "share/doc/pcl-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@/tutorials/html/sources" "Tutorials sources"
+ "share/doc/pcl-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@/html/pcl-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@.chm" "Documentation"
"http://www.pointclouds.org" "PCL Website")
#set(CPACK_NSIS_MENU_LINKS "share/doc/@PROJECT_NAME@/user_guide.pdf" "User's guide")
#set(CPACK_NSIS_MENU_LINKS "share/doc/@PROJECT_NAME@/developer_guide.pdf" "Developer's guide")
if(BUILD_all_in_one_installer)
set(CPACK_NSIS_PACKAGE_NAME "${PROJECT_NAME}-${PCL_VERSION}-AllInOne")
endif(BUILD_all_in_one_installer)
- if(MSVC10)
+ if(MSVC_VERSION EQUAL 1600)
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2010-${win_system_name}")
- elseif(MSVC11)
+ elseif(MSVC_VERSION EQUAL 1700)
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2012-${win_system_name}")
- elseif(MSVC12)
+ elseif(MSVC_VERSION EQUAL 1800)
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2013-${win_system_name}")
- elseif(MSVC14)
+ elseif(MSVC_VERSION EQUAL 1900)
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2015-${win_system_name}")
+ elseif(MSVC_VERSION EQUAL 1910)
+ set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2017-${win_system_name}")
else()
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-${win_system_name}")
endif()
PCL_CPACK_MAKE_COMPS_OPTS(PCL_CPACK_COMPONENTS "${_comps}")
# add documentation
- if(BUILD_documentation)
+ if(WITH_DOCS)
set(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL} doc")
set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_GROUP \"PCL\")\n")
set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_DISPLAY_NAME \"Documentation\")\n")
set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_DOC_DESCRIPTION \"API documentation and tutorials\")\n")
- endif(BUILD_documentation)
+ endif(WITH_DOCS)
# add PCLConfig
set(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL} pclconfig")
set(PCL_CPACK_COMPONENTS "${PCL_CPACK_COMPONENTS}\nset(CPACK_COMPONENT_PCLCONFIG_GROUP \"PCL\")\n")
endif(PCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32 AND WIN32)
if(${CMAKE_VERSION} VERSION_LESS 2.8.5)
- SET(Boost_ADDITIONAL_VERSIONS "1.43" "1.43.0" "1.44" "1.44.0" "1.45" "1.45.0" "1.46.1" "1.46.0" "1.46" "1.47" "1.47.0")
+ set(Boost_ADDITIONAL_VERSIONS
+ "1.47.0" "1.47" "1.46.1"
+ "1.46.0" "1.46" "1.45.0" "1.45" "1.44.0" "1.44" "1.43.0" "1.43")
else(${CMAKE_VERSION} VERSION_LESS 2.8.5)
- SET(Boost_ADDITIONAL_VERSIONS "1.47" "1.47.0" "1.48" "1.48.0" "1.49" "1.49.0")
+ set(Boost_ADDITIONAL_VERSIONS
+ "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
+ "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55"
+ "1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51"
+ "1.50.0" "1.50" "1.49.0" "1.49" "1.48.0" "1.48" "1.47.0" "1.47")
endif(${CMAKE_VERSION} VERSION_LESS 2.8.5)
# Disable the config mode of find_package(Boost)
# Find a complete list for CUDA compute capabilities at http://developer.nvidia.com/cuda-gpus
- if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.5")
+ if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "8.0")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2 5.3 6.0 6.1")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.5")
set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2")
elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.0")
set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0")
include(${PCL_SOURCE_DIR}/cmake/CudaComputeTargetFlags.cmake)
APPEND_TARGET_ARCH_FLAGS()
+ # Prevent compilation issues between recent gcc versions and old CUDA versions
+ list(APPEND CUDA_NVCC_FLAGS "-D_FORCE_INLINES")
endif()
option(PCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked Boost on Win32 platforms." OFF)
mark_as_advanced(PCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32)
+# Build with dynamic linking for FLANN (advanced users)
+option(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked FLANN on Win32 platforms." OFF)
+mark_as_advanced(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32)
+
# Precompile for a minimal set of point types instead of all.
option(PCL_ONLY_CORE_POINT_TYPES "Compile explicitly only for a small subset of point types (e.g., pcl::PointXYZ instead of PCL_XYZ_POINT_TYPES)." OFF)
mark_as_advanced(PCL_ONLY_CORE_POINT_TYPES)
set(PCLCONFIG_INTERNAL_DEPENDENCIES)
set(PCLCONFIG_EXTERNAL_DEPENDENCIES)
set(PCLCONFIG_OPTIONAL_DEPENDENCIES)
+set(PCLCONFIG_SSE_DEFINITIONS "${SSE_FLAGS} ${SSE_DEFINITIONS}")
foreach(_ss ${PCL_SUBSYSTEMS_MODULES})
PCL_GET_SUBSYS_STATUS(_status ${_ss})
if(_status)
endif(NOT ${EXT_DEP_FOUND} OR (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
endforeach(_dep)
endif(SUBSYS_EXT_DEPS)
+ if(SUBSYS_OPT_DEPS)
+ foreach(_dep ${SUBSYS_OPT_DEPS})
+ PCL_GET_SUBSYS_INCLUDE_DIR(_include_dir ${_dep})
+ include_directories(${PROJECT_SOURCE_DIR}/${_include_dir}/include)
+ endforeach(_dep)
+ endif(SUBSYS_OPT_DEPS)
endif(${_var} AND (NOT ("${subsys_status}" STREQUAL "AUTO_OFF")))
endmacro(PCL_SUBSYS_DEPEND)
target_link_libraries(${_name} gomp)
endif()
- if(MSVC90 OR MSVC10)
+ if(MSVC)
target_link_libraries(${_name} delayimp.lib) # because delay load is enabled for openmp.dll
endif()
set_target_properties(${_name} PROPERTIES
- VERSION ${PCL_VERSION}
+ VERSION ${PCL_VERSION_PLAIN}
SOVERSION ${PCL_MAJOR_VERSION}.${PCL_MINOR_VERSION}
DEFINE_SYMBOL "PCLAPI_EXPORTS")
if(USE_PROJECT_FOLDERS)
target_link_libraries(${_name} ${Boost_LIBRARIES})
set_target_properties(${_name} PROPERTIES
- VERSION ${PCL_VERSION}
+ VERSION ${PCL_VERSION_PLAIN}
SOVERSION ${PCL_MAJOR_VERSION}
DEFINE_SYMBOL "PCLAPI_EXPORTS")
if(USE_PROJECT_FOLDERS)
endif()
endif()
endmacro(PCL_ADD_GRABBER_DEPENDENCY)
+
+###############################################################################
+# Set the dependencies for a specific test module on the provided variable
+# _var The variable to be filled with the dependencies
+# _module The module name
+macro(PCL_SET_TEST_DEPENDENCIES _var _module)
+ set(${_var} global_tests ${_module} ${PCL_SUBSYS_DEPS_${_module}})
+endmacro(PCL_SET_TEST_DEPENDENCIES)
PCL_MINOR_VERSION "${PCL_VERSION}")
string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1"
PCL_REVISION_VERSION "${PCL_VERSION}")
- string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.[0-9]+(.*)" "\\1"
- PCL_CANDIDATE_VERSION "${PCL_VERSION}")
+ set(PCL_VERSION_PLAIN "${PCL_MAJOR_VERSION}.${PCL_MINOR_VERSION}.${PCL_REVISION_VERSION}")
+ if(${PCL_VERSION} MATCHES "^[0-9]+\\.[0-9]+\\.[0-9]+-dev$")
+ set(PCL_DEV_VERSION 1)
+ set(PCL_VERSION_PLAIN "${PCL_VERSION_PLAIN}.99")
+ else()
+ set(PCL_DEV_VERSION 0)
+ endif()
endmacro(DISSECT_VERSION)
###############################################################################
includedir=${prefix}/include/@PROJECT_NAME_LOWER@-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@
Name: @PKG_NAME@
Description: @PKG_DESC@
-Version: @PCL_VERSION@
+Version: @PCL_VERSION_PLAIN@
Requires: @PKG_EXTERNAL_DEPS@
Libs:
Cflags: -I${includedir} @PKG_CFLAGS@
includedir=${prefix}/include/@PROJECT_NAME_LOWER@-@PCL_MAJOR_VERSION@.@PCL_MINOR_VERSION@
Name: @PKG_NAME@
Description: @PKG_DESC@
-Version: @PCL_VERSION@
+Version: @PCL_VERSION_PLAIN@
Requires: @PKG_EXTERNAL_DEPS@
Libs: -L${libdir} -l@PKG_NAME@ @PKG_LIBFLAGS@ @PKG_INTERNAL_DEPS@
Cflags: -I${includedir} @PKG_CFLAGS@
#include <string>
#include <vector>
#include <ostream>
+#include <boost/shared_ptr.hpp>
#include <pcl/pcl_macros.h>
namespace pcl
template<typename PointType1, typename PointType2> inline float
euclideanDistance (const PointType1& p1, const PointType2& p2)
{
- return (sqrtf (squaredEuclideanDistance (p1, p2)));
+ return (std::sqrt (squaredEuclideanDistance (p1, p2)));
}
}
/*@*/
template <typename PointT> void
get (PointT& t, size_t) const
{
- t.getNormalVector4fMap () = normal;
- t.getNormalVector4fMap ().normalize ();
+#if EIGEN_VERSION_AT_LEAST (3, 3, 0)
+ t.getNormalVector4fMap () = normal.normalized ();
+#else
+ if (normal.squaredNorm() > 0)
+ t.getNormalVector4fMap () = normal.normalized ();
+ else
+ t.getNormalVector4fMap () = Eigen::Vector4f::Zero ();
+#endif
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
while (cloud_iterator.isValid ())
{
// Check if the point is invalid
- if (!pcl_isfinite (cloud_iterator->x) ||
- !pcl_isfinite (cloud_iterator->y) ||
- !pcl_isfinite (cloud_iterator->z))
- continue;
- centroid[0] += cloud_iterator->x;
- centroid[1] += cloud_iterator->y;
- centroid[2] += cloud_iterator->z;
- ++cp;
+ if (pcl::isFinite (*cloud_iterator))
+ {
+ centroid[0] += cloud_iterator->x;
+ centroid[1] += cloud_iterator->y;
+ centroid[2] += cloud_iterator->z;
+ ++cp;
+ }
++cloud_iterator;
}
centroid /= static_cast<Scalar> (cp);
typename Eigen::internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
pcl::umeyama (const Eigen::MatrixBase<Derived>& src, const Eigen::MatrixBase<OtherDerived>& dst, bool with_scaling)
{
+#if EIGEN_VERSION_AT_LEAST (3, 3, 0)
+ return Eigen::umeyama (src, dst, with_scaling);
+#else
typedef typename Eigen::internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
typedef typename Eigen::internal::traits<TransformationMatrixType>::Scalar Scalar;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
// Eq. (39)
VectorType S = VectorType::Ones (m);
- if (sigma.determinant () < 0)
+
+ if ( svd.matrixU ().determinant () * svd.matrixV ().determinant () < 0 )
S (m - 1) = -1;
// Eq. (40) and (43)
- const VectorType& d = svd.singularValues ();
- Index rank = 0;
- for (Index i = 0; i < m; ++i)
- if (!Eigen::internal::isMuchSmallerThan (d.coeff (i), d.coeff (0)))
- ++rank;
- if (rank == m - 1)
- {
- if (svd.matrixU ().determinant () * svd.matrixV ().determinant () > 0)
- Rt.block (0, 0, m, m).noalias () = svd.matrixU () * svd.matrixV ().transpose ();
- else
- {
- const Scalar s = S (m - 1);
- S (m - 1) = -1;
- Rt.block (0, 0, m, m).noalias () = svd.matrixU () * S.asDiagonal () * svd.matrixV ().transpose ();
- S (m - 1) = s;
- }
- }
- else
- {
- Rt.block (0, 0, m, m).noalias () = svd.matrixU () * S.asDiagonal () * svd.matrixV ().transpose ();
- }
+ Rt.block (0,0,m,m).noalias () = svd.matrixU () * S.asDiagonal () * svd.matrixV ().transpose ();
- // Eq. (42)
if (with_scaling)
{
// Eq. (42)
- const Scalar c = 1 / src_var * svd.singularValues ().dot (S);
+ const Scalar c = Scalar (1)/ src_var * svd.singularValues ().dot (S);
// Eq. (41)
Rt.col (m).head (m) = dst_mean;
}
return (Rt);
+#endif
}
//////////////////////////////////////////////////////////////////////////////////////////
plane_a_norm.normalize ();
plane_b_norm.normalize ();
- // Test if planes are parallel (test_cos == 1)
+ // Test if planes are parallel
double test_cos = plane_a_norm.dot (plane_b_norm);
- double upper_limit = 1 + angular_tolerance;
- double lower_limit = 1 - angular_tolerance;
+ double tolerance_cos = 1 - sin (fabs (angular_tolerance));
- if ((test_cos > lower_limit) && (test_cos < upper_limit))
+ if (fabs (test_cos) > tolerance_cos)
{
PCL_DEBUG ("Plane A and Plane B are parallel.\n");
return (false);
template <typename FloatVectorT> inline float
L2_Norm (FloatVectorT a, FloatVectorT b, int dim)
{
- return sqrtf(L2_Norm_SQR(a, b, dim));
+ return std::sqrt (L2_Norm_SQR(a, b, dim));
}
float norm = 0.0;
for (int i = 0; i < dim; ++i)
- norm += (sqrtf (a[i]) - sqrtf (b[i])) * (sqrtf (a[i]) - sqrtf (b[i]));
+ norm += (std::sqrt (a[i]) - std::sqrt (b[i])) * (std::sqrt (a[i]) - std::sqrt (b[i]));
- return sqrtf (norm);
+ return std::sqrt (norm);
}
float norm = 0.0, result;
for (int i = 0; i < dim; ++i)
- norm += sqrtf (a[i] * b[i]);
+ norm += std::sqrt (a[i] * b[i]);
if (norm > 0)
result = -logf (norm);
float norm = 0.0;
for (int i = 0; i < dim; ++i)
- norm += sqrtf (fabsf (a[i] - b[i]));
+ norm += std::sqrt (fabsf (a[i] - b[i]));
return norm;
}
for (int i = 0; i < dim; ++i)
norm += (P1 * a[i] - P2 * b[i]) * (P1 * a[i] - P2 * b[i]);
- return sqrtf (norm);
+ return std::sqrt (norm);
}
{
return false;
// Reduce degree of polynomial
- //polynomial_degree = (unsigned int) (0.5f* (sqrtf (8*samplePoints.size ()+1) - 3));
+ //polynomial_degree = (unsigned int) (0.5f* (std::sqrt (8*samplePoints.size ()+1) - 3));
//parameters_size = BivariatePolynomialT<real>::getNoOfParametersFromDegree (polynomial_degree);
//cout << "Not enough points, so degree of polynomial was decreased to "<<polynomial_degree
// << " ("<<samplePoints.size ()<<" points => "<<parameters_size<<" parameters)\n";
cloud.points.resize (num_points);
uint8_t* cloud_data = reinterpret_cast<uint8_t*>(&cloud.points[0]);
- // Check if we can copy adjacent points in a single memcpy
+ // Check if we can copy adjacent points in a single memcpy. We can do so if there
+ // is exactly one field to copy and it is the same size as the source and destination
+ // point types.
if (field_map.size() == 1 &&
field_map[0].serialized_offset == 0 &&
field_map[0].struct_offset == 0 &&
- msg.point_step == sizeof(PointT))
+ field_map[0].size == msg.point_step &&
+ field_map[0].size == sizeof(PointT))
{
uint32_t cloud_row_step = static_cast<uint32_t> (sizeof (PointT) * cloud.width);
const uint8_t* msg_data = &msg.data[0];
// Fill point cloud binary data (padding and all)
size_t data_size = sizeof (PointT) * cloud.points.size ();
msg.data.resize (data_size);
- memcpy (&msg.data[0], &cloud.points[0], data_size);
+ if (data_size)
+ {
+ memcpy(&msg.data[0], &cloud.points[0], data_size);
+ }
// Fill fields metadata
msg.fields.clear ();
inline RGB ()
{
- r = g = b = a = 0;
+ r = g = b = 0;
+ a = 255;
}
friend std::ostream& operator << (std::ostream& os, const RGB& p);
{
x = y = z = 0.0f;
data[3] = 1.0f;
- r = g = b = a = 0;
+ r = g = b = 0;
+ a = 255;
}
inline PointXYZRGB (uint8_t _r, uint8_t _g, uint8_t _b)
{
r = _r;
g = _g;
b = _b;
- a = 0;
+ a = 255;
}
friend std::ostream& operator << (std::ostream& os, const PointXYZRGB& p);
x = y = z = 0.0f;
data[3] = 1.0f;
r = g = b = 0;
- a = 0;
+ a = 255;
label = 255;
}
inline PointXYZRGBL (uint8_t _r, uint8_t _g, uint8_t _b, uint32_t _label)
r = _r;
g = _g;
b = _b;
- a = 0;
+ a = 255;
label = _label;
}
{
x = y = z = 0.0f;
data[3] = 1.0f;
- r = g = b = a = 0;
+ r = g = b = 0;
+ a = 255;
normal_x = normal_y = normal_z = data_n[3] = 0.0f;
curvature = 0;
}
x = y = z = 0.0f;
data[3] = 1.0f;
normal_x = normal_y = normal_z = data_n[3] = 0.0f;
- rgba = 0;
+ r = g = b = 0;
+ a = 255;
radius = confidence = curvature = 0.0f;
}
#include <iostream>
#include <stdarg.h>
#include <stdio.h>
+#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
+#endif
#include <math.h>
// MSCV doesn't have std::{isnan,isfinite}
#if defined(__APPLE__) || defined(_WIN64) || GLIBC_MALLOC_ALIGNED || FREEBSD_MALLOC_ALIGNED
#define MALLOC_ALIGNED 1
-#else
- #define MALLOC_ALIGNED 0
+#endif
+
+#if defined (HAVE_MM_MALLOC)
+ #include <mm_malloc.h>
#endif
inline void*
ptr = _mm_malloc (size, 16);
#elif defined (_MSC_VER)
ptr = _aligned_malloc (size, 16);
+#elif defined (ANDROID)
+ ptr = memalign (16, size);
#else
#error aligned_malloc not supported on your platform
ptr = 0;
#if defined (MALLOC_ALIGNED) || defined (HAVE_POSIX_MEMALIGN)
std::free (ptr);
#elif defined (HAVE_MM_MALLOC)
- ptr = _mm_free (ptr);
+ _mm_free (ptr);
#elif defined (_MSC_VER)
_aligned_free (ptr);
+#elif defined (ANDROID)
+ free (ptr);
#else
#error aligned_free not supported on your platform
#endif
typedef boost::shared_ptr<PointCloud<PointT> > Ptr;
typedef boost::shared_ptr<const PointCloud<PointT> > ConstPtr;
+ // std container compatibility typedefs according to
+ // http://en.cppreference.com/w/cpp/concept/Container
+ typedef PointT value_type;
+ typedef PointT& reference;
+ typedef const PointT& const_reference;
+ typedef typename VectorType::difference_type difference_type;
+ typedef typename VectorType::size_type size_type;
+
// iterators
typedef typename VectorType::iterator iterator;
typedef typename VectorType::const_iterator const_iterator;
{
if (field.name == "rgb")
{
- return (field.datatype == pcl::PCLPointField::FLOAT32 &&
+ // To fix the alpha value bug #1141, the rgb field is also allowed to
+ // match uint32.
+ return ((field.datatype == pcl::PCLPointField::FLOAT32 ||
+ field.datatype == pcl::PCLPointField::UINT32) &&
field.count == 1);
}
else
}
else
{
+ // To fix the alpha value bug #1141, rgb is also allowed to match uint32
return (field.name == traits::name<PointT, fields::rgb>::value &&
- field.datatype == traits::datatype<PointT, fields::rgb>::value &&
+ (field.datatype == traits::datatype<PointT, fields::rgb>::value ||
+ field.datatype == pcl::PCLPointField::UINT32) &&
field.count == traits::datatype<PointT, fields::rgb>::size);
}
}
float r1Sqr = r1*r1,
r2Sqr = r2*r2,
dSqr = squaredEuclideanDistance (point1, point2),
- d = sqrtf (dSqr);
+ d = std::sqrt (dSqr);
float cos_impact_angle = (r2Sqr + dSqr - r1Sqr)/ (2.0f*r2*d);
cos_impact_angle = (std::max) (0.0f, (std::min) (1.0f, cos_impact_angle));
impact_angle = acosf (cos_impact_angle); // Using the cosine rule
//return acosf ( (Eigen::Vector3f (point.x, point.y, point.z)-getSensorPos ()).normalized ().dot ( (Eigen::Vector3f (neighbor1.x, neighbor1.y, neighbor1.z)-Eigen::Vector3f (point.x, point.y, point.z)).normalized ()));
//float d1_squared = squaredEuclideanDistance (point, neighbor1),
- //d1 = sqrtf (d1_squared),
+ //d1 = std::sqrt (d1_squared),
//d2_squared = squaredEuclideanDistance (point, neighbor2),
- //d2 = sqrtf (d2_squared),
+ //d2 = std::sqrt (d2_squared),
//d3_squared = squaredEuclideanDistance (neighbor1, neighbor2);
//float cos_surface_change = (d1_squared + d2_squared - d3_squared)/ (2.0f*d1*d2),
//surface_change = acosf (cos_surface_change);
else
break;
}
- //std::cout << x<<","<<y<<"->"<<x2<<","<<y2<<": "<<sqrtf (pixel_distance)<<"m\n";
+ //std::cout << x<<","<<y<<"->"<<x2<<","<<y2<<": "<<std::sqrt (pixel_distance)<<"m\n";
weight += 1.0f;
- average_pixel_distance += sqrtf (pixel_distance);
+ average_pixel_distance += std::sqrt (pixel_distance);
}
average_pixel_distance /= weight;
//std::cout << x<<","<<y<<","<<offset_x<<","<<offset_y<<" => "<<average_pixel_distance<<"\n";
//cout << __PRETTY_FUNCTION__ << " called.\n";
float delta_x = (image_x+static_cast<float> (image_offset_x_)-center_x_)*focal_length_x_reciprocal_,
delta_y = (image_y+static_cast<float> (image_offset_y_)-center_y_)*focal_length_y_reciprocal_;
- point[2] = range / (sqrtf (delta_x*delta_x + delta_y*delta_y + 1));
+ point[2] = range / (std::sqrt (delta_x*delta_x + delta_y*delta_y + 1));
point[0] = delta_x*point[2];
point[1] = delta_y*point[2];
point = to_world_system_ * point;
* \param[out] msg the resultant PCLPointCloud2 binary blob
*/
template<typename PointT>
- PCL_DEPRECATED ("pcl::fromROSMsg is deprecated, please use fromPCLPointCloud2 instead.")
+ PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
void
toROSMsg (const pcl::PointCloud<PointT>& cloud, pcl::PCLPointCloud2& msg)
{
* \note will throw std::runtime_error if there is a problem
*/
template<typename CloudT>
- PCL_DEPRECATED ("pcl::fromROSMsg is deprecated, please use fromPCLPointCloud2 instead.")
+ PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
void
toROSMsg (const CloudT& cloud, pcl::PCLImage& msg)
{
* will throw std::runtime_error if there is a problem
*/
inline void
- PCL_DEPRECATED ("pcl::fromROSMsg is deprecated, please use fromPCLPointCloud2 instead.")
+ PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
toROSMsg (const pcl::PCLPointCloud2& cloud, pcl::PCLImage& msg)
{
toPCLPointCloud2 (cloud, msg);
std::ostream&
operator << (std::ostream& os, const PointXYZRGBL& p)
{
- os << "(" << p.x << "," << p.y << "," << p.z << " - " << p.r << "," << p.g << "," << p.b << " - " << p.label << ")";
+ os << "(" << p.x << "," << p.y << "," << p.z << " - "
+ << static_cast<int>(p.r) << ","
+ << static_cast<int>(p.g) << ","
+ << static_cast<int>(p.b) << " - "
+ << p.label << ")";
return (os);
}
std::ostream&
operator << (std::ostream& os, const PointXYZRGBNormal& p)
{
- os << "(" << p.x << "," << p.y << "," << p.z << " - " << p.rgb << " - " << p.normal[0] << "," << p.normal[1] << "," << p.normal[2] << " - " << p.r << ", " << p.g << ", " << p.b << " - " << p.curvature << ")";
+ os << "(" << p.x << "," << p.y << "," << p.z << " - "<< p.rgb << " - " << p.normal[0] << "," << p.normal[1] << "," << p.normal[2] << " - "
+ << static_cast<int>(p.r) << ", "
+ << static_cast<int>(p.g) << ", "
+ << static_cast<int>(p.b) << " - "
+ << p.curvature << ")";
return (os);
}
std::ostream&
operator << (std::ostream& os, const PointSurfel& p)
{
- const unsigned char* rgba_ptr = reinterpret_cast<const unsigned char*>(&p.rgba);
os <<
"(" << p.x << "," << p.y << "," << p.z << " - " <<
p.normal_x << "," << p.normal_y << "," << p.normal_z << " - "
- << static_cast<int>(*rgba_ptr) << ","
- << static_cast<int>(*(rgba_ptr+1)) << ","
- << static_cast<int>(*(rgba_ptr+2)) << ","
- << static_cast<int>(*(rgba_ptr+3)) << " - " <<
+ << static_cast<int>(p.r) << ","
+ << static_cast<int>(p.g) << ","
+ << static_cast<int>(p.b) << ","
+ << static_cast<int>(p.a) << " - " <<
p.radius << " - " << p.confidence << " - " << p.curvature << ")";
return (os);
}
if ( distance_quotient_squared < min_distance_quotient_squared
|| distance_quotient_squared > max_distance_quotient_squared)
{
- //std::cout << "Skipping because of mismatching distances "<<sqrtf (distance1_squared)
- // << " and "<<sqrtf (distance1_corr_squared)<<".\n";
+ //std::cout << "Skipping because of mismatching distances "<<std::sqrt (distance1_squared)
+ // << " and "<<std::sqrt (distance1_corr_squared)<<".\n";
continue;
}
- float distance = sqrtf (distance_squared);
+ float distance = std::sqrt (distance_squared);
Eigen::Vector3f corr3=corr1, corr4=corr2;
corr3[0]+=distance; corr4[0]+=distance;
continue;
}
still_in_range = true;
- float distance = sqrtf (distance_squared),
+ float distance = std::sqrt (distance_squared),
weight = distance*max_dist_reciprocal;
vector_average.add (neighbor, weight);
}
pcl::PointCloud<pcl::PointXYZRGB>::Ptr output (new pcl::PointCloud<pcl::PointXYZRGB>);
pcl::cuda::toPCL (*data, *output);
- viewer.showCloud (output);
+ viewer.showCloud (output, "cloud");
}
#include <cmath>
#include <string>
-#ifdef WIN32
+#ifdef _WIN32
# define NOMINMAX
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
inline double
pcl::cuda::getTime ()
{
-#ifdef WIN32
+#ifdef _WIN32
LARGE_INTEGER frequency;
LARGE_INTEGER timer_tick;
QueryPerformanceFrequency(&frequency);
*
*/
-#ifdef WIN32
+#ifdef _WIN32
# define WIN32_LEAN_AND_MEAN
#endif
else(HTML_HELP_COMPILER)
set(DOCUMENTATION_HTML_HELP NO)
endif(HTML_HELP_COMPILER)
+ if(DOCUMENTATION_HTML_HELP)
+ set(DOCUMENTATION_SEARCHENGINE NO)
+ else(DOCUMENTATION_HTML_HELP)
+ set(DOCUMENTATION_SEARCHENGINE YES)
+ endif(DOCUMENTATION_HTML_HELP)
if(DOXYGEN_DOT_EXECUTABLE)
set(HAVE_DOT YES)
else(DOXYGEN_DOT_EXECUTABLE)
PATTERN "*.md5" EXCLUDE
PATTERN "*.map" EXCLUDE
PATTERN "*.chm" EXCLUDE)
+ install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/pointcloudlibrary_logo.png"
+ DESTINATION "${DOC_INSTALL_DIR}/html"
+ COMPONENT doc)
endif(DOCUMENTATION_HTML_HELP STREQUAL YES)
endif(DOXYGEN_FOUND)
USE_MATHJAX = NO
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
MATHJAX_EXTENSIONS =
-SEARCHENGINE = YES
+SEARCHENGINE = @DOCUMENTATION_SEARCHENGINE@
SERVER_BASED_SEARCH = YES
#---------------------------------------------------------------------------
BSD license</a> and is open source software. <b>It is free for commercial and
research use.</b>
-\image html http://www.pointclouds.org/assets/images/contents/logos/pointcloudlibrary_logo.png
+\image html pointcloudlibrary_logo.png
PCL is cross-platform, and has been successfully compiled and deployed on
Linux, MacOS, Windows, and Android. To simplify development, PCL is split into
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.pngmath', 'sphinxcontrib.doxylink.doxylink']
-pngmath_dvipng_args = ['-gamma 1.5', '-D 110', '-bg Transparent']
+extensions = ['sphinx.ext.imgmath', 'sphinxcontrib.doxylink.doxylink']
+imgmath_dvipng_args = ['-gamma', '1.5', '-D', '110', '-bg', 'Transparent']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
-html_title = None
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Home'
}
html_show_copyright = False
html_show_sphinx = False
-html_add_permalinks = None
-needs_sphinx = 1.0
+html_add_permalinks = u''
+needs_sphinx = u'1.1'
file_insertion_enabled = True
raw_enabled = True
The capture parameters (exposure, gain, etc.) are set to default values.
If you have performed and stored an extrinsic calibration it will be temporarily reset.
+If you are using an Ensenso X device you have to calibrate the device before trying to run the PCL driver. If you don't, you will get an error like this:
+
+.. code-block:: cpp
+ Initialising nxLib
+ Opening Ensenso stereo camera id = 0
+ openDevice: NxLib error ExecutionFailed (17) occurred while accessing item /Execute.
+
+ {
+ "ErrorSymbol": "InvalidCalibrationData",
+ "ErrorText": "Stereo camera calibration data is corrupted or not supported yet by the current software version.",
+ "Execute": {
+ "Command": "Open",
+ "Parameters": {
+ "AllowFirmwareUpload": null,
+ "Cameras": "171197",
+ "FirmwareUpload": {
+ "Camera": null,
+ "Projector": null
+ },
+ "LoadCalibration": null,
+ "Projector": null,
+ "Threads": null
+ }
+ },
+ "Time": 8902,
+ "TimeExecute": 8901,
+ "TimeFinalize": 0.03477,
+ "TimePrepare": 0.01185
+ }
+
.. code-block:: cpp
ensenso_ptr->enumDevices ();
:language: cmake
:linenos:
-After you have made the executable, you can run it. Simply do::
+After you have made the executable, run it passing a path to a PCD or PLY file.
+To reproduce the results shown below, you can download the `cube.ply
+<https://raw.github.com/PointCloudLibrary/pcl/master/test/cube.ply>`_ file::
$ ./matrix_transform cube.ply
In the beginning we do command line parsing, read a point cloud from disc (or
create it if not provided), create a range image and visualize it. All of these
-steps are already covered in the previous tutorial Range image visualization.
+steps are already covered in the previous tutorial `Range image visualization <http://www.pointclouds.org/documentation/tutorials/range_image_visualization.php#range-image-visualization>`_ .
The interesting part begins here:
Theoretical Primer
------------------
-The abbreviation of "RANdom SAmple Consensus" is RANSAC, and it is an iterative method that is used to estimate parameters of a meathematical model from a set of data containing outliers. This algorithm was published by Fischler and Bolles in 1981. The RANSAC algorithm assumes that all of the data we are looking at is comprised of both inliers and outliers. Inliers can be explained by a model with a particular set of parameter values, while outliers do not fit that model in any circumstance. Another necessary assumption is that a procedure which can optimally estimate the parameters of the chosen model from the data is available.
+RANSAC is an abbreviation for "RANdom SAmple Consensus", an iterative method used to estimate the parameters of a mathematical model from a set of data containing outliers. The algorithm was published by Fischler and Bolles in 1981. RANSAC assumes that the data consists of both inliers, i.e. points that can be explained by a model with a particular set of parameter values, and outliers, which do not fit that model under any circumstances. It also assumes that a procedure is available which can optimally estimate the parameters of the chosen model from the data.
From [Wikipedia]_:
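To make the primer concrete, the following is a minimal sketch of how RANSAC is typically driven through PCL's sample-consensus machinery; the plane model, the 1 cm inlier threshold and the ``fitPlane`` helper are illustrative assumptions, not part of this change set.

.. code-block:: cpp

   #include <pcl/point_types.h>
   #include <pcl/point_cloud.h>
   #include <pcl/ModelCoefficients.h>
   #include <pcl/PointIndices.h>
   #include <pcl/sample_consensus/method_types.h>
   #include <pcl/sample_consensus/model_types.h>
   #include <pcl/segmentation/sac_segmentation.h>

   // Illustrative helper: fit a plane to a cloud with RANSAC, treating points
   // closer than 1 cm to the candidate model as inliers.
   void
   fitPlane (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
   {
     pcl::ModelCoefficients coefficients;
     pcl::PointIndices inliers;

     pcl::SACSegmentation<pcl::PointXYZ> seg;
     seg.setModelType (pcl::SACMODEL_PLANE);  // model whose parameters are estimated
     seg.setMethodType (pcl::SAC_RANSAC);     // random sampling + consensus scoring
     seg.setDistanceThreshold (0.01);         // inlier/outlier decision boundary (metres)
     seg.setInputCloud (cloud);
     seg.segment (inliers, coefficients);     // coefficients hold ax + by + cz + d = 0
   }

Points listed in ``inliers`` form the consensus set for the best model found; everything else is treated as an outlier under the chosen threshold.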
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <iostream>
#include <vector>
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
#include <iostream>
#include <vector>
void keyboardEventOccurred (const pcl::visualization::KeyboardEvent &event,
void* viewer_void)
{
- boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer = *static_cast<boost::shared_ptr<pcl::visualization::PCLVisualizer> *> (viewer_void);
+ pcl::visualization::PCLVisualizer *viewer = static_cast<pcl::visualization::PCLVisualizer *> (viewer_void);
if (event.getKeySym () == "r" && event.keyDown ())
{
std::cout << "r was pressed => removing all text" << std::endl;
void mouseEventOccurred (const pcl::visualization::MouseEvent &event,
void* viewer_void)
{
- boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer = *static_cast<boost::shared_ptr<pcl::visualization::PCLVisualizer> *> (viewer_void);
+ pcl::visualization::PCLVisualizer *viewer = static_cast<pcl::visualization::PCLVisualizer *> (viewer_void);
if (event.getButton () == pcl::visualization::MouseEvent::LeftButton &&
event.getType () == pcl::visualization::MouseEvent::MouseButtonRelease)
{
viewer->setBackgroundColor (0, 0, 0);
viewer->addCoordinateSystem (1.0);
- viewer->registerKeyboardCallback (keyboardEventOccurred, (void*)&viewer);
- viewer->registerMouseCallback (mouseEventOccurred, (void*)&viewer);
+ viewer->registerKeyboardCallback (keyboardEventOccurred, (void*)viewer.get ());
+ viewer->registerMouseCallback (mouseEventOccurred, (void*)viewer.get ());
return (viewer);
}
/** @brief Destructor */
~PCLViewer ();
- public slots:
+ public Q_SLOTS:
/** @brief Triggered whenever the "Save file" button is clicked */
void
saveFileButtonPressed ();
explicit PCLViewer (QWidget *parent = 0);
~PCLViewer ();
-public slots:
+public Q_SLOTS:
void
randomButtonPressed ();
if (depth_file_specified)
pcl::console::parse (argc, argv, "-d", depth_path);
- PointCloudT::Ptr cloud = boost::make_shared < PointCloudT >();
+ PointCloudT::Ptr cloud = boost::shared_ptr<PointCloudT> (new PointCloudT);
NormalCloudT::Ptr input_normals = boost::make_shared < NormalCloudT > ();
bool pcd_file_specified = pcl::console::find_switch (argc, argv, "-p");
#include <Eigen/StdVector>
#include <Eigen/Geometry>
-#include <Eigen/Sparse>
#endif // PCL_FEATURES_EIGEN_H_
/// ----- Compute current neighbour polar coordinates -----
/// Get distance between the neighbour and the origin
- float r = sqrtf (nn_dists[ne]);
+ float r = std::sqrt (nn_dists[ne]);
/// Project point into the tangent plane
Eigen::Vector3f proj;
for (size_t i = 0; i < grid.points.size (); ++i)
{
bin = static_cast<int> ((((atan2 (grid.points[i].normal_y, grid.points[i].normal_x) + M_PI) * 180 / M_PI) / bin_angle)) % nbins;
- w = sqrtf (grid.points[i].normal_y * grid.points[i].normal_y + grid.points[i].normal_x * grid.points[i].normal_x);
+ w = std::sqrt (grid.points[i].normal_y * grid.points[i].normal_y + grid.points[i].normal_x * grid.points[i].normal_x);
sum_w += w;
spatial_data[bin] += w;
}
Eigen::Vector4f v23 (p2 - p3);
a = v21.norm (); b = v31.norm (); c = v23.norm (); s = (a+b+c) * 0.5f;
if (s * (s-a) * (s-b) * (s-c) <= 0.001f)
- continue;
+ {
+ nn_idx--;
+ continue;
+ }
v21.normalize ();
v31.normalize ();
}
// D3 ( herons formula )
- d3v.push_back (sqrtf (sqrtf (s * (s-a) * (s-b) * (s-c))));
+ d3v.push_back (std::sqrt (std::sqrt (s * (s-a) * (s-b) * (s-c))));
if (vxlcnt_sum <= 21)
{
wt_d3.push_back (0);
#define PCL_FEATURES_IMPL_GFPFH_H_
#include <pcl/features/gfpfh.h>
-#include <pcl/octree/octree.h>
#include <pcl/octree/octree_search.h>
#include <pcl/common/eigen.h>
rsd.setSearchSurface (input_);
rsd.setInputNormals (normals_);
rsd.setRadiusSearch (std::max (search_radius_, std::sqrt (3.0) * width_ / 2));
-// pcl::KdTree<PointInT>::Ptr tree = boost::make_shared<pcl::KdTreeFLANN<PointInT> >();
-// tree->setInputCloud(input_);
-// rsd.setSearchMethod(tree);
rsd.compute (*radii);
// Save the type of each point
flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
- const float scale = 1.0f / sqrtf (normal_length);
+ const float scale = 1.0f / std::sqrt (normal_length);
normal.normal_x = normal_x * scale;
normal.normal_y = normal_y * scale;
flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
- const float scale = 1.0f / sqrtf (normal_length);
+ const float scale = 1.0f / std::sqrt (normal_length);
normal.normal_x = normal_x * scale;
normal.normal_y = normal_y * scale;
{
// Normalize distance and intensity values to: 0.0 <= d,i < nr_distance_bins,nr_intensity_bins
const float eps = std::numeric_limits<float>::epsilon ();
- float d = static_cast<float> (nr_distance_bins) * sqrtf (squared_distances[idx]) / (radius + eps);
+ float d = static_cast<float> (nr_distance_bins) * std::sqrt (squared_distances[idx]) / (radius + eps);
float i = static_cast<float> (nr_intensity_bins) *
(cloud.points[indices[idx]].intensity - min_intensity) / (max_intensity - min_intensity + eps);
}
else
{
- const float normInv = 1.0f / sqrtf (length);
+ const float normInv = 1.0f / std::sqrt (length);
normal.normal_x = -nx * normInv;
normal.normal_y = -ny * normInv;
}
else
{
- const float normInv = 1.0f / sqrtf (length);
+ const float normInv = 1.0f / std::sqrt (length);
output.points[index].normal_x = nx * normInv;
output.points[index].normal_y = ny * normInv;
standard_dev += diff * diff;
diff_vector[point_i] = diff;
}
- standard_dev = sqrtf (standard_dev / static_cast<float> (features_at_scale_vectorized_[scale_i].size ()));
+ standard_dev = std::sqrt (standard_dev / static_cast<float> (features_at_scale_vectorized_[scale_i].size ()));
PCL_DEBUG ("[pcl::MultiscaleFeaturePersistence::extractUniqueFeatures] Standard deviation for scale %f is %f\n", scale_values_[scale_i], standard_dev);
// Select only points outside (mean +/- alpha * standard_dev)
//{
//diff = (diff - middle_value)*normalization_factor2;
//diff = 0.5f + 0.5f*diff;
- ////diff = 0.5f + 0.5f*sqrtf(diff);
+ ////diff = 0.5f + 0.5f*std::sqrt(diff);
////diff = 0.5f + 0.5f*powf(diff, 0.3f);
//}
//ret += diff;
Xk_real += static_cast<float> (s_matrix (n, column_i) * cos (2.0f * M_PI / static_cast<double> (N_ * k * n)));
Xk_imag += static_cast<float> (s_matrix (n, column_i) * sin (2.0f * M_PI / static_cast<double> (N_ * k * n)));
}
- dft_col[k] = sqrtf (Xk_real*Xk_real + Xk_imag*Xk_imag);
+ dft_col[k] = std::sqrt (Xk_real*Xk_real + Xk_imag*Xk_imag);
}
dft_matrix.col (column_i).matrix () = dft_col;
}
}
int h_index = (d <= 0) ? 0 : std::ceil (size_hists * (d / distance_normalization_factor)) - 1;
+ /* From http://www.pcl-users.org/OUR-CVFH-problem-td4028436.html:
+ h_index will be 13 when d is computed on the farthest away point.
+ Adding the following clamp after computing h_index fixes the problem: */
+ if (h_index > 12)
+ h_index = 12;
for (int j = 0; j < num_hists; j++)
quadrants[j][h_index] += hist_incr * weights[j];
float hist_incr = 100.0f / static_cast<float> (indices.size () * (indices.size () - 1) / 2);
std::pair<int, int> key;
+ bool key_found = false;
+
// Iterate over all the points in the neighborhood
for (size_t i_idx = 0; i_idx < indices.size (); ++i_idx)
{
// Check to see if we already estimated this pair in the global hashmap
std::map<std::pair<int, int>, Eigen::Vector4f, std::less<std::pair<int, int> >, Eigen::aligned_allocator<std::pair<const std::pair<int, int>, Eigen::Vector4f> > >::iterator fm_it = feature_map_.find (key);
if (fm_it != feature_map_.end ())
+ {
pfh_tuple_ = fm_it->second;
+ key_found = true;
+ }
else
{
// Compute the pair NNi to NNj
if (!computePairFeatures (cloud, normals, indices[i_idx], indices[j_idx],
pfh_tuple_[0], pfh_tuple_[1], pfh_tuple_[2], pfh_tuple_[3]))
continue;
+
+ key_found = false;
}
}
else
}
pfh_histogram[h_index] += hist_incr;
- if (use_cache_)
+ if (use_cache_ && !key_found)
{
// Save the value in the hashmap
feature_map_[key] = pfh_tuple_;
float neighbor_distance_squared = squaredEuclideanDistance(neighbor, point);
if (neighbor_distance_squared <= local_surface.max_neighbor_distance_squared)
return 0.0f;
- float ret = 1.0f - sqrtf(local_surface.max_neighbor_distance_squared / neighbor_distance_squared);
+ float ret = 1.0f - std::sqrt (local_surface.max_neighbor_distance_squared / neighbor_distance_squared);
if (neighbor.range < point.range)
ret = -ret;
return ret;
Eigen::Vector3f eigen_values, eigen_vector1, eigen_vector2;
vector_average.doPCA(eigen_values, eigen_vector1, eigen_vector2, main_direction);
- magnitude = sqrtf(eigen_values[2]);
+ magnitude = std::sqrt (eigen_values[2]);
//magnitude = eigen_values[2];
//magnitude = 1.0f - powf(1.0f-magnitude, 5);
//magnitude = 1.0f - powf(1.0f-magnitude, 10);
//magnitude += magnitude - powf(magnitude,2);
//magnitude += magnitude - powf(magnitude,2);
- //magnitude = sqrtf(local_surface->eigen_values[0]/local_surface->eigen_values.sum());
- //magnitude = sqrtf(local_surface->eigen_values_no_jumps[0]/local_surface->eigen_values_no_jumps.sum());
+ //magnitude = std::sqrt (local_surface->eigen_values[0]/local_surface->eigen_values.sum());
+ //magnitude = std::sqrt (local_surface->eigen_values_no_jumps[0]/local_surface->eigen_values_no_jumps.sum());
//if (surface_structure_[y*range_image_->width+x+1]==NULL||surface_structure_[y*range_image_->width+x-1]==NULL)
//{
// Normalize distance and angle values to: 0.0 <= d,g < nr_distances_bins,nr_gradient_bins
const float eps = std::numeric_limits<float>::epsilon ();
- float d = static_cast<float> (nr_distance_bins) * sqrtf (sqr_distances[idx]) / (radius + eps);
+ float d = static_cast<float> (nr_distance_bins) * std::sqrt (sqr_distances[idx]) / (radius + eps);
float g = static_cast<float> (nr_gradient_bins) * gradient_angle_from_center / (static_cast<float> (M_PI) + eps);
// Compute the bin indices that need to be updated
kdtree.nearestKSearch (static_cast<int> (point_i), 16, k_indices, k_distances);
for (int k_i = 0; k_i < static_cast<int> (k_indices.size ()); ++k_i)
- add_edge (point_i, k_indices[k_i], Weight (sqrtf (k_distances[k_i])), cloud_graph);
+ add_edge (point_i, k_indices[k_i], Weight (std::sqrt (k_distances[k_i])), cloud_graph);
}
const size_t E = num_edges (cloud_graph),
for (size_t point_j = 0; point_j < input_->points.size (); ++point_j)
{
float d_g = geodesic_distances_[point_i][point_j];
- float phi_i_j = 1.0f / sqrtf (2.0f * static_cast<float> (M_PI) * scale_squared) * expf ( (-1) * d_g*d_g / (2.0f * scale_squared));
+ float phi_i_j = 1.0f / std::sqrt (2.0f * static_cast<float> (M_PI) * scale_squared) * expf ( (-1) * d_g*d_g / (2.0f * scale_squared));
point_density_i += phi_i_j;
phi_row[point_j] = phi_i_j;
// ----- Compute current neighbour polar coordinates -----
// Get distance between the neighbour and the origin
- float r = sqrtf (nn_dists[ne]);
+ float r = std::sqrt (nn_dists[ne]);
// Project point into the tangent plane
Eigen::Vector3f proj;
shot_ (), lrf_radius_ (0),
sqradius_ (0), radius3_4_ (0), radius1_4_ (0), radius1_2_ (0),
nr_grid_sector_ (32),
- maxAngularSectors_ (28),
+ maxAngularSectors_ (32),
descLength_ (0)
{
feature_name_ = "SHOTEstimation";
// Instantiations of specific point types
#ifdef PCL_ONLY_CORE_POINT_TYPES
PCL_INSTANTIATE_PRODUCT(FPFHEstimation, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointNormal))((pcl::Normal)(pcl::PointNormal))((pcl::FPFHSignature33)))
- PCL_INSTANTIATE_PRODUCT(FPFHEstimationOMP, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA))((pcl::Normal))((pcl::FPFHSignature33)))
+ PCL_INSTANTIATE_PRODUCT(FPFHEstimationOMP, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointNormal))((pcl::Normal)(pcl::PointNormal))((pcl::FPFHSignature33)))
#else
PCL_INSTANTIATE_PRODUCT(FPFHEstimation, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES)((pcl::FPFHSignature33)))
PCL_INSTANTIATE_PRODUCT(FPFHEstimationOMP, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES)((pcl::FPFHSignature33)))
// Instantiations of specific point types
#ifdef PCL_ONLY_CORE_POINT_TYPES
PCL_INSTANTIATE_PRODUCT(NormalEstimation, ((pcl::PointSurfel)(pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointNormal))((pcl::Normal)(pcl::PointNormal)(pcl::PointXYZRGBNormal)))
- PCL_INSTANTIATE_PRODUCT(NormalEstimationOMP, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA))((pcl::Normal)))
+ PCL_INSTANTIATE_PRODUCT(NormalEstimationOMP, ((pcl::PointSurfel)(pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointNormal))((pcl::Normal)(pcl::PointNormal)(pcl::PointXYZRGBNormal)))
#else
PCL_INSTANTIATE_PRODUCT(NormalEstimation, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES))
PCL_INSTANTIATE_PRODUCT(NormalEstimationOMP, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES))
}
bool transform_matrix_is_identity = transform_.matrix ().isIdentity ();
- bool translation_is_zero = (translation_ != Eigen::Vector3f::Zero ());
+ bool translation_is_zero = (translation_ == Eigen::Vector3f::Zero ());
bool inverse_transform_matrix_is_identity = inverse_transform.matrix ().isIdentity ();
for (size_t index = 0; index < indices_->size (); ++index)
if (!transform_matrix_is_identity)
local_pt = pcl::transformPoint<PointT> (local_pt, transform_);
- if (translation_is_zero)
+ if (!translation_is_zero)
{
local_pt.x -= translation_ (0);
local_pt.y -= translation_ (1);
pcl::for_each_type<FieldList> (pcl::detail::FieldAdder<PointT> (fields));
for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
{
- uint8_t* pt_data = reinterpret_cast<uint8_t*> (&cloud->points[(*removed_indices_)[rii]]);
+ int pt_index = (*removed_indices_)[rii];
+ if (pt_index >= input_->points.size ())
+ {
+ PCL_ERROR ("[pcl::%s::filterDirectly] The index exceeds the size of the input. Do nothing.\n",
+ getClassName ().c_str ());
+ *cloud = *input_;
+ return;
+ }
+ uint8_t* pt_data = reinterpret_cast<uint8_t*> (&cloud->points[pt_index]);
for (int fi = 0; fi < static_cast<int> (fields.size ()); ++fi) // fi = field iterator
memcpy (pt_data + fields[fi].offset, &user_filter_value_, sizeof (float));
}
pcl::for_each_type<FieldList> (pcl::detail::FieldAdder<PointT> (fields));
for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
{
- uint8_t* pt_data = reinterpret_cast<uint8_t*> (&output.points[(*removed_indices_)[rii]]);
+ int pt_index = (*removed_indices_)[rii];
+ if (pt_index >= input_->points.size ())
+ {
+ PCL_ERROR ("[pcl::%s::applyFilter] The index exceeds the size of the input. Do nothing.\n",
+ getClassName ().c_str ());
+ output = *input_;
+ return;
+ }
+ uint8_t* pt_data = reinterpret_cast<uint8_t*> (&output.points[pt_index]);
for (int fi = 0; fi < static_cast<int> (fields.size ()); ++fi) // fi = field iterator
memcpy (pt_data + fields[fi].offset, &user_filter_value_, sizeof (float));
}
// check distance of pointcloud to model
std::vector<double> distances;
//TODO: get signed distances !
+ model_->setIndices (indices_); // restrict the model to the given indices so the returned distances align with them and less is computed
model_->getDistancesToModel (model_coefficients_, distances);
+
bool thresh_result;
// Filter for non-finite entries and the specified field limits
continue;
}
- // use threshold function to seperate outliers from inliers:
+ // use threshold function to separate outliers from inliers:
thresh_result = threshold_function_ (distances[iii]);
// in normal mode: define outliers as false thresh_result
#include <pcl/common/common.h>
#include <pcl/common/io.h>
#include <pcl/filters/morphological_filter.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
}
// Normalize if norm valid and non-zero
- const float norm = sqrtf (nx * nx + ny * ny + nz * nz);
+ const float norm = std::sqrt (nx * nx + ny * ny + nz * nz);
if (pcl_isfinite (norm) && norm > std::numeric_limits<float>::epsilon ())
{
point.normal_x = nx / norm;
float line_y = polygon [currentInterval.second].x - polygon [currentInterval.first].x;
float line_d = polygon [currentInterval.first].x * polygon [currentInterval.second].y - polygon [currentInterval.first].y * polygon [currentInterval.second].x;
- float linelen = 1.0f / sqrtf (line_x * line_x + line_y * line_y);
+ float linelen = 1.0f / std::sqrt (line_x * line_x + line_y * line_y);
line_x *= linelen;
line_y *= linelen;
int Cores;
} SMtoCores;
- SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8}, { 0x11, 8}, { 0x12, 8}, { 0x13, 8}, { 0x20, 32}, { 0x21, 48}, {0x30, 192}, {0x35, 192}, {0x50, 128}, {0x52, 128}, {-1, -1} };
-
+ SMtoCores gpuArchCoresPerSM[] = {
+ {0x10, 8}, {0x11, 8}, {0x12, 8}, {0x13, 8}, {0x20, 32}, {0x21, 48}, {0x30, 192},
+ {0x35, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {-1, -1}
+ };
int index = 0;
while (gpuArchCoresPerSM[index].SM != -1)
{
int cols;
view_device_.download (view_host_, cols);
if (viz_)
- viewerScene_->showRGBImage (reinterpret_cast<unsigned char*> (&view_host_[0]), view_device_.cols (), view_device_.rows ());
+ viewerScene_->showRGBImage (reinterpret_cast<unsigned char*> (&view_host_[0]), view_device_.cols (), view_device_.rows (), "rgb_image");
//viewerColor_.showRGBImage ((unsigned char*)&rgb24.data, rgb24.cols, rgb24.rows);
showDepth (const PtrStepSz<const unsigned short>& depth)
{
if (viz_)
- viewerDepth_->showShortImage (depth.data, depth.cols, depth.rows, 0, 5000, true);
+ viewerDepth_->showShortImage (depth.data, depth.cols, depth.rows, 0, 5000, true, "short_image");
}
void
generated_depth_.download(data, c);
if (viz_)
- viewerDepth_->showShortImage (&data[0], generated_depth_.cols(), generated_depth_.rows(), 0, 5000, true);
+ viewerDepth_->showShortImage (&data[0], generated_depth_.cols(), generated_depth_.rows(), 0, 5000, true, "short_image");
}
void
#define PCL_WORLD_MODEL_H_
#include <pcl/common/impl/common.hpp>
-#include <pcl/octree/octree.h>
-#include <pcl/octree/octree_impl.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/filter_indices.h>
#include <pcl/filters/crop_box.h>
#include <pcl/gpu/kinfu_large_scale/world_model.h>
#include <pcl/gpu/kinfu_large_scale/impl/world_model.hpp>
+ #include <pcl/impl/instantiate.hpp>
{
//std::cout << "Giving colors1\n";
boost::mutex::scoped_try_lock lock(data_ready_mutex_);
- std::cout << lock << std::endl;
+ //std::cout << lock << std::endl; //causes compile errors
if (exit_ || !lock)
return;
//std::cout << "Giving colors2\n";
#pragma warning (disable: 4521)
#endif
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
#if defined _MSC_VER
#pragma warning (default: 4521)
#endif
cout << "count_pcl_better: " << count_pcl_better << endl;
cout << "avg_diff_pcl_better: " << diff_pcl_better << endl;
-}
\ No newline at end of file
+}
#endif
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
#if defined _MSC_VER
#pragma warning (default: 4521)
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
#if defined _MSC_VER
#pragma warning (default: 4521)
#pragma warning (disable: 4521)
#endif
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
#if defined _MSC_VER
#pragma warning (default: 4521)
#endif
pairs_gpu.pop_back();
}
}
-}
\ No newline at end of file
+}
#endif
#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
#if defined _MSC_VER
#pragma warning (default: 4521)
//==============================================================================
-#if __CUDA_ARCH__ < 200
+#if ((defined __CUDA_ARCH__) && (__CUDA_ARCH__ < 200))
// FP32 atomic add
static __forceinline__ __device__ float _atomicAdd(float *addr, float val)
)
PCL_ADD_LIBRARY(pcl_io_ply "${SUBSYS_NAME}" ${PLY_SOURCES} ${PLY_INCLUDES})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/ply" ${PLY_INCLUDES})
+ target_include_directories(pcl_io_ply PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
+
set(srcs
src/debayer.cpp
endif(PNG_FOUND)
set(impl_incs
+ "include/pcl/${SUBSYS_NAME}/impl/ascii_io.hpp"
"include/pcl/${SUBSYS_NAME}/impl/pcd_io.hpp"
"include/pcl/${SUBSYS_NAME}/impl/auto_io.hpp"
"include/pcl/${SUBSYS_NAME}/impl/lzf_image_io.hpp"
set(LIB_NAME "pcl_${SUBSYS_NAME}")
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" ${VTK_INCLUDE_DIRECTORIES})
add_definitions(${VTK_DEFINES})
PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${compression_incs} ${impl_incs} ${OPENNI_INCLUDES} ${OPENNI2_INCLUDES})
+ target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ${VTK_INCLUDE_DIRECTORIES})
link_directories(${VTK_LINK_DIRECTORIES})
target_link_libraries("${LIB_NAME}" pcl_common pcl_io_ply ${VTK_LIBRARIES} )
if(PNG_FOUND)
#ifndef OCTREE_COMPRESSION_HPP
#define OCTREE_COMPRESSION_HPP
-#include <pcl/octree/octree_pointcloud.h>
#include <pcl/compression/entropy_range_coder.h>
#include <iterator>
#include <iostream>
#include <stdio.h>
-using namespace pcl::octree;
-
namespace pcl
{
namespace io
// prepare for next frame
this->switchBuffers ();
- i_frame_ = false;
// reset object count
object_count_ = 0;
else
PCL_INFO ("Encoding Frame: Prediction frame\n");
PCL_INFO ("Number of encoded points: %ld\n", point_count_);
- PCL_INFO ("XYZ compression percentage: %f%%\n", bytes_per_XYZ / (3.0f * sizeof(float)) * 100.0f);
+ PCL_INFO ("XYZ compression percentage: %f%%\n", bytes_per_XYZ / (3.0f * sizeof (float)) * 100.0f);
PCL_INFO ("XYZ bytes per point: %f bytes\n", bytes_per_XYZ);
PCL_INFO ("Color compression percentage: %f%%\n", bytes_per_color / (sizeof (int)) * 100.0f);
PCL_INFO ("Color bytes per point: %f bytes\n", bytes_per_color);
- PCL_INFO ("Size of uncompressed point cloud: %f kBytes\n", static_cast<float> (point_count_) * (sizeof (int) + 3.0f * sizeof (float)) / 1024);
- PCL_INFO ("Size of compressed point cloud: %d kBytes\n", (compressed_point_data_len_ + compressed_color_data_len_) / (1024));
- PCL_INFO ("Total bytes per point: %f\n", bytes_per_XYZ + bytes_per_color);
- PCL_INFO ("Total compression percentage: %f\n", (bytes_per_XYZ + bytes_per_color) / (sizeof (int) + 3.0f * sizeof(float)) * 100.0f);
+ PCL_INFO ("Size of uncompressed point cloud: %f kBytes\n", static_cast<float> (point_count_) * (sizeof (int) + 3.0f * sizeof (float)) / 1024.0f);
+ PCL_INFO ("Size of compressed point cloud: %f kBytes\n", static_cast<float> (compressed_point_data_len_ + compressed_color_data_len_) / 1024.0f);
+ PCL_INFO ("Total bytes per point: %f bytes\n", bytes_per_XYZ + bytes_per_color);
+ PCL_INFO ("Total compression percentage: %f%%\n", (bytes_per_XYZ + bytes_per_color) / (sizeof (int) + 3.0f * sizeof (float)) * 100.0f);
PCL_INFO ("Compression ratio: %f\n\n", static_cast<float> (sizeof (int) + 3.0f * sizeof (float)) / static_cast<float> (bytes_per_XYZ + bytes_per_color));
}
+
+ i_frame_ = false;
} else {
if (b_show_statistics_)
PCL_INFO ("Info: Dropping empty point cloud\n");
PCL_INFO ("*** POINTCLOUD DECODING ***\n");
PCL_INFO ("Frame ID: %d\n", frame_ID_);
if (i_frame_)
- PCL_INFO ("Encoding Frame: Intra frame\n");
+ PCL_INFO ("Decoding Frame: Intra frame\n");
else
- PCL_INFO ("Encoding Frame: Prediction frame\n");
- PCL_INFO ("Number of encoded points: %ld\n", point_count_);
+ PCL_INFO ("Decoding Frame: Prediction frame\n");
+ PCL_INFO ("Number of decoded points: %ld\n", point_count_);
PCL_INFO ("XYZ compression percentage: %f%%\n", bytes_per_XYZ / (3.0f * sizeof (float)) * 100.0f);
PCL_INFO ("XYZ bytes per point: %f bytes\n", bytes_per_XYZ);
PCL_INFO ("Color compression percentage: %f%%\n", bytes_per_color / (sizeof (int)) * 100.0f);
PCL_INFO ("Color bytes per point: %f bytes\n", bytes_per_color);
PCL_INFO ("Size of uncompressed point cloud: %f kBytes\n", static_cast<float> (point_count_) * (sizeof (int) + 3.0f * sizeof (float)) / 1024.0f);
PCL_INFO ("Size of compressed point cloud: %f kBytes\n", static_cast<float> (compressed_point_data_len_ + compressed_color_data_len_) / 1024.0f);
- PCL_INFO ("Total bytes per point: %d bytes\n", static_cast<int> (bytes_per_XYZ + bytes_per_color));
+ PCL_INFO ("Total bytes per point: %f bytes\n", bytes_per_XYZ + bytes_per_color);
PCL_INFO ("Total compression percentage: %f%%\n", (bytes_per_XYZ + bytes_per_color) / (sizeof (int) + 3.0f * sizeof (float)) * 100.0f);
PCL_INFO ("Compression ratio: %f\n\n", static_cast<float> (sizeof (int) + 3.0f * sizeof (float)) / static_cast<float> (bytes_per_XYZ + bytes_per_color));
}
Eigen::Vector4f &origin, Eigen::Quaternionf &orientation, int &file_version,
const int offset = 0);
+ /** \brief Set the ascii file point fields.
+ */
+ template<typename PointT>
+ void setInputFields ();
+
/** \brief Set the ascii file point fields using a list of fields.
* \param[in] fields is a list of point fields, in order, in the input ascii file
*/
* \param[in] p a point type
*/
template<typename PointT>
- void setInputFields (const PointT p = PointT ());
+ PCL_DEPRECATED ("Use setInputFields<PointT> () instead")
+ inline void setInputFields (const PointT p)
+ {
+ (void) p;
+ setInputFields<PointT> ();
+ }
/** \brief Set the Separating characters for the ascii point fields 2.
};
}
-//////////////////////////////////////////////////////////////////////////////
-template<typename PointT> void
-pcl::ASCIIReader::setInputFields (const PointT p)
-{
- (void) p;
- pcl::getFields<PointT> (fields_);
- // Remove empty fields and adjust offset
- int offset =0;
- for (std::vector<pcl::PCLPointField>::iterator field_iter = fields_.begin ();
- field_iter != fields_.end (); field_iter++)
- {
- if (field_iter->name == "_")
- field_iter = fields_.erase (field_iter);
- field_iter->offset = offset;
- offset += typeSize (field_iter->datatype);
- }
-}
+
+#include <pcl/io/impl/ascii_io.hpp>
#endif // PCL_IO_ASCII_IO_H_
/** \brief Save point cloud to a binary file when available else to ASCII.
* \param[in] file_name the output file name
* \param[in] cloud the point cloud
- * \param[in] precision float precision when saving to ASCII files
* \ingroup io
*/
template<typename PointT> int
- save (const std::string& file_name, const pcl::PointCloud<PointT>& cloud, unsigned precision = 5);
+ save (const std::string& file_name, const pcl::PointCloud<PointT>& cloud);
/** \brief Saves a TextureMesh to a binary file when available else to ASCII.
* \param[in] file_name the name of the file to write to disk
const std::string &cloud_name = "cloud")
{
pcl::PCLPointCloud2 blob;
- pcl::fromPCLPointCloud2<PointT> (blob, cloud);
+ pcl::toPCLPointCloud2<PointT> (cloud, blob);
return (write (file_name, blob, cloud_name));
}
};
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_IO_ASCII_IO_HPP_
+#define PCL_IO_ASCII_IO_HPP_
+
+template<typename PointT> void
+pcl::ASCIIReader::setInputFields ()
+{
+ pcl::getFields<PointT> (fields_);
+
+ // Remove empty fields and adjust offset
+ int offset = 0;
+ for (std::vector<pcl::PCLPointField>::iterator field_iter = fields_.begin ();
+ field_iter != fields_.end ();)
+ {
+ if (field_iter->name == "_")
+ {
+ // erase () already returns the iterator to the next field; do not advance again
+ field_iter = fields_.erase (field_iter);
+ continue;
+ }
+ field_iter->offset = offset;
+ offset += typeSize (field_iter->datatype);
+ ++field_iter;
+ }
+}
+
+
+#endif //PCL_IO_ASCII_IO_HPP_
// Add the regular dimension
field_names << " " << fields[i].name;
field_sizes << " " << pcl::getFieldSize (fields[i].datatype);
- field_types << " " << pcl::getFieldType (fields[i].datatype);
+ if ("rgb" == fields[i].name)
+ field_types << " " << "U";
+ else
+ field_types << " " << pcl::getFieldType (fields[i].datatype);
int count = abs (static_cast<int> (fields[i].count));
if (count == 0) count = 1; // check for 0 counts (coming from older converter code)
field_counts << " " << count;
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during open!");
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during open!");
}
case pcl::PCLPointField::FLOAT32:
{
- float value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (float), sizeof (float));
- if (pcl_isnan (value))
- stream << "nan";
+ /*
+ * Despite the float type, store the rgb field as uint32
+ * because several fully opaque color values are mapped to
+ * nan.
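+ * (For example, a packed color with alpha = 0xFF and red >= 0x80 yields a bit
+ * pattern that reads back as NaN or Inf when interpreted as a float.)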
+ */
+ if ("rgb" == fields[d].name)
+ {
+ uint32_t value;
+ memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ if (pcl_isnan (value))
+ stream << "nan";
+ else
+ stream << boost::numeric_cast<uint32_t>(value);
+ break;
+ }
else
- stream << boost::numeric_cast<float>(value);
+ {
+ float value;
+ memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ if (pcl_isnan (value))
+ stream << "nan";
+ else
+ stream << boost::numeric_cast<float>(value);
+ }
break;
}
case pcl::PCLPointField::FLOAT64:
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during open!");
}
case pcl::PCLPointField::FLOAT32:
{
- float value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[indices[i]]) + fields[d].offset + c * sizeof (float), sizeof (float));
- if (pcl_isnan (value))
- stream << "nan";
+ /*
+ * Despite the float type, store the rgb field as uint32
+ * because several fully opaque color values are mapped to
+ * nan.
+ */
+ if ("rgb" == fields[d].name)
+ {
+ uint32_t value;
+ memcpy (&value, reinterpret_cast<const char*> (&cloud.points[indices[i]]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ if (pcl_isnan (value))
+ stream << "nan";
+ else
+ stream << boost::numeric_cast<uint32_t>(value);
+ }
else
- stream << boost::numeric_cast<float>(value);
+ {
+ float value;
+ memcpy (&value, reinterpret_cast<const char*> (&cloud.points[indices[i]]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ if (pcl_isnan (value))
+ stream << "nan";
+ else
+ stream << boost::numeric_cast<float>(value);
+ }
break;
}
case pcl::PCLPointField::FLOAT64:
#include <vtkStructuredGrid.h>
#include <vtkVertexGlyphFilter.h>
+// Support for VTK 7.1 upwards
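+// (vtkGenericDataArray_h is defined from VTK 7.1 onwards, where the *TupleValue accessors were renamed to *TypedTuple)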
+#ifdef vtkGenericDataArray_h
+#define SetTupleValue SetTypedTuple
+#define InsertNextTupleValue InsertNextTypedTuple
+#define GetTupleValue GetTypedTuple
+#endif
+
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::io::vtkPolyDataToPointCloud (vtkPolyData* const polydata, pcl::PointCloud<PointT>& cloud)
}
}
+#ifdef vtkGenericDataArray_h
+#undef SetTupleValue
+#undef InsertNextTupleValue
+#undef GetTupleValue
+#endif
+
#endif //#ifndef PCL_IO_VTK_IO_H_
/** Set desired framerate, depth and color resolution. */
Mode (unsigned int fps, unsigned int depth_width, unsigned int depth_height, unsigned int color_width, unsigned int color_height);
+
+ bool
+ operator== (const pcl::RealSenseGrabber::Mode& m) const;
};
enum TemporalFilteringType
}
-bool
-operator== (const pcl::RealSenseGrabber::Mode& m1, const pcl::RealSenseGrabber::Mode& m2);
-
#endif /* PCL_IO_REAL_SENSE_GRABBER_H */
*
* Author: Julius Kammerl (julius@kammerl.de)
*/
-
+#define PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/octree/octree.h>
-#include <pcl/octree/octree_impl.h>
-
#include <pcl/compression/entropy_range_coder.h>
#include <pcl/compression/impl/entropy_range_coder.hpp>
xyz.z = xyzrgb.z = xyzi.z;
xyzrgb.rgba = laser_rgb_mapping_[j + offset].rgba;
- if ( (boost::math::isnan) (xyz.x) || (boost::math::isnan) (xyz.y) || (boost::math::isnan) (xyz.z))
+ if (pcl_isnan (xyz.x) || pcl_isnan (xyz.y) || pcl_isnan (xyz.z))
{
continue;
}
}
else
{
- pcl::TexMaterial::RGB &rgb = materials_.back ().tex_Ka;
+ pcl::TexMaterial::RGB *rgb = &materials_.back ().tex_Ka;
if (st[0] == "Kd")
- rgb = materials_.back ().tex_Kd;
+ rgb = &materials_.back ().tex_Kd;
else if (st[0] == "Ks")
- rgb = materials_.back ().tex_Ks;
+ rgb = &materials_.back ().tex_Ks;
if (st[1] == "xyz")
{
- if (fillRGBfromXYZ (st, rgb))
+ if (fillRGBfromXYZ (st, *rgb))
{
PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values",
line.c_str ());
}
else
{
- if (fillRGBfromRGB (st, rgb))
+ if (fillRGBfromRGB (st, *rgb))
{
PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to RGB values",
line.c_str ());
continue;
}
- if (st[0] == "d")
+ if (st[0] == "d" || st[0] == "Tr")
{
- if (st.size () > 2)
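+ // "Tr" specifies transmittance, the inverse of dissolve ("d"), so the value is flipped below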
+ bool reverse = (st[0] == "Tr");
+ try
{
- try
- {
- materials_.back ().tex_d = boost::lexical_cast<float> (st[2]);
- }
- catch (boost::bad_lexical_cast &)
- {
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to transparency value",
- line.c_str ());
- mtl_file.close ();
- materials_.clear ();
- return (-1);
- }
+ materials_.back ().tex_d = boost::lexical_cast<float> (st[st.size () > 2 ? 2:1]);
+ if (reverse)
+ materials_.back ().tex_d = 1.f - materials_.back ().tex_d;
}
- else
+ catch (boost::bad_lexical_cast &)
{
- try
- {
- materials_.back ().tex_d = boost::lexical_cast<float> (st[1]);
- }
- catch (boost::bad_lexical_cast &)
- {
- PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to transparency value",
- line.c_str ());
- mtl_file.close ();
- materials_.clear ();
- return (-1);
- }
+ PCL_ERROR ("[pcl::MTLReader::read] Could not convert %s to transparency value",
+ line.c_str ());
+ mtl_file.close ();
+ materials_.clear ();
+ return (-1);
}
continue;
}
nr_faces += static_cast<unsigned> (tex_mesh.tex_polygons[m].size ());
// Write the header information
- fs << "####" << std::endl;
- fs << "# OBJ dataFile simple version. File name: " << file_name << std::endl;
- fs << "# Vertices: " << nr_points << std::endl;
- fs << "# Faces: " <<nr_faces << std::endl;
- fs << "# Material information:" << std::endl;
- fs << "mtllib " << mtl_file_name_nopath << std::endl;
- fs << "####" << std::endl;
+ fs << "####" << '\n';
+ fs << "# OBJ dataFile simple version. File name: " << file_name << '\n';
+ fs << "# Vertices: " << nr_points << '\n';
+ fs << "# Faces: " <<nr_faces << '\n';
+ fs << "# Material information:" << '\n';
+ fs << "mtllib " << mtl_file_name_nopath << '\n';
+ fs << "####" << '\n';
// Write vertex coordinates
- fs << "# Vertices" << std::endl;
+ fs << "# Vertices" << '\n';
for (unsigned i = 0; i < nr_points; ++i)
{
int xyz = 0;
PCL_ERROR ("[pcl::io::saveOBJFile] Input point cloud has no XYZ data!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
- fs << "# "<< nr_points <<" vertices" << std::endl;
+ fs << "# "<< nr_points <<" vertices" << '\n';
// Write vertex normals
for (unsigned i = 0; i < nr_points; ++i)
PCL_ERROR ("[pcl::io::saveOBJFile] Input point cloud has no normals!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
// Write vertex texture with "vt" (adding latter)
for (unsigned m = 0; m < nr_meshes; ++m)
{
- fs << "# " << tex_mesh.tex_coordinates[m].size() << " vertex textures in submesh " << m << std::endl;
+ fs << "# " << tex_mesh.tex_coordinates[m].size() << " vertex textures in submesh " << m << '\n';
for (size_t i = 0; i < tex_mesh.tex_coordinates[m].size (); ++i)
{
fs << "vt ";
- fs << tex_mesh.tex_coordinates[m][i][0] << " " << tex_mesh.tex_coordinates[m][i][1] << std::endl;
+ fs << tex_mesh.tex_coordinates[m][i][0] << " " << tex_mesh.tex_coordinates[m][i][1] << '\n';
}
}
{
if (m > 0) f_idx += static_cast<unsigned> (tex_mesh.tex_polygons[m-1].size ());
- fs << "# The material will be used for mesh " << m << std::endl;
- fs << "usemtl " << tex_mesh.tex_materials[m].tex_name << std::endl;
- fs << "# Faces" << std::endl;
+ fs << "# The material will be used for mesh " << m << '\n';
+ fs << "usemtl " << tex_mesh.tex_materials[m].tex_name << '\n';
+ fs << "# Faces" << '\n';
for (size_t i = 0; i < tex_mesh.tex_polygons[m].size(); ++i)
{
<< "/" << tex_mesh.tex_polygons[m][i].vertices.size () * (i+f_idx) +j+1
<< "/" << idx; // vertex index in obj file format starting with 1
}
- fs << std::endl;
+ fs << '\n';
}
- fs << "# "<< tex_mesh.tex_polygons[m].size() << " faces in mesh " << m << std::endl;
+ fs << "# "<< tex_mesh.tex_polygons[m].size() << " faces in mesh " << m << '\n';
}
- fs << "# End of File";
+ fs << "# End of File" << std::flush;
// Close obj file
fs.close ();
m_fs.open (mtl_file_name.c_str ());
// default
- m_fs << "#" << std::endl;
- m_fs << "# Wavefront material file" << std::endl;
- m_fs << "#" << std::endl;
+ m_fs << "#" << '\n';
+ m_fs << "# Wavefront material file" << '\n';
+ m_fs << "#" << '\n';
for(unsigned m = 0; m < nr_meshes; ++m)
{
- m_fs << "newmtl " << tex_mesh.tex_materials[m].tex_name << std::endl;
- m_fs << "Ka "<< tex_mesh.tex_materials[m].tex_Ka.r << " " << tex_mesh.tex_materials[m].tex_Ka.g << " " << tex_mesh.tex_materials[m].tex_Ka.b << std::endl; // defines the ambient color of the material to be (r,g,b).
- m_fs << "Kd "<< tex_mesh.tex_materials[m].tex_Kd.r << " " << tex_mesh.tex_materials[m].tex_Kd.g << " " << tex_mesh.tex_materials[m].tex_Kd.b << std::endl; // defines the diffuse color of the material to be (r,g,b).
- m_fs << "Ks "<< tex_mesh.tex_materials[m].tex_Ks.r << " " << tex_mesh.tex_materials[m].tex_Ks.g << " " << tex_mesh.tex_materials[m].tex_Ks.b << std::endl; // defines the specular color of the material to be (r,g,b). This color shows up in highlights.
- m_fs << "d " << tex_mesh.tex_materials[m].tex_d << std::endl; // defines the transparency of the material to be alpha.
- m_fs << "Ns "<< tex_mesh.tex_materials[m].tex_Ns << std::endl; // defines the shininess of the material to be s.
- m_fs << "illum "<< tex_mesh.tex_materials[m].tex_illum << std::endl; // denotes the illumination model used by the material.
+ m_fs << "newmtl " << tex_mesh.tex_materials[m].tex_name << '\n';
+ m_fs << "Ka "<< tex_mesh.tex_materials[m].tex_Ka.r << " " << tex_mesh.tex_materials[m].tex_Ka.g << " " << tex_mesh.tex_materials[m].tex_Ka.b << '\n'; // defines the ambient color of the material to be (r,g,b).
+ m_fs << "Kd "<< tex_mesh.tex_materials[m].tex_Kd.r << " " << tex_mesh.tex_materials[m].tex_Kd.g << " " << tex_mesh.tex_materials[m].tex_Kd.b << '\n'; // defines the diffuse color of the material to be (r,g,b).
+ m_fs << "Ks "<< tex_mesh.tex_materials[m].tex_Ks.r << " " << tex_mesh.tex_materials[m].tex_Ks.g << " " << tex_mesh.tex_materials[m].tex_Ks.b << '\n'; // defines the specular color of the material to be (r,g,b). This color shows up in highlights.
+ m_fs << "d " << tex_mesh.tex_materials[m].tex_d << '\n'; // defines the transparency of the material to be alpha.
+ m_fs << "Ns "<< tex_mesh.tex_materials[m].tex_Ns << '\n'; // defines the shininess of the material to be s.
+ m_fs << "illum "<< tex_mesh.tex_materials[m].tex_illum << '\n'; // denotes the illumination model used by the material.
// illum = 1 indicates a flat material with no specular highlights, so the value of Ks is not used.
// illum = 2 denotes the presence of specular highlights, and so a specification for Ks is required.
- m_fs << "map_Kd " << tex_mesh.tex_materials[m].tex_file << std::endl;
- m_fs << "###" << std::endl;
+ m_fs << "map_Kd " << tex_mesh.tex_materials[m].tex_file << '\n';
+ m_fs << "###" << '\n';
}
m_fs.close ();
return (0);
int normal_index = getFieldIndex (mesh.cloud, "normal_x");
// Write the header information
- fs << "####" << std::endl;
- fs << "# OBJ dataFile simple version. File name: " << file_name << std::endl;
- fs << "# Vertices: " << nr_points << std::endl;
+ fs << "####" << '\n';
+ fs << "# OBJ dataFile simple version. File name: " << file_name << '\n';
+ fs << "# Vertices: " << nr_points << '\n';
if (normal_index != -1)
- fs << "# Vertices normals : " << nr_points << std::endl;
- fs << "# Faces: " <<nr_faces << std::endl;
- fs << "####" << std::endl;
+ fs << "# Vertices normals : " << nr_points << '\n';
+ fs << "# Faces: " <<nr_faces << '\n';
+ fs << "####" << '\n';
// Write vertex coordinates
- fs << "# List of Vertices, with (x,y,z) coordinates, w is optional." << std::endl;
+ fs << "# List of Vertices, with (x,y,z) coordinates, w is optional." << '\n';
for (int i = 0; i < nr_points; ++i)
{
int xyz = 0;
PCL_ERROR ("[pcl::io::saveOBJFile] Input point cloud has no XYZ data!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
- fs << "# "<< nr_points <<" vertices" << std::endl;
+ fs << "# "<< nr_points <<" vertices" << '\n';
if(normal_index != -1)
{
- fs << "# Normals in (x,y,z) form; normals might not be unit." << std::endl;
+ fs << "# Normals in (x,y,z) form; normals might not be unit." << '\n';
// Write vertex normals
for (int i = 0; i < nr_points; ++i)
{
PCL_ERROR ("[pcl::io::saveOBJFile] Input point cloud has no normals!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
- fs << "# "<< nr_points <<" vertices normals" << std::endl;
+ fs << "# "<< nr_points <<" vertices normals" << '\n';
}
- fs << "# Face Definitions" << std::endl;
+ fs << "# Face Definitions" << '\n';
// Write down faces
if(normal_index == -1)
{
size_t j = 0;
for (; j < mesh.polygons[i].vertices.size () - 1; ++j)
fs << mesh.polygons[i].vertices[j] + 1 << " ";
- fs << mesh.polygons[i].vertices[j] + 1 << std::endl;
+ fs << mesh.polygons[i].vertices[j] + 1 << '\n';
}
}
else
size_t j = 0;
for (; j < mesh.polygons[i].vertices.size () - 1; ++j)
fs << mesh.polygons[i].vertices[j] + 1 << "//" << mesh.polygons[i].vertices[j] + 1 << " ";
- fs << mesh.polygons[i].vertices[j] + 1 << "//" << mesh.polygons[i].vertices[j] + 1 << std::endl;
+ fs << mesh.polygons[i].vertices[j] + 1 << "//" << mesh.polygons[i].vertices[j] + 1 << '\n';
}
}
fs << "# End of File" << std::endl;
#include <pcl/exceptions.h>
#include <iostream>
-namespace pcl
+namespace
{
typedef union
{
float float_value;
long long_value;
} RGBValue;
+}
+
+namespace pcl
+{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ONIGrabber::ONIGrabber (const std::string& file_name, bool repeat, bool stream)
using namespace pcl::io::openni2;
-namespace pcl
+namespace
{
// Treat color as chars, float32, or uint32
typedef union
{
// Ignore invalid padded dimensions that are inherited from binary data
if (cloud.fields[d].name != "_")
- stream << pcl::getFieldType (cloud.fields[d].datatype) << " ";
+ {
+ if (cloud.fields[d].name == "rgb")
+ stream << "U ";
+ else
+ stream << pcl::getFieldType (cloud.fields[d].datatype) << " ";
+ }
}
// Ignore invalid padded dimensions that are inherited from binary data
if (cloud.fields[cloud.fields.size () - 1].name != "_")
- stream << pcl::getFieldType (cloud.fields[cloud.fields.size () - 1].datatype);
+ {
+ if (cloud.fields[cloud.fields.size () - 1].name == "rgb")
+ stream << "U";
+ else
+ stream << pcl::getFieldType (cloud.fields[cloud.fields.size () - 1].datatype);
+ }
// Remove trailing spaces
result = stream.str ();
}
case pcl::PCLPointField::FLOAT32:
{
- copyValueString<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type>(cloud, i, point_size, d, c, stream);
+ /*
+ * Despite the float type, store the rgb field as uint32
+ * because several fully opaque color values are mapped to
+ * nan.
+ */
+ if ("rgb" == cloud.fields[d].name)
+ copyValueString<pcl::traits::asType<pcl::PCLPointField::UINT32>::type>(cloud, i, point_size, d, c, stream);
+ else
+ copyValueString<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type>(cloud, i, point_size, d, c, stream);
break;
}
case pcl::PCLPointField::FLOAT64:
setLockingPermissions (file_name, file_lock);
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during open (%s)!\n", file_name.c_str());
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during open (%s)!\n", file_name.c_str());
boost::tuple<boost::function<void (pcl::io::ply::uint8)>, boost::function<void (pcl::io::ply::int32)>, boost::function<void ()> >
pcl::PLYReader::listPropertyDefinitionCallback (const std::string& element_name, const std::string& property_name)
{
- if ((element_name == "range_grid") && (property_name == "vertex_indices"))
+ if ((element_name == "range_grid") && (property_name == "vertex_indices" || property_name == "vertex_index"))
{
return boost::tuple<boost::function<void (pcl::io::ply::uint8)>, boost::function<void (pcl::io::ply::int32)>, boost::function<void ()> > (
boost::bind (&pcl::PLYReader::rangeGridVertexIndicesBeginCallback, this, _1),
boost::bind (&pcl::PLYReader::rangeGridVertexIndicesEndCallback, this)
);
}
- else if ((element_name == "face") && (property_name == "vertex_indices") && polygons_)
+ else if ((element_name == "face") && (property_name == "vertex_indices" || property_name == "vertex_index") && polygons_)
{
return boost::tuple<boost::function<void (pcl::io::ply::uint8)>, boost::function<void (pcl::io::ply::int32)>, boost::function<void ()> > (
boost::bind (&pcl::PLYReader::faceVertexIndicesBeginCallback, this, _1),
boost::split (st, line, boost::is_any_of (std::string ( "\t ")), boost::token_compress_on);
assert (st[0].substr (0, 8) == "obj_info");
{
- assert (st.size () == 3);
+ if (st.size () >= 3)
{
if (st[1] == "num_cols")
cloudWidthCallback (atoi (st[2].c_str ()));
fs << " ";
}
}
- fs << std::endl;
+ fs << '\n';
}
// Append sensor information
if (origin[3] != 0)
if (is_valid_line)
{
grids[i].push_back (valid_points);
- fs << line.str () << std::endl;
+ fs << line.str () << '\n';
++valid_points;
}
}
it != grids [i].end ();
++it)
fs << " " << *it;
- fs << std::endl;
+ fs << '\n';
}
}
PCL_ERROR ("[pcl::io::savePLYFile] Input point cloud has no XYZ data!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
// Write down faces
size_t j = 0;
for (j = 0; j < mesh.polygons[i].vertices.size () - 1; ++j)
fs << mesh.polygons[i].vertices[j] << " ";
- fs << mesh.polygons[i].vertices[j] << std::endl;
+ fs << mesh.polygons[i].vertices[j] << '\n';
}
// Close file
}
bool
-operator== (const pcl::RealSenseGrabber::Mode& m1, const pcl::RealSenseGrabber::Mode& m2)
+pcl::RealSenseGrabber::Mode::operator== (const pcl::RealSenseGrabber::Mode& m) const
{
- return (m1.fps == m2.fps &&
- m1.depth_width == m2.depth_width &&
- m1.depth_height == m2.depth_height &&
- m1.color_width == m2.color_width &&
- m1.color_height == m2.color_height);
+ return (this->fps == m.fps &&
+ this->depth_width == m.depth_width &&
+ this->depth_height == m.depth_height &&
+ this->color_width == m.color_width &&
+ this->color_height == m.color_height);
}
pcl::RealSenseGrabber::RealSenseGrabber (const std::string& device_id, const Mode& mode, bool strict)
for (int j = 0; j < HDL_LASER_PER_FIRING; j++)
{
double current_azimuth = firing_data.rotationalPosition;
- if (j > VLP_MAX_NUM_LASERS)
+ if (j >= VLP_MAX_NUM_LASERS)
{
current_azimuth += interpolated_azimuth_delta;
}
}
- if (! (boost::math::isnan (xyz.x) || boost::math::isnan (xyz.y) || boost::math::isnan (xyz.z)))
+ if (! (pcl_isnan (xyz.x) || pcl_isnan (xyz.y) || pcl_isnan (xyz.z)))
{
current_sweep_xyz_->push_back (xyz);
current_sweep_xyzi_->push_back (xyzi);
if (dataPacket->mode == VLP_DUAL_MODE)
{
if ( (dual_xyz.x != xyz.x || dual_xyz.y != xyz.y || dual_xyz.z != xyz.z)
- && ! (boost::math::isnan (dual_xyz.x) || boost::math::isnan (dual_xyz.y) || boost::math::isnan (dual_xyz.z)))
+ && ! (pcl_isnan (dual_xyz.x) || pcl_isnan (dual_xyz.y) || pcl_isnan (dual_xyz.z)))
{
current_sweep_xyz_->push_back (dual_xyz);
current_sweep_xyzi_->push_back (dual_xyzi);
unsigned int point_size = static_cast<unsigned int> (triangles.cloud.data.size () / nr_points);
// Write the header information
- fs << "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " << nr_points << " float" << std::endl;
+ fs << "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " << nr_points << " float" << '\n';
// Iterate through the points
for (unsigned int i = 0; i < nr_points; ++i)
PCL_ERROR ("[pcl::io::saveVTKFile] Input point cloud has no XYZ data!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
// Write vertices
- fs << "\nVERTICES " << nr_points << " " << 2*nr_points << std::endl;
+ fs << "\nVERTICES " << nr_points << " " << 2*nr_points << '\n';
for (unsigned int i = 0; i < nr_points; ++i)
- fs << "1 " << i << std::endl;
+ fs << "1 " << i << '\n';
// Write polygons
// compute the correct number of values:
size_t correct_number = triangle_size;
for (size_t i = 0; i < triangle_size; ++i)
correct_number += triangles.polygons[i].vertices.size ();
- fs << "\nPOLYGONS " << triangle_size << " " << correct_number << std::endl;
+ fs << "\nPOLYGONS " << triangle_size << " " << correct_number << '\n';
for (size_t i = 0; i < triangle_size; ++i)
{
fs << triangles.polygons[i].vertices.size () << " ";
size_t j = 0;
for (j = 0; j < triangles.polygons[i].vertices.size () - 1; ++j)
fs << triangles.polygons[i].vertices[j] << " ";
- fs << triangles.polygons[i].vertices[j] << std::endl;
+ fs << triangles.polygons[i].vertices[j] << '\n';
}
// Write RGB values
int b = color.b;
fs << static_cast<float> (r) / 255.0f << " " << static_cast<float> (g) / 255.0f << " " << static_cast<float> (b) / 255.0f;
}
- fs << std::endl;
+ fs << '\n';
}
}
unsigned int point_size = static_cast<unsigned int> (cloud.data.size () / nr_points);
// Write the header information
- fs << "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " << nr_points << " float" << std::endl;
+ fs << "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " << nr_points << " float" << '\n';
// Iterate through the points
for (unsigned int i = 0; i < nr_points; ++i)
PCL_ERROR ("[pcl::io::saveVTKFile] Input point cloud has no XYZ data!\n");
return (-2);
}
- fs << std::endl;
+ fs << '\n';
}
// Write vertices
- fs << "\nVERTICES " << nr_points << " " << 2*nr_points << std::endl;
+ fs << "\nVERTICES " << nr_points << " " << 2*nr_points << '\n';
for (unsigned int i = 0; i < nr_points; ++i)
- fs << "1 " << i << std::endl;
+ fs << "1 " << i << '\n';
// Write RGB values
int field_index = getFieldIndex (cloud, "rgb");
int b = color.b;
fs << static_cast<float> (r) / 255.0f << " " << static_cast<float> (g) / 255.0f << " " << static_cast<float> (b) / 255.0f;
}
- fs << std::endl;
+ fs << '\n';
}
}
#include <vtkImageShiftScale.h>
#include <vtkPNGWriter.h>
+// Support for VTK 7.1 upwards
+#ifdef vtkGenericDataArray_h
+#define SetTupleValue SetTypedTuple
+#define InsertNextTupleValue InsertNextTypedTuple
+#define GetTupleValue GetTypedTuple
+#endif
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int
pcl::io::loadPolygonFile (const std::string &file_name, pcl::PolygonMesh& mesh)
if (cloud_output)
mesh.polygons.clear();
- if (saveMesh (mesh, argv[file_args[1]], output_type))
+ if (!saveMesh (mesh, argv[file_args[1]], output_type))
return (-1);
}
else if (boost::filesystem::path (argv[file_args[0]]).extension () == ".stl")
delete[] prg_mem;
delete[] prg_local_mem;
delete[] feat_max;
+ delete[] omp_mem;
}
#define PCL_INSTANTIATE_ISSKeypoint3D(T,U,N) template class PCL_EXPORTS pcl::ISSKeypoint3D<T,U,N>;
search_radius_ = salient_radius_;
}
+ /** \brief Destructor. */
+ ~ISSKeypoint3D ()
+ {
+ delete[] third_eigen_value_;
+ delete[] edge_points_;
+ }
+
/** \brief Set the radius of the spherical neighborhood used to compute the scatter matrix.
* \param[in] salient_radius the radius of the spherical neighborhood
*/
setNonMaxRadius (double non_max_radius);
/** \brief Set the radius used for the estimation of the surface normals of the input cloud. If the radius is
- * too large, the temporal performances of the detector may degrade significantly.
+ * too large, the temporal performances of the detector may degrade significantly.
* \param[in] normal_radius the radius used to estimate surface normals
*/
void
setNormalRadius (double normal_radius);
/** \brief Set the radius used for the estimation of the boundary points. If the radius is too large,
- * the temporal performances of the detector may degrade significantly.
+ * the temporal performances of the detector may degrade significantly.
* \param[in] border_radius the radius used to compute the boundary points
*/
void
setNormals (const PointCloudNConstPtr &normals);
/** \brief Set the decision boundary (angle threshold) that marks points as boundary or regular.
- * (default \f$\pi / 2.0\f$)
+ * (default \f$\pi / 2.0\f$)
* \param[in] angle the angle threshold
*/
inline void
setAngleThreshold (float angle)
{
- angle_threshold_ = angle;
+ angle_threshold_ = angle;
}
/** \brief Initialize the scheduler and set the number of threads to use.
continue;
const Eigen::Vector3f& surface_change_direction = surface_change_directions[index2];
- float distance = sqrtf (distance_squared);
+ float distance = std::sqrt (distance_squared);
float distance_factor = radius_reciprocal*distance;
float positive_score, current_negative_score;
nkdGetScores (distance_factor, surface_change_score, pixelDistance,
normalized_angle_diff, angle_change_value);
}
}
- angle_change_value = sqrtf (angle_change_value);
+ angle_change_value = std::sqrt (angle_change_value);
interest_value = negative_score * angle_change_value;
if (parameters_.add_points_on_straight_edges)
normalized_angle_diff, angle_change_value);
}
}
- angle_change_value = sqrtf (angle_change_value);
+ angle_change_value = std::sqrt (angle_change_value);
float maximum_interest_value = angle_change_value;
if (parameters_.add_points_on_straight_edges)
normalized_angle_diff);
}
}
- angle_change_value = sqrtf (angle_change_value);
+ angle_change_value = std::sqrt (angle_change_value);
interest_value = negative_score * angle_change_value;
if (parameters_.add_points_on_straight_edges)
{
}
// Expected standard deviation of our filter (p.6 in [Adams etal 2010])
- float inv_std_dev = sqrtf (2.0f / 3.0f) * static_cast<float> (d_ + 1);
+ float inv_std_dev = std::sqrt (2.0f / 3.0f) * static_cast<float> (d_ + 1);
// Compute the diagonal part of E (p.5 in [Adams etal 2010])
for (int i = 0; i < d_; i++)
- scale_factor (i) = 1.0f / sqrtf (static_cast<float> (i + 2) * static_cast<float> (i + 1)) * inv_std_dev;
+ scale_factor (i) = 1.0f / std::sqrt (static_cast<float> (i + 2) * static_cast<float> (i + 1)) * inv_std_dev;
// Compute the simplex each feature lies in
for (int k = 0; k < N_; k++)
}
// Expected standard deviation of our filter (p.6 in [Adams etal 2010])
- float inv_std_dev = sqrtf (2.0f / 3.0f)* static_cast<float>(d_+1);
+ float inv_std_dev = std::sqrt (2.0f / 3.0f)* static_cast<float>(d_+1);
// Compute the diagonal part of E (p.5 in [Adams etal 2010])
for (int i=0; i<d_; i++)
- scale_factor[i] = 1.0f / sqrtf (static_cast<float>(i+2)*static_cast<float>(i+1)) * inv_std_dev;
+ scale_factor[i] = 1.0f / std::sqrt (static_cast<float>(i+2)*static_cast<float>(i+1)) * inv_std_dev;
// Compute the simplex each feature lies in
for (int k=0; k<N_; k++)
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${impl_incs})
target_link_libraries("${LIB_NAME}")
+ target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}"
"${SUBSYS_DEPS}" "" "" "" "")
#include <vector>
#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_types.h>
-#include <pcl/octree/octree.h>
namespace pcl
{
#ifndef PCL_OCTREE_ITERATOR_HPP_
#define PCL_OCTREE_ITERATOR_HPP_
-#include <vector>
-#include <assert.h>
-
-#include <pcl/common/common.h>
-
namespace pcl
{
namespace octree
#ifndef PCL_OCTREE_POINTCLOUD_HPP_
#define PCL_OCTREE_POINTCLOUD_HPP_
-#include <vector>
#include <assert.h>
#include <pcl/common/common.h>
-
+#include <pcl/octree/impl/octree_base.hpp>
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename LeafContainerT, typename BranchContainerT, typename OctreeT>
const float minValue = std::numeric_limits<float>::epsilon();
// find maximum key values for x, y, z
- max_key_x = static_cast<unsigned int> ((max_x_ - min_x_) / resolution_);
- max_key_y = static_cast<unsigned int> ((max_y_ - min_y_) / resolution_);
- max_key_z = static_cast<unsigned int> ((max_z_ - min_z_) / resolution_);
+ max_key_x = static_cast<unsigned int> (ceil ((max_x_ - min_x_ - minValue) / resolution_));
+ max_key_y = static_cast<unsigned int> (ceil ((max_y_ - min_y_ - minValue) / resolution_));
+ max_key_z = static_cast<unsigned int> (ceil ((max_z_ - min_z_ - minValue) / resolution_));
// find maximum amount of keys
max_voxels = std::max (std::max (std::max (max_key_x, max_key_y), max_key_z), static_cast<unsigned int> (2));
this->octree_depth_ = std::max ((std::min (static_cast<unsigned int> (OctreeKey::maxDepth), static_cast<unsigned int> (ceil (this->Log2 (max_voxels)-minValue)))),
static_cast<unsigned int> (0));
- octree_side_len = static_cast<double> (1 << this->octree_depth_) * resolution_-minValue;
+ octree_side_len = static_cast<double> (1 << this->octree_depth_) * resolution_;
if (this->leaf_count_ == 0)
{
octree_oversize_y = (octree_side_len - (max_y_ - min_y_)) / 2.0;
octree_oversize_z = (octree_side_len - (max_z_ - min_z_)) / 2.0;
- min_x_ -= octree_oversize_x;
- min_y_ -= octree_oversize_y;
- min_z_ -= octree_oversize_z;
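+ // The side length is derived from the ceil'ed key range above, so it can never be smaller
+ // than the bounding box extent; the oversize values are therefore (numerically) non-negative.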
+ assert (octree_oversize_x > -minValue);
+ assert (octree_oversize_y > -minValue);
+ assert (octree_oversize_z > -minValue);
- max_x_ += octree_oversize_x;
- max_y_ += octree_oversize_y;
- max_z_ += octree_oversize_z;
+ if (octree_oversize_x > minValue)
+ {
+ min_x_ -= octree_oversize_x;
+ max_x_ += octree_oversize_x;
+ }
+ if (octree_oversize_y > minValue)
+ {
+ min_y_ -= octree_oversize_y;
+ max_y_ += octree_oversize_y;
+ }
+ if (octree_oversize_z > minValue)
+ {
+ min_z_ -= octree_oversize_z;
+ max_z_ += octree_oversize_z;
+ }
}
else
{
key_arg.x = static_cast<unsigned int> ((point_arg.x - this->min_x_) / this->resolution_);
key_arg.y = static_cast<unsigned int> ((point_arg.y - this->min_y_) / this->resolution_);
key_arg.z = static_cast<unsigned int> ((point_arg.z - this->min_z_) / this->resolution_);
+
+ assert (key_arg.x <= this->max_key_.x);
+ assert (key_arg.y <= this->max_key_.y);
+ assert (key_arg.z <= this->max_key_.z);
}
//////////////////////////////////////////////////////////////////////////////////////////////
#ifndef PCL_OCTREE_POINTCLOUD_ADJACENCY_HPP_
#define PCL_OCTREE_POINTCLOUD_ADJACENCY_HPP_
-#include <pcl/octree/octree_pointcloud_adjacency.h>
+#include <pcl/console/print.h>
+#include <pcl/common/geometry.h>
+/*
+ * OctreePointCloudAdjacency is not precompiled, because it is used in other
+ * parts of PCL with custom LeafContainers. Consequently, when PCL_NO_PRECOMPILE
+ * is NOT defined, octree_pointcloud_adjacency.h includes this file, but
+ * octree_pointcloud.h does not pull in the implementation because it is
+ * precompiled, so we have to include it here "manually".
+ */
+#include <pcl/octree/impl/octree_pointcloud.hpp>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename LeafContainerT, typename BranchContainerT>
#ifndef PCL_OCTREE_VOXELCENTROID_HPP
#define PCL_OCTREE_VOXELCENTROID_HPP
-#include <pcl/octree/octree_pointcloud_voxelcentroid.h>
+/*
+ * OctreePointCloudVoxelCentroid is not precompiled, because it is used in other
+ * parts of PCL with custom LeafContainers. Consequently, when PCL_NO_PRECOMPILE
+ * is NOT defined, octree_pointcloud_voxelcentroid.h includes this file, but
+ * octree_pointcloud.h does not pull in the implementation because it is
+ * precompiled, so we have to include it here "manually".
+ */
+#include <pcl/octree/impl/octree_pointcloud.hpp>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename LeafContainerT, typename BranchContainerT> bool
const PointT& point_arg, PointT& voxel_centroid_arg) const
{
OctreeKey key;
- LeafNode* leaf = 0;
+ LeafContainerT* leaf = NULL;
// generate key
genOctreeKeyforPoint (point_arg, key);
leaf = this->findLeaf (key);
-
if (leaf)
- {
- LeafContainerT* container = leaf;
- container->getCentroid (voxel_centroid_arg);
- }
+ leaf->getCentroid (voxel_centroid_arg);
- return (leaf != 0);
+ return (leaf != NULL);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef PCL_OCTREE_SEARCH_IMPL_H_
#define PCL_OCTREE_SEARCH_IMPL_H_
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
-
-#include <pcl/common/common.h>
#include <assert.h>
-
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename LeafContainerT, typename BranchContainerT> bool
pcl::octree::OctreePointCloudSearch<PointT, LeafContainerT, BranchContainerT>::voxelSearch (const PointT& point,
return (voxel_count);
}
+#define PCL_INSTANTIATE_OctreePointCloudSearch(T) template class PCL_EXPORTS pcl::octree::OctreePointCloudSearch<T>;
+
#endif // PCL_OCTREE_SEARCH_IMPL_H_
#include <pcl/octree/octree_pointcloud_pointvector.h>
#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <pcl/octree/octree_pointcloud_voxelcentroid.h>
+#include <pcl/octree/octree_pointcloud_adjacency.h>
#include <pcl/octree/octree_search.h>
#include <vector>
-#include "octree_nodes.h"
-#include "octree_container.h"
-#include "octree_key.h"
-#include "octree_iterator.h"
+#include <pcl/octree/octree_nodes.h>
+#include <pcl/octree/octree_container.h>
+#include <pcl/octree/octree_key.h>
+#include <pcl/octree/octree_iterator.h>
-#include <stdio.h>
-#include <string.h>
namespace pcl
{
}
}
-//#include "impl/octree2buf_base.hpp"
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/octree/impl/octree2buf_base.hpp>
+#endif
#endif
#ifndef PCL_OCTREE_TREE_BASE_H
#define PCL_OCTREE_TREE_BASE_H
-#include <cstddef>
#include <vector>
-#include "octree_nodes.h"
-#include "octree_container.h"
-#include "octree_key.h"
-#include "octree_iterator.h"
+#include <pcl/octree/octree_nodes.h>
+#include <pcl/octree/octree_container.h>
+#include <pcl/octree/octree_key.h>
+#include <pcl/octree/octree_iterator.h>
namespace pcl
{
}
}
-//#include "impl/octree_base.hpp"
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/octree/impl/octree_base.hpp>
+#endif
#endif
#ifndef PCL_OCTREE_CONTAINER_H
#define PCL_OCTREE_CONTAINER_H
-#include <string.h>
#include <vector>
#include <cstddef>
#include <vector>
#include <deque>
-#include "octree_nodes.h"
-#include "octree_key.h"
-
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
+#include <pcl/octree/octree_nodes.h>
+#include <pcl/octree/octree_key.h>
#include <iterator>
}
}
+/*
+ * Note: Since octree iterators depend on octrees, don't precompile them.
+ */
+#include <pcl/octree/impl/octree_iterator.hpp>
+
#endif
#ifndef PCL_OCTREE_KEY_H
#define PCL_OCTREE_KEY_H
-#include <string>
-
namespace pcl
{
namespace octree
#include <cstddef>
#include <assert.h>
-#include <cstring>
-
-#include <string.h>
#include <Eigen/Core>
#ifndef PCL_OCTREE_POINTCLOUD_H
#define PCL_OCTREE_POINTCLOUD_H
-#include "octree_base.h"
-//#include "octree2buf_base.h"
+#include <pcl/octree/octree_base.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <queue>
#include <vector>
-#include <algorithm>
-#include <iostream>
namespace pcl
{
#ifndef PCL_OCTREE_POINTCLOUD_ADJACENCY_H_
#define PCL_OCTREE_POINTCLOUD_ADJACENCY_H_
-#include <pcl/console/print.h>
-#include <pcl/common/geometry.h>
#include <pcl/octree/boost.h>
-#include <pcl/octree/octree_impl.h>
+#include <pcl/octree/octree_pointcloud.h>
#include <pcl/octree/octree_pointcloud_adjacency_container.h>
#include <set>
}
-//#ifdef PCL_NO_PRECOMPILE
+// Note: Do not precompile this octree type because it is typically used with custom leaf containers.
#include <pcl/octree/impl/octree_pointcloud_adjacency.hpp>
-//#endif
#endif // PCL_OCTREE_POINTCLOUD_ADJACENCY_H_
#ifndef PCL_OCTREE_CHANGEDETECTOR_H
#define PCL_OCTREE_CHANGEDETECTOR_H
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
+#include <pcl/octree/octree2buf_base.h>
namespace pcl
{
#ifndef PCL_OCTREE_DENSITY_H
#define PCL_OCTREE_DENSITY_H
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
#ifndef PCL_OCTREE_OCCUPANCY_H
#define PCL_OCTREE_OCCUPANCY_H
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
#ifndef PCL_OCTREE_POINT_VECTOR_H
#define PCL_OCTREE_POINT_VECTOR_H
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
#ifndef PCL_OCTREE_SINGLE_POINT_H
#define PCL_OCTREE_SINGLE_POINT_H
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
#ifndef PCL_OCTREE_VOXELCENTROID_H
#define PCL_OCTREE_VOXELCENTROID_H
-#include "octree_pointcloud.h"
-
-#include <pcl/common/point_operators.h>
-#include <pcl/point_types.h>
-#include <pcl/register_point_struct.h>
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
}
}
+// Note: Don't precompile this octree type to speed up compilation. It's probably rarely used.
#include <pcl/octree/impl/octree_pointcloud_voxelcentroid.hpp>
#endif
#define PCL_OCTREE_SEARCH_H_
#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
-#include "octree_pointcloud.h"
+#include <pcl/octree/octree_pointcloud.h>
namespace pcl
{
#ifdef PCL_NO_PRECOMPILE
#include <pcl/octree/impl/octree_search.hpp>
-#else
-#define PCL_INSTANTIATE_OctreePointCloudSearch(T) template class PCL_EXPORTS pcl::octree::OctreePointCloudSearch<T>;
-#endif // PCL_NO_PRECOMPILE
+#endif
#endif // PCL_OCTREE_SEARCH_H_
+++ /dev/null
-
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Julius Kammerl (julius@kammerl.de)
- */
-
-#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
-
-#include <pcl/octree/octree.h>
-#include <pcl/octree/octree_impl.h>
-
-// Instantiations of specific point types
-
-template class PCL_EXPORTS pcl::octree::OctreeBase<int>;
-template class PCL_EXPORTS pcl::octree::Octree2BufBase<int>;
-
-template class PCL_EXPORTS pcl::octree::OctreeBase<int,
- pcl::octree::OctreeContainerDataTVector<int>,
- pcl::octree::OctreeContainerEmpty<int> >;
-
-template class PCL_EXPORTS pcl::octree::Octree2BufBase<int,
- pcl::octree::OctreeContainerDataTVector<int>,
- pcl::octree::OctreeContainerEmpty<int> >;
-
-PCL_INSTANTIATE(OctreePointCloudSingleBufferWithLeafDataTVector,
- PCL_XYZ_POINT_TYPES)
-PCL_INSTANTIATE(OctreePointCloudDoubleBufferWithLeafDataTVector,
- PCL_XYZ_POINT_TYPES)
-
-PCL_INSTANTIATE(OctreePointCloudSearch, PCL_XYZ_POINT_TYPES)
-
-
-// PCL_INSTANTIATE(OctreePointCloudSingleBufferWithLeafDataT, PCL_XYZ_POINT_TYPES)
-
-// PCL_INSTANTIATE(OctreePointCloudSingleBufferWithEmptyLeaf, PCL_XYZ_POINT_TYPES)
-
-// PCL_INSTANTIATE(OctreePointCloudDensity, PCL_XYZ_POINT_TYPES)
-// PCL_INSTANTIATE(OctreePointCloudSingleBufferWithDensityLeaf, PCL_XYZ_POINT_TYPES)
-// PCL_INSTANTIATE(OctreePointCloudDoubleBufferWithDensityLeaf, PCL_XYZ_POINT_TYPES)
-
-// PCL_INSTANTIATE(OctreePointCloudOccupancy, PCL_XYZ_POINT_TYPES)
-// PCL_INSTANTIATE(OctreePointCloudSinglePoint, PCL_XYZ_POINT_TYPES)
-// PCL_INSTANTIATE(OctreePointCloudPointVector, PCL_XYZ_POINT_TYPES)
-PCL_INSTANTIATE(OctreePointCloudChangeDetector, PCL_XYZ_POINT_TYPES)
-// PCL_INSTANTIATE(OctreePointCloudVoxelCentroid, PCL_XYZ_POINT_TYPES)
-
-
-
/*
* Software License Agreement (BSD License)
*
// PCL_INSTANTIATE(OctreePointCloudSingleBufferWithLeafDataT, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(OctreePointCloudSingleBufferWithEmptyLeaf, PCL_XYZ_POINT_TYPES)
+/*
+ * Note: Disable a priori instantiation of these octree types to speed up compilation.
+ * They are probably rarely used.
+ */
// PCL_INSTANTIATE(OctreePointCloudDensity, PCL_XYZ_POINT_TYPES)
// PCL_INSTANTIATE(OctreePointCloudSingleBufferWithDensityLeaf, PCL_XYZ_POINT_TYPES)
// PCL_INSTANTIATE(OctreePointCloudDoubleBufferWithDensityLeaf, PCL_XYZ_POINT_TYPES)
// PCL_INSTANTIATE(OctreePointCloudPointVector, PCL_XYZ_POINT_TYPES)
// PCL_INSTANTIATE(OctreePointCloudChangeDetector, PCL_XYZ_POINT_TYPES)
// PCL_INSTANTIATE(OctreePointCloudVoxelCentroid, PCL_XYZ_POINT_TYPES)
+// PCL_INSTANTIATE(OctreePointCloudAdjacency, PCL_XYZ_POINT_TYPES)
#endif // PCL_NO_PRECOMPILE
#define PCL_MAJOR_VERSION ${PCL_MAJOR_VERSION}
#define PCL_MINOR_VERSION ${PCL_MINOR_VERSION}
#define PCL_REVISION_VERSION ${PCL_REVISION_VERSION}
+#define PCL_DEV_VERSION ${PCL_DEV_VERSION}
#define PCL_VERSION_PRETTY "${PCL_VERSION}"
#define PCL_VERSION_CALC(MAJ, MIN, PATCH) (MAJ*100000+MIN*100+PATCH)
#define PCL_VERSION \
- PCL_VERSION_CALC(PCL_MAJOR_VERSION,PCL_MINOR_VERSION,PCL_REVISION_VERSION)
-#define PCL_VERSION_COMPARE(OP,MAJ,MIN,PATCH) \
- (PCL_VERSION OP PCL_VERSION_CALC(MAJ,MIN,PATCH))
+ PCL_VERSION_CALC(PCL_MAJOR_VERSION, PCL_MINOR_VERSION, PCL_REVISION_VERSION)
+#define PCL_VERSION_COMPARE(OP, MAJ, MIN, PATCH) \
+ (PCL_VERSION*10+PCL_DEV_VERSION OP PCL_VERSION_CALC(MAJ, MIN, PATCH)*10)
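+/*
+ * Worked example (illustrative): PCL_VERSION_CALC(1, 8, 0) = 1*100000 + 8*100 + 0 = 100800,
+ * so PCL_VERSION_COMPARE(>=, 1, 8, 0) expands to
+ * (PCL_VERSION*10 + PCL_DEV_VERSION >= 1008000).
+ */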
#cmakedefine HAVE_TBB 1
#include <pcl/people/hog.h>
-#define _USE_MATH_DEFINES
-#include <math.h>
#include <string.h>
#if defined(__SSE2__)
// compute gradient magnitude (M) and normalize Gx
for( y=0; y<h4; y++ )
{
- m = 1.0f/sqrtf(M2[y]);
+ m = 1.0f / std::sqrt (M2[y]);
m = m < 1e10f ? m : 1e10f;
M2[y] = 1.0f / m;
Gx[y] = ((Gx[y] * m) * acMult);
#include <pcl/point_types.h>
#include <pcl/recognition/dot_modality.h>
+#include <pcl/recognition/point_types.h>
#include <pcl/recognition/quantized_map.h>
namespace pcl
{
-
- /** \brief A point structure for representing RGB color
- * \ingroup common
- */
- struct EIGEN_ALIGN16 PointRGB
- {
- union
- {
- union
- {
- struct
- {
- uint8_t b;
- uint8_t g;
- uint8_t r;
- uint8_t _unused;
- };
- float rgb;
- };
- uint32_t rgba;
- };
-
- inline PointRGB ()
- {}
-
- inline PointRGB (const uint8_t b, const uint8_t g, const uint8_t r)
- : b (b), g (g), r (r), _unused (0)
- {}
-
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- };
-
-
- /** \brief A point structure representing Euclidean xyz coordinates, and the intensity value.
- * \ingroup common
- */
- struct EIGEN_ALIGN16 GradientXY
- {
- union
- {
- struct
- {
- float x;
- float y;
- float angle;
- float magnitude;
- };
- float data[4];
- };
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-
- inline bool operator< (const GradientXY & rhs)
- {
- return (magnitude > rhs.magnitude);
- }
- };
- inline std::ostream & operator << (std::ostream & os, const GradientXY & p)
- {
- os << "(" << p.x << "," << p.y << " - " << p.magnitude << ")";
- return (os);
- }
-
- // --------------------------------------------------------------------------
-
template <typename PointInT>
class ColorGradientDOTModality
: public DOTModality, public PCLBase<PointInT>
if (sqr_mag_r > sqr_mag_g && sqr_mag_r > sqr_mag_b)
{
GradientXY gradient;
- gradient.magnitude = sqrtf (static_cast<float> (sqr_mag_r));
+ gradient.magnitude = std::sqrt (static_cast<float> (sqr_mag_r));
gradient.angle = atan2f (static_cast<float> (r_dy), static_cast<float> (r_dx)) * 180.0f / pi;
if (gradient.angle < -180.0f) gradient.angle += 360.0f;
if (gradient.angle >= 180.0f) gradient.angle -= 360.0f;
else if (sqr_mag_g > sqr_mag_b)
{
GradientXY gradient;
- gradient.magnitude = sqrtf (static_cast<float> (sqr_mag_g));
+ gradient.magnitude = std::sqrt (static_cast<float> (sqr_mag_g));
gradient.angle = atan2f (static_cast<float> (g_dy), static_cast<float> (g_dx)) * 180.0f / pi;
if (gradient.angle < -180.0f) gradient.angle += 360.0f;
if (gradient.angle >= 180.0f) gradient.angle -= 360.0f;
else
{
GradientXY gradient;
- gradient.magnitude = sqrtf (static_cast<float> (sqr_mag_b));
+ gradient.magnitude = std::sqrt (static_cast<float> (sqr_mag_b));
gradient.angle = atan2f (static_cast<float> (b_dy), static_cast<float> (b_dx)) * 180.0f / pi;
if (gradient.angle < -180.0f) gradient.angle += 360.0f;
if (gradient.angle >= 180.0f) gradient.angle -= 360.0f;
multAB[k].r = a * c - b * d;
multAB[k].i = b * c + a * d;
- float tmp = sqrtf (multAB[k].r * multAB[k].r + multAB[k].i * multAB[k].i);
+ float tmp = std::sqrt (multAB[k].r * multAB[k].r + multAB[k].i * multAB[k].i);
multAB[k].r /= tmp;
multAB[k].i /= tmp;
*
*/
-#ifndef PCL_FEATURES_QUANTIZABLE_MODALITY
-#define PCL_FEATURES_QUANTIZABLE_MODALITY
+#ifndef PCL_FEATURES_DOT_MODALITY
+#define PCL_FEATURES_DOT_MODALITY
#include <vector>
#include <pcl/pcl_macros.h>
namespace pcl
{
-
- /** \brief A point structure for representing RGB color
- * \ingroup common
- */
- //struct EIGEN_ALIGN16 PointRGB
- //{
- // union
- // {
- // union
- // {
- // struct
- // {
- // uint8_t b;
- // uint8_t g;
- // uint8_t r;
- // uint8_t _unused;
- // };
- // float rgb;
- // };
- // uint32_t rgba;
- // };
-
- // inline PointRGB ()
- // {}
-
- // inline PointRGB (const uint8_t b, const uint8_t g, const uint8_t r)
- // : b (b), g (g), r (r), _unused (0)
- // {}
-
- // EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- //};
-
-
/** \brief A point structure representing Euclidean xyz coordinates, and the intensity value.
* \ingroup common
*/
// normalize normals
for (int normal_index = 0; normal_index < nr_normals; ++normal_index)
{
- const float length = sqrtf (ref_normals[normal_index].x * ref_normals[normal_index].x +
- ref_normals[normal_index].y * ref_normals[normal_index].y +
- ref_normals[normal_index].z * ref_normals[normal_index].z);
+ const float length = std::sqrt (ref_normals[normal_index].x * ref_normals[normal_index].x +
+ ref_normals[normal_index].y * ref_normals[normal_index].y +
+ ref_normals[normal_index].z * ref_normals[normal_index].z);
const float inv_length = 1.0f / length;
PointXYZ normal (static_cast<float> (x_index - range_x/2),
static_cast<float> (y_index - range_y/2),
static_cast<float> (z_index - range_z));
- const float length = sqrtf (normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
+ const float length = std::sqrt (normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
const float inv_length = 1.0f / (length + 0.00001f);
normal.x *= inv_length;
}
else
{
- const float normInv = 1.0f / sqrtf (length);
+ const float normInv = 1.0f / std::sqrt (length);
const float normal_x = nx * normInv;
const float normal_y = ny * normInv;
//double l_ny = l_ddy * dummy_focal_length;
//double l_nz = -l_det * l_d;
- float l_sqrt = sqrtf (l_nx * l_nx + l_ny * l_ny + l_nz * l_nz);
+ float l_sqrt = std::sqrt (l_nx * l_nx + l_ny * l_ny + l_nz * l_nz);
if (l_sqrt > 0)
{
{
copy_back_counter = 0;
- for (size_t mem_index = 0; mem_index < mem_size; mem_index += 16)
+ for (size_t mem_index = 0; mem_index < mem_size_mod_16_base; mem_index += 16)
{
score_sums[mem_index+0] = static_cast<unsigned short> (score_sums[mem_index+0] + tmp_score_sums[mem_index+0]);
score_sums[mem_index+1] = static_cast<unsigned short> (score_sums[mem_index+1] + tmp_score_sums[mem_index+1]);
detections.push_back (detection);
+#ifdef __SSE2__
+ aligned_free (score_sums);
+ aligned_free (tmp_score_sums);
+#else
delete[] score_sums;
+#endif
}
// release data
{
copy_back_counter = 0;
- for (size_t mem_index = 0; mem_index < mem_size; mem_index += 16)
+ for (size_t mem_index = 0; mem_index < mem_size_mod_16_base; mem_index += 16)
{
score_sums[mem_index+0] = static_cast<unsigned short> (score_sums[mem_index+0] + tmp_score_sums[mem_index+0]);
score_sums[mem_index+1] = static_cast<unsigned short> (score_sums[mem_index+1] + tmp_score_sums[mem_index+1]);
{
copy_back_counter = 0;
- for (size_t mem_index = 0; mem_index < mem_size; mem_index += 16)
+ for (size_t mem_index = 0; mem_index < mem_size_mod_16_base; mem_index += 16)
{
score_sums[mem_index+0] = static_cast<unsigned short> (score_sums[mem_index+0] + tmp_score_sums[mem_index+0]);
score_sums[mem_index+1] = static_cast<unsigned short> (score_sums[mem_index+1] + tmp_score_sums[mem_index+1]);
// Ensure ymin <= ymax
if (ymin > ymax) { Scalar tmp = ymin; ymin = ymax; ymax = tmp; };
- if (order > 2 && !(fpb != fpb) && fpb != std::numeric_limits<Scalar>::infinity ())
+ if (order > 2 && !(fpb != fpa) && fpb != std::numeric_limits<Scalar>::infinity ())
{
fpa = fpa * (b - a);
fpb = fpb * (b - a);
#ifndef PCL_GICP6D_H_
#define PCL_GICP6D_H_
-#include <pcl/registration/gicp.h>
-#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
+#include <pcl/kdtree/impl/kdtree_flann.hpp>
+#include <pcl/registration/gicp.h>
namespace pcl
{
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception, Inc.
+ * Copyright (C) 2008 Ben Gurion University of the Negev, Beer Sheva, Israel.
+ *
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_REGISTRATION_IA_FPCS_H_
+#define PCL_REGISTRATION_IA_FPCS_H_
+
+#include <pcl/common/common.h>
+#include <pcl/registration/registration.h>
+#include <pcl/registration/matching_candidate.h>
+
+namespace pcl
+{
+ /** \brief Compute the mean point density of a given point cloud.
+ * \param[in] cloud pointer to the input point cloud
+ * \param[in] max_dist maximum distance of a point to be considered as a neighbor
+ * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag is set)
+ * \return the mean point density of a given point cloud
+ */
+ template <typename PointT> inline float
+ getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, float max_dist, int nr_threads = 1);
+
+ /** \brief Compute the mean point density of a given point cloud.
+ * \param[in] cloud pointer to the input point cloud
+ * \param[in] indices the vector of point indices to use from \a cloud
+ * \param[in] max_dist maximum distance of a point to be considered as a neighbor
+ * \param[in] nr_threads number of threads to use (default = 1, only used if OpenMP flag is set)
+ * \return the mean point density of a given point cloud
+ */
+ template <typename PointT> inline float
+ getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, const std::vector <int> &indices,
+ float max_dist, int nr_threads = 1);
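// A minimal usage sketch for the two declarations above, assuming an already loaded cloud;
// the file name and the 5 cm neighbor radius are illustrative values only:
//
//   pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
//   pcl::io::loadPCDFile ("scene.pcd", *cloud);
//   float mean_nn_dist = pcl::getMeanPointDensity<pcl::PointXYZ> (cloud, 0.05f);
//
// The result is the mean distance of the sampled points to their nearest neighbor, later
// used to normalize the delta parameter of FPCSInitialAlignment.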
+
+
+ namespace registration
+ {
+ /** \brief FPCSInitialAlignment computes corresponding four point congruent sets as described in:
+ * "4-points congruent sets for robust pairwise surface registration", Dror Aiger, Niloy Mitra, Daniel Cohen-Or.
+ * ACM Transactions on Graphics, vol. 27(3), 2008
+ * \author P.W.Theiler
+ * \ingroup registration
+ */
+ template <typename PointSource, typename PointTarget, typename NormalT = pcl::Normal, typename Scalar = float>
+ class FPCSInitialAlignment : public Registration <PointSource, PointTarget, Scalar>
+ {
+ public:
+ /** \cond */
+ typedef boost::shared_ptr <FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> > Ptr;
+ typedef boost::shared_ptr <const FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> > ConstPtr;
+
+ typedef pcl::search::KdTree<PointSource> KdTreeReciprocal;
+ typedef typename KdTreeReciprocal::Ptr KdTreeReciprocalPtr;
+
+ typedef pcl::PointCloud <PointTarget> PointCloudTarget;
+ typedef pcl::PointCloud <PointSource> PointCloudSource;
+ typedef typename PointCloudSource::Ptr PointCloudSourcePtr;
+ typedef typename PointCloudSource::iterator PointCloudSourceIterator;
+
+ typedef pcl::PointCloud <NormalT> Normals;
+ typedef typename Normals::ConstPtr NormalsConstPtr;
+
+ typedef pcl::registration::MatchingCandidate MatchingCandidate;
+ typedef pcl::registration::MatchingCandidates MatchingCandidates;
+ /** \endcond */
+
+
+ /** \brief Constructor.
+ * Resets the maximum number of iterations to 0 thus forcing an internal computation if not set by the user.
+ * Sets the number of RANSAC iterations to 1000 and the standard transformation estimation to TransformationEstimation3Point.
+ */
+ FPCSInitialAlignment ();
+
+ /** \brief Destructor. */
+ virtual ~FPCSInitialAlignment ()
+ {};
+
+
+ /** \brief Provide a pointer to the vector of target indices.
+ * \param[in] target_indices a pointer to the target indices
+ */
+ inline void
+ setTargetIndices (const IndicesPtr &target_indices)
+ {
+ target_indices_ = target_indices;
+ };
+
+ /** \return a pointer to the vector of target indices. */
+ inline IndicesPtr
+ getTargetIndices () const
+ {
+ return (target_indices_);
+ };
+
+
+ /** \brief Provide a pointer to the normals of the source point cloud.
+ * \param[in] source_normals pointer to the normals of the source point cloud.
+ */
+ inline void
+ setSourceNormals (const NormalsConstPtr &source_normals)
+ {
+ source_normals_ = source_normals;
+ };
+
+ /** \return the normals of the source point cloud. */
+ inline NormalsConstPtr
+ getSourceNormals () const
+ {
+ return (source_normals_);
+ };
+
+
+ /** \brief Provide a pointer to the normals of the target point cloud.
+ * \param[in] target_normals pointer to the normals of the target point cloud.
+ */
+ inline void
+ setTargetNormals (const NormalsConstPtr &target_normals)
+ {
+ target_normals_ = target_normals;
+ };
+
+ /** \return the normals of the target point cloud. */
+ inline NormalsConstPtr
+ getTargetNormals () const
+ {
+ return (target_normals_);
+ };
+
+
+ /** \brief Set the number of used threads if OpenMP is activated.
+ * \param[in] nr_threads the number of used threads
+ */
+ inline void
+ setNumberOfThreads (int nr_threads)
+ {
+ nr_threads_ = nr_threads;
+ };
+
+ /** \return the number of threads used if OpenMP is activated. */
+ inline int
+ getNumberOfThreads () const
+ {
+ return (nr_threads_);
+ };
+
+
+ /** \brief Set the constant factor delta which weights the internally calculated parameters.
+ * \param[in] delta the weight factor delta
+ * \param[in] normalize flag indicating whether delta should be normalized according to the point cloud density
+ */
+ inline void
+ setDelta (float delta, bool normalize = false)
+ {
+ delta_ = delta;
+ normalize_delta_ = normalize;
+ };
+
+ /** \return the constant factor delta which weights the internally calculated parameters. */
+ inline float
+ getDelta () const
+ {
+ return (delta_);
+ };
+
+
+ /** \brief Set the approximate overlap between source and target.
+ * \param[in] approx_overlap the estimated overlap
+ */
+ inline void
+ setApproxOverlap (float approx_overlap)
+ {
+ approx_overlap_ = approx_overlap;
+ };
+
+ /** \return the approximated overlap between source and target. */
+ inline float
+ getApproxOverlap () const
+ {
+ return (approx_overlap_);
+ };
+
+
+ /** \brief Set the scoring threshold used to terminate the method early.
+ * \param[in] score_threshold score threshold for early termination
+ */
+ inline void
+ setScoreThreshold (float score_threshold)
+ {
+ score_threshold_ = score_threshold;
+ };
+
+ /** \return the scoring threshold used to terminate the method early. */
+ inline float
+ getScoreThreshold () const
+ {
+ return (score_threshold_);
+ };
+
+
+ /** \brief Set the number of source samples to use during alignment.
+ * \param[in] nr_samples the number of source samples
+ */
+ inline void
+ setNumberOfSamples (int nr_samples)
+ {
+ nr_samples_ = nr_samples;
+ };
+
+ /** \return the number of source samples to use during alignment. */
+ inline int
+ getNumberOfSamples () const
+ {
+ return (nr_samples_);
+ };
+
+
+ /** \brief Set the maximum normal difference between valid point correspondences in degree.
+ * \param[in] max_norm_diff the maximum difference in degree
+ */
+ inline void
+ setMaxNormalDifference (float max_norm_diff)
+ {
+ max_norm_diff_ = max_norm_diff;
+ };
+
+ /** \return the maximum normal difference between valid point correspondences in degree. */
+ inline float
+ getMaxNormalDifference () const
+ {
+ return (max_norm_diff_);
+ };
+
+
+ /** \brief Set the maximum computation time in seconds.
+ * \param[in] max_runtime the maximum runtime of the method in seconds
+ */
+ inline void
+ setMaxComputationTime (int max_runtime)
+ {
+ max_runtime_ = max_runtime;
+ };
+
+ /** \return the maximum computation time in seconds. */
+ inline int
+ getMaxComputationTime () const
+ {
+ return (max_runtime_);
+ };
+
+
+ /** \return the fitness score of the best scored four-point match. */
+ inline float
+ getFitnessScore () const
+ {
+ return (fitness_score_);
+ };
+
+ protected:
+
+ using PCLBase <PointSource>::deinitCompute;
+ using PCLBase <PointSource>::input_;
+ using PCLBase <PointSource>::indices_;
+
+ using Registration <PointSource, PointTarget, Scalar>::reg_name_;
+ using Registration <PointSource, PointTarget, Scalar>::target_;
+ using Registration <PointSource, PointTarget, Scalar>::tree_;
+ using Registration <PointSource, PointTarget, Scalar>::correspondences_;
+ using Registration <PointSource, PointTarget, Scalar>::target_cloud_updated_;
+ using Registration <PointSource, PointTarget, Scalar>::final_transformation_;
+ using Registration <PointSource, PointTarget, Scalar>::max_iterations_;
+ using Registration <PointSource, PointTarget, Scalar>::ransac_iterations_;
+ using Registration <PointSource, PointTarget, Scalar>::transformation_estimation_;
+ using Registration <PointSource, PointTarget, Scalar>::converged_;
+
+
+ /** \brief Rigid transformation computation method.
+ * \param output the transformed input point cloud dataset using the rigid transformation found
+ * \param guess the initial gross estimation of the transformation
+ */
+ virtual void
+ computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess);
+
+
+ /** \brief Internal computation initialization. */
+ virtual bool
+ initCompute ();
+
+ /** \brief Select an approximately coplanar set of four points from the source cloud.
+ * \param[out] base_indices selected source cloud indices, further used as base (B)
+ * \param[out] ratio the two diagonal intersection ratios (r1,r2) of the base points
+ * \return
+ * * < 0 no coplanar four-point set with a large enough sampling distance was found
+ * * = 0 a set of four congruent points was selected
+ */
+ int
+ selectBase (std::vector <int> &base_indices, float (&ratio)[2]);
+
+ /** \brief Select randomly a triplet of points with large point-to-point distances. The minimum point
+ * sampling distance is calculated based on the estimated point cloud overlap during initialization.
+ *
+ * \param[out] base_indices indices of base B
+ * \return
+ * * < 0 no triangle with large enough base lines could be selected
+ * * = 0 base triangle successfully selected
+ */
+ int
+ selectBaseTriangle (std::vector <int> &base_indices);
+
+ /** \brief Setup the base (four coplanar points) by ordering the points and computing intersection
+ * ratios and segment to segment distances of base diagonal.
+ *
+ * \param[in,out] base_indices indices of base B (will be reordered)
+ * \param[out] ratio diagonal intersection ratios of base points
+ */
+ void
+ setupBase (std::vector <int> &base_indices, float (&ratio)[2]);
+
+ /** \brief Calculate intersection ratios and segment to segment distances of base diagonals.
+ * \param[in] base_indices indices of base B
+ * \param[out] ratio diagonal intersection ratios of base points
+ * \return quality value of diagonal intersection
+ */
+ float
+ segmentToSegmentDist (const std::vector <int> &base_indices, float (&ratio)[2]);
+
+ /** \brief Search for corresponding point pairs given the distance between two base points.
+ *
+ * \param[in] idx1 first index of current base segment (in source cloud)
+ * \param[in] idx2 second index of current base segment (in source cloud)
+ * \param[out] pairs resulting point pairs with point-to-point distance close to ref_dist
+ * \return
+ * * < 0 no corresponding point pair was found
+ * * = 0 at least one point pair candidate was found
+ */
+ virtual int
+ bruteForceCorrespondences (int idx1, int idx2, pcl::Correspondences &pairs);
+
+ /** \brief Determine base matches by combining the point pair candidate and search for coinciding
+ * intersection points using the diagonal segment ratios of base B. The coincidence threshold is
+ * calculated during initialization (coincidation_limit_).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[out] matches vector of candidate matches w.r.t the base B
+ * \param[in] pairs_a point pairs corresponding to points of 1st diagonal of base B
+ * \param[in] pairs_b point pairs corresponding to points of 2nd diagonal of base B
+ * \param[in] ratio diagonal intersection ratios of base points
+ * \return
+ * * < 0 no base match could be found
+ * * = 0 at least one base match was found
+ */
+ virtual int
+ determineBaseMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ const pcl::Correspondences &pairs_a,
+ const pcl::Correspondences &pairs_b,
+ const float (&ratio)[2]);
+
+ /** \brief Check if outer rectangle distance of matched points fit with the base rectangle.
+ *
+ * \param[in] match_indices indices of match M
+ * \param[in] ds edge lengths of base B
+ * \return
+ * * < 0 at least one edge of the match M has no corresponding one in the base B
+ * * = 0 edges of match M fit the ones of base B
+ */
+ int
+ checkBaseMatch (const std::vector <int> &match_indices, const float (&ds)[4]);
+
+ /** \brief Method to handle current candidate matches. Here we validate and evaluate the matches w.r.t the
+ * base and store the best fitting match (together with its score and estimated transformation).
+ * \note For forwards compatibility the results are stored in 'vectors of size 1'.
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate matches are
+ * reordered during this step.
+ * \param[out] candidates vector which contains the candidates matches M
+ */
+ virtual void
+ handleMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ MatchingCandidates &candidates);
+
+ /** \brief Sets the correspondences between the base B and the match M by using the distance of each point
+ * to the centroid of the rectangle.
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in] match_indices indices of match M
+ * \param[out] correspondences resulting correspondences
+ */
+ virtual void
+ linkMatchWithBase (
+ const std::vector <int> &base_indices,
+ std::vector <int> &match_indices,
+ pcl::Correspondences &correspondences);
+
+ /** \brief Validate the matching by computing the transformation between the source and target based on the
+ * four matched points and by comparing the mean square error (MSE) to a threshold. The MSE limit was
+ * calculated during initialization (max_mse_).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in] match_indices indices of match M
+ * \param[in] correspondences correspondences between source and target
+ * \param[out] transformation resulting transformation matrix
+ * \return
+ * * < 0 MSE bigger than max_mse_
+ * * = 0 MSE smaller than max_mse_
+ */
+ virtual int
+ validateMatch (
+ const std::vector <int> &base_indices,
+ const std::vector <int> &match_indices,
+ const pcl::Correspondences &correspondences,
+ Eigen::Matrix4f &transformation);
+
+ /** \brief Validate the transformation by calculating the number of inliers after transforming the source cloud.
+ * The resulting fitness score is later used as the decision criteria of the best fitting match.
+ *
+ * \param[out] transformation updated orientation matrix using all inliers
+ * \param[out] fitness_score current best fitness_score
+ * \note fitness score is only updated if the score of the current transformation exceeds the input one.
+ * \return
+ * * < 0 if previous result is better than the current one (score remains)
+ * * = 0 current result is better than the previous one (score updated)
+ */
+ virtual int
+ validateTransformation (Eigen::Matrix4f &transformation, float &fitness_score);
+
+ /** \brief Final computation of the best match out of the vector of best matches. To avoid cross-thread
+ * dependencies during parallel execution, a best match was calculated for each try.
+ * \note For forwards compatibility the candidates are stored in vectors of 'vectors of size 1'.
+ * \param[in] candidates vector of candidate matches
+ */
+ virtual void
+ finalCompute (const std::vector <MatchingCandidates > &candidates);
+
+
+ /** \brief Normals of source point cloud. */
+ NormalsConstPtr source_normals_;
+
+ /** \brief Normals of target point cloud. */
+ NormalsConstPtr target_normals_;
+
+
+ /** \brief Number of threads for parallelization (standard = 1).
+ * \note Only used if run compiled with OpenMP.
+ */
+ int nr_threads_;
+
+ /** \brief Estimated overlap between source and target (standard = 0.5). */
+ float approx_overlap_;
+
+ /** \brief Delta value of 4pcs algorithm (standard = 1.0).
+ * It can be used as:
+ * * absolute value (normalization = false), value should represent the point accuracy to ensure finding neighbors between source <-> target
+ * * relative value (normalization = true), to adjust the internally calculated point accuracy (= point density)
+ */
+ float delta_;
+
+ /** \brief Score threshold to stop calculation with success.
+ * If not set by the user, it is derived from the approximate overlap (1 - approx_overlap_).
+ */
+ float score_threshold_;
+
+ /** \brief The number of points to uniformly sample the source point cloud. (standard = 0 => full cloud). */
+ int nr_samples_;
+
+ /** \brief Maximum normal difference of corresponding point pairs in degrees (standard = 90). */
+ float max_norm_diff_;
+
+ /** \brief Maximum allowed computation time in seconds (standard = 0 => ~unlimited). */
+ int max_runtime_;
+
+
+ /** \brief Resulting fitness score of the best match. */
+ float fitness_score_;
+
+
+ /** \brief Estimated diameter of the target point cloud. */
+ float diameter_;
+
+ /** \brief Estimated squared metric overlap between source and target.
+ * \note Internally calculated using the estimated overlap and the extent of the source cloud.
+ * It is used to derive the minimum sampling distance of the base points as well as to calculate
+ * the number of tries needed to reliably find a correct match.
+ */
+ float max_base_diameter_sqr_;
+
+ /** \brief Use normals flag. */
+ bool use_normals_;
+
+ /** \brief Normalize delta flag. */
+ bool normalize_delta_;
+
+
+ /** \brief A pointer to the vector of source point indices to use after sampling. */
+ pcl::IndicesPtr source_indices_;
+
+ /** \brief A pointer to the vector of target point indices to use after sampling. */
+ pcl::IndicesPtr target_indices_;
+
+ /** \brief Maximal difference between corresponding point pairs in source and target.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float max_pair_diff_;
+
+ /** \brief Maximal difference between the length of the base edges and valid match edges.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float max_edge_diff_;
+
+ /** \brief Maximal distance between coinciding intersection points to find valid matches.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float coincidation_limit_;
+
+ /** \brief Maximal mean squared errors of a transformation calculated from a candidate match.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float max_mse_;
+
+ /** \brief Maximal squared point distance between source and target points to count as inlier.
+ * \note Internally calculated using an estimation of the point density.
+ */
+ float max_inlier_dist_sqr_;
+
+
+ /** \brief Definition of a small error. */
+ const float small_error_;
+
+ };
+ }; // namespace registration
+}; // namespace pcl
+
+#include <pcl/registration/impl/ia_fpcs.hpp>
+
+#endif // PCL_REGISTRATION_IA_FPCS_H_
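// A minimal alignment sketch using the class declared above; the input clouds, the 50 %
// overlap estimate and the 2 cm delta are illustrative assumptions. setInputSource,
// setInputTarget, align and getFinalTransformation come from the pcl::Registration base class.
#include <pcl/point_types.h>
#include <pcl/registration/ia_fpcs.h>

static Eigen::Matrix4f
exampleFpcsAlign (const pcl::PointCloud<pcl::PointXYZ>::Ptr &source,
                  const pcl::PointCloud<pcl::PointXYZ>::Ptr &target)
{
  pcl::registration::FPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> fpcs;
  fpcs.setInputSource (source);
  fpcs.setInputTarget (target);
  fpcs.setApproxOverlap (0.5f);    // estimated overlap between the two scans
  fpcs.setDelta (0.02f, false);    // absolute point accuracy in the cloud's units
  fpcs.setNumberOfSamples (1000);  // subsample the source cloud for speed

  pcl::PointCloud<pcl::PointXYZ> aligned;
  fpcs.align (aligned);            // runs computeTransformation internally
  return (fpcs.getFinalTransformation ());
}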
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception, Inc.
+ *
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_REGISTRATION_IA_KFPCS_H_
+#define PCL_REGISTRATION_IA_KFPCS_H_
+
+#include <pcl/registration/ia_fpcs.h>
+
+namespace pcl
+{
+ namespace registration
+ {
+ /** \brief KFPCSInitialAlignment computes corresponding four point congruent sets based on keypoints
+ * as described in: "Markerless point cloud registration with keypoint-based 4-points congruent sets",
+ * Pascal Theiler, Jan Dirk Wegner, Konrad Schindler. ISPRS Annals II-5/W2, 2013. Presented at ISPRS Workshop
+ * Laser Scanning, Antalya, Turkey, 2013.
+ * \note Method has since been improved and some variations to the paper exist.
+ * \author P.W.Theiler
+ * \ingroup registration
+ */
+ template <typename PointSource, typename PointTarget, typename NormalT = pcl::Normal, typename Scalar = float>
+ class KFPCSInitialAlignment : public virtual FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>
+ {
+ public:
+ /** \cond */
+ typedef boost::shared_ptr <KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> > Ptr;
+ typedef boost::shared_ptr <const KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar> > ConstPtr;
+
+ typedef pcl::PointCloud <PointSource> PointCloudSource;
+ typedef typename PointCloudSource::Ptr PointCloudSourcePtr;
+ typedef typename PointCloudSource::iterator PointCloudSourceIterator;
+
+ typedef pcl::PointCloud <PointTarget> PointCloudTarget;
+ typedef typename PointCloudTarget::Ptr PointCloudTargetPtr;
+ typedef typename PointCloudTarget::iterator PointCloudTargetIterator;
+
+ typedef pcl::registration::MatchingCandidate MatchingCandidate;
+ typedef pcl::registration::MatchingCandidates MatchingCandidates;
+ /** \endcond */
+
+
+ /** \brief Constructor. */
+ KFPCSInitialAlignment ();
+
+ /** \brief Destructor. */
+ virtual ~KFPCSInitialAlignment ()
+ {};
+
+
+ /** \brief Set the upper translation threshold used for score evaluation.
+ * \param[in] upper_trl_boundary upper translation threshold
+ */
+ inline void
+ setUpperTranslationThreshold (float upper_trl_boundary)
+ {
+ upper_trl_boundary_ = upper_trl_boundary;
+ };
+
+ /** \return the upper translation threshold used for score evaluation. */
+ inline float
+ getUpperTranslationThreshold () const
+ {
+ return (upper_trl_boundary_);
+ };
+
+
+ /** \brief Set the lower translation threshold used for score evaluation.
+ * \param[in] lower_trl_boundary lower translation threshold
+ */
+ inline void
+ setLowerTranslationThreshold (float lower_trl_boundary)
+ {
+ lower_trl_boundary_ = lower_trl_boundary;
+ };
+
+ /** \return the lower translation threshold used for score evaluation. */
+ inline float
+ getLowerTranslationThreshold () const
+ {
+ return (lower_trl_boundary_);
+ };
+
+
+ /** \brief Set the weighting factor of the translation cost term.
+ * \param[in] lambda the weighting factor of the translation cost term
+ */
+ inline void
+ setLambda (float lambda)
+ {
+ lambda_ = lambda;
+ };
+
+ /** \return the weighting factor of the translation cost term. */
+ inline float
+ getLambda () const
+ {
+ return (lambda_);
+ };
+
+
+ /** \brief Get the N best unique candidate matches according to their fitness score.
+ * The method only returns unique transformations comparing the translation
+ * and the 3D rotation to already returned transformations.
+ *
+ * \note The method may return fewer than N candidates if the number of unique candidates
+ * is smaller than N.
+ *
+ * \param[in] n number of best candidates to return
+ * \param[in] min_angle3d minimum 3D angle difference in radian
+ * \param[in] min_translation3d minimum 3D translation difference
+ * \param[out] candidates vector of unique candidates
+ */
+ void
+ getNBestCandidates (int n, float min_angle3d, float min_translation3d, MatchingCandidates &candidates);
+
+ /** \brief Get all unique candidate matches with fitness scores above a threshold t.
+ * The method only returns unique transformations comparing the translation
+ * and the 3D rotation to already returned transformations.
+ *
+ * \param[in] t fitness score threshold
+ * \param[in] min_angle3d minimum 3D angle difference in radian
+ * \param[in] min_translation3d minimum 3D translation difference
+ * \param[out] candidates vector of unique candidates
+ */
+ void
+ getTBestCandidates (float t, float min_angle3d, float min_translation3d, MatchingCandidates &candidates);
+
+
+ protected:
+
+ using PCLBase <PointSource>::deinitCompute;
+ using PCLBase <PointSource>::input_;
+ using PCLBase <PointSource>::indices_;
+
+ using Registration <PointSource, PointTarget, Scalar>::reg_name_;
+ using Registration <PointSource, PointTarget, Scalar>::tree_;
+ using Registration <PointSource, PointTarget, Scalar>::final_transformation_;
+ using Registration <PointSource, PointTarget, Scalar>::ransac_iterations_;
+ using Registration <PointSource, PointTarget, Scalar>::correspondences_;
+ using Registration <PointSource, PointTarget, Scalar>::converged_;
+
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::delta_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::approx_overlap_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_pair_diff_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_edge_diff_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::coincidation_limit_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_mse_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::max_inlier_dist_sqr_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::diameter_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::normalize_delta_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::fitness_score_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::score_threshold_;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::linkMatchWithBase;
+ using FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateMatch;
+
+
+ /** \brief Internal computation initialization. */
+ virtual bool
+ initCompute ();
+
+ /** \brief Method to handle current candidate matches. Here we validate and evaluate the matches w.r.t the
+ * base and store the sorted matches (together with score values and estimated transformations).
+ *
+ * \param[in] base_indices indices of base B
+ * \param[in,out] matches vector of candidate matches w.r.t the base B. The candidate matches are
+ * reordered during this step.
+ * \param[out] candidates vector which contains the candidates matches M
+ */
+ virtual void
+ handleMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ MatchingCandidates &candidates);
+
+ /** \brief Validate the transformation by calculating the score value after transforming the input source cloud.
+ * The resulting score is later used as the decision criteria of the best fitting match.
+ *
+ * \param[out] transformation updated orientation matrix using all inliers
+ * \param[out] fitness_score current best score
+ * \note fitness score is only updated if the score of the current transformation exceeds the input one.
+ * \return
+ * * < 0 if previous result is better than the current one (score remains)
+ * * = 0 current result is better than the previous one (score updated)
+ */
+ virtual int
+ validateTransformation (Eigen::Matrix4f &transformation, float &fitness_score);
+
+ /** \brief Final computation of the best match out of the vector of matches. To avoid cross-thread
+ * dependencies during parallel execution, a best match was calculated for each try.
+ * \note For forwards compatibility the candidates are stored in vectors of 'vectors of size 1'.
+ * \param[in] candidates vector of candidate matches
+ */
+ virtual void
+ finalCompute (const std::vector <MatchingCandidates > &candidates);
+
+
+ /** \brief Lower boundary for translation costs calculation.
+ * \note If not set by the user, the translation costs are not used during evaluation.
+ */
+ float lower_trl_boundary_;
+
+ /** \brief Upper boundary for translation costs calculation.
+ * \note If not set by the user, it is calculated from the estimated overlap and the diameter
+ * of the point cloud.
+ */
+ float upper_trl_boundary_;
+
+ /** \brief Weighting factor for translation costs (standard = 0.5). */
+ float lambda_;
+
+
+ /** \brief Container for resulting vector of registration candidates. */
+ MatchingCandidates candidates_;
+
+ /** \brief Flag if translation score should be used in validation (internal calculation). */
+ bool use_trl_score_;
+
+ /** \brief Subset of input indices on which we evaluate candidates.
+ * To speed up the evaluation, we only use a fixed number of indices defined during initialization.
+ */
+ pcl::IndicesPtr indices_validation_;
+
+ };
+ }; // namespace registration
+}; // namespace pcl
+
+#include <pcl/registration/impl/ia_kfpcs.hpp>
+
+#endif // PCL_REGISTRATION_IA_KFPCS_H_
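// A minimal sketch of the keypoint-based variant declared above; the keypoint clouds,
// the overlap, lambda and the uniqueness thresholds are illustrative assumptions.
#include <pcl/point_types.h>
#include <pcl/registration/ia_kfpcs.h>

static void
exampleKfpcsAlign (const pcl::PointCloud<pcl::PointXYZ>::Ptr &source_keypoints,
                   const pcl::PointCloud<pcl::PointXYZ>::Ptr &target_keypoints)
{
  pcl::registration::KFPCSInitialAlignment<pcl::PointXYZ, pcl::PointXYZ> kfpcs;
  kfpcs.setInputSource (source_keypoints);
  kfpcs.setInputTarget (target_keypoints);
  kfpcs.setApproxOverlap (0.6f);
  kfpcs.setLambda (0.5f);          // weight of the translation cost term

  pcl::PointCloud<pcl::PointXYZ> aligned;
  kfpcs.align (aligned);

  // retrieve the ten best unique candidates, at least ~5 degrees / 0.1 units apart
  pcl::registration::MatchingCandidates candidates;
  kfpcs.getNBestCandidates (10, 0.087f, 0.1f, candidates);
}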
}
////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget>
+template <typename PointSource, typename PointTarget>
template<typename PointT> void
-pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeCovariances(typename pcl::PointCloud<PointT>::ConstPtr cloud,
+pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeCovariances(typename pcl::PointCloud<PointT>::ConstPtr cloud,
const typename pcl::search::KdTree<PointT>::Ptr kdtree,
MatricesVector& cloud_covariances)
{
// Search for the K nearest neighbours
kdtree->nearestKSearch(query_point, k_correspondences_, nn_indecies, nn_dist_sq);
-
+
// Find the covariance matrix
for(int j = 0; j < k_correspondences_; j++) {
const PointT &pt = (*cloud)[nn_indecies[j]];
-
+
mean[0] += pt.x;
mean[1] += pt.y;
mean[2] += pt.z;
-
+
cov(0,0) += pt.x*pt.x;
-
+
cov(1,0) += pt.y*pt.x;
cov(1,1) += pt.y*pt.y;
-
+
cov(2,0) += pt.z*pt.x;
cov(2,1) += pt.z*pt.y;
- cov(2,2) += pt.z*pt.z;
+ cov(2,2) += pt.z*pt.z;
}
-
+
mean /= static_cast<double> (k_correspondences_);
// Get the actual covariance
for (int k = 0; k < 3; k++)
- for (int l = 0; l <= k; l++)
+ for (int l = 0; l <= k; l++)
{
cov(k,l) /= static_cast<double> (k_correspondences_);
cov(k,l) -= mean[k]*mean[l];
cov(l,k) = cov(k,l);
}
-
+
// Compute the SVD (covariance matrix is symmetric so U = V')
Eigen::JacobiSVD<Eigen::Matrix3d> svd(cov, Eigen::ComputeFullU);
cov.setZero ();
double v = 1.; // biggest 2 singular values replaced by 1
if(k == 2) // smallest singular value replaced by gicp_epsilon
v = gicp_epsilon_;
- cov+= v * col * col.transpose();
+ cov+= v * col * col.transpose();
}
}
}
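// The loop above rebuilds each local covariance as U * diag(1, 1, gicp_epsilon) * U^T,
// keeping only the surface orientation. A standalone sketch of that regularization
// (the function name and the epsilon value are illustrative):
#include <Eigen/SVD>

static Eigen::Matrix3d
regularizeCovarianceSketch (const Eigen::Matrix3d &cov, double epsilon = 1e-3)
{
  // the covariance is symmetric, so U == V and ComputeFullU is enough
  Eigen::JacobiSVD<Eigen::Matrix3d> svd (cov, Eigen::ComputeFullU);
  Eigen::Matrix3d result = Eigen::Matrix3d::Zero ();
  for (int k = 0; k < 3; ++k)
  {
    Eigen::Vector3d col = svd.matrixU ().col (k);
    double v = (k == 2) ? epsilon : 1.0;   // singular values are sorted, index 2 is the smallest
    result += v * col * col.transpose ();  // accumulate U * diag(1, 1, epsilon) * U^T
  }
  return (result);
}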
Eigen::Matrix3d dR_dPsi;
double phi = x[3], theta = x[4], psi = x[5];
-
+
double cphi = cos(phi), sphi = sin(phi);
double ctheta = cos(theta), stheta = sin(theta);
double cpsi = cos(psi), spsi = sin(psi);
-
+
dR_dPhi(0,0) = 0.;
dR_dPhi(1,0) = 0.;
dR_dPhi(2,0) = 0.;
dR_dPsi(0,2) = cpsi*sphi - cphi*spsi*stheta;
dR_dPsi(1,2) = sphi*spsi + cphi*cpsi*stheta;
dR_dPsi(2,2) = 0.;
-
+
g[3] = matricesInnerProd(dR_dPhi, R);
g[4] = matricesInnerProd(dR_dTheta, R);
g[5] = matricesInnerProd(dR_dPsi, R);
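// matricesInnerProd (not shown in this hunk) computes what amounts to a Frobenius inner
// product of the rotation derivative matrices with the accumulated matrix R, giving the
// gradient entries above. A compact Eigen sketch of that inner product (the helper name
// is illustrative, not part of the PCL API):
#include <Eigen/Core>

static inline double
frobeniusInnerProduct (const Eigen::Matrix3d &a, const Eigen::Matrix3d &b)
{
  return ((a.array () * b.array ()).sum ());   // sum_ij a(i,j) * b(i,j)
}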
////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget> void
-pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS (const PointCloudSource &cloud_src,
- const std::vector<int> &indices_src,
- const PointCloudTarget &cloud_tgt,
- const std::vector<int> &indices_tgt,
+pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS (const PointCloudSource &cloud_src,
+ const std::vector<int> &indices_src,
+ const PointCloudTarget &cloud_tgt,
+ const std::vector<int> &indices_tgt,
Eigen::Matrix4f &transformation_matrix)
{
if (indices_src.size () < 4) // need at least 4 samples
{
- PCL_THROW_EXCEPTION (NotEnoughPointsException,
+ PCL_THROW_EXCEPTION (NotEnoughPointsException,
"[pcl::GeneralizedIterativeClosestPoint::estimateRigidTransformationBFGS] Need at least 4 points to estimate a transform! Source and target have " << indices_src.size () << " points!");
return;
}
applyState(transformation_matrix, x);
}
else
- PCL_THROW_EXCEPTION(SolverDidntConvergeException,
+ PCL_THROW_EXCEPTION(SolverDidntConvergeException,
"[pcl::" << getClassName () << "::TransformationEstimationBFGS::estimateRigidTransformation] BFGS solver didn't converge!");
}
pp = gicp_->base_transformation_ * p_src;
Eigen::Vector3d p_src3 (pp[0], pp[1], pp[2]);
// Increment rotation gradient
- R+= p_src3 * temp.transpose();
+ R+= p_src3 * temp.transpose();
}
f/= double(m);
g.head<3> ()*= double(2.0/m);
computeCovariances<PointSource> (input_, tree_reciprocal_, *input_covariances_);
}
- base_transformation_ = guess;
+ base_transformation_ = Eigen::Matrix4f::Identity();
nr_iterations_ = 0;
converged_ = false;
double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
std::vector<int> nn_indices (1);
std::vector<float> nn_dists (1);
+ pcl::transformPointCloud(output, output, guess);
+
while(!converged_)
{
size_t cnt = 0;
for (size_t i = 0; i < N; i++)
{
PointSource query = output[i];
- query.getVector4fMap () = guess * query.getVector4fMap ();
query.getVector4fMap () = transformation_ * query.getVector4fMap ();
if (!searchForNeighbors (query, nn_indices, nn_dists))
PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
return;
}
-
+
// Check if the distance to the nearest neighbor is smaller than the user imposed threshold
if (nn_dists[0] < dist_threshold)
{
// M = R*C1
M = R * C1;
// temp = M*R' + C2 = R*C1*R' + C2
- Eigen::Matrix3d temp = M * R.transpose();
+ Eigen::Matrix3d temp = M * R.transpose();
temp+= C2;
// M = temp^-1
M = temp.inverse ();
delta = c_delta;
}
}
- }
+ }
catch (PCLException &e)
{
PCL_DEBUG ("[pcl::%s::computeTransformation] Optimization issue %s\n", getClassName ().c_str (), e.what ());
previous_transformation_ = transformation_;
PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence reached. Number of iterations: %d out of %d. Transformation difference: %f\n",
getClassName ().c_str (), nr_iterations_, max_iterations_, (transformation_ - previous_transformation_).array ().abs ().sum ());
- }
+ }
else
PCL_DEBUG ("[pcl::%s::computeTransformation] Convergence failed\n", getClassName ().c_str ());
}
- //for some reason the static equivalent methode raises an error
- // final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) * (guess.block<3,3> (0,0));
- // final_transformation_.block <3, 1> (0, 3) = transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
- final_transformation_.topLeftCorner (3,3) = previous_transformation_.topLeftCorner (3,3) * guess.topLeftCorner (3,3);
- final_transformation_(0,3) = previous_transformation_(0,3) + guess(0,3);
- final_transformation_(1,3) = previous_transformation_(1,3) + guess(1,3);
- final_transformation_(2,3) = previous_transformation_(2,3) + guess(2,3);
+ final_transformation_ = previous_transformation_ * guess;
// Transform the point cloud
pcl::transformPointCloud (*input_, output, final_transformation_);
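// With the change above, the guess is applied once to the working copy of the source and
// the ICP loop estimates a correction on top of it, so the complete source-to-target
// mapping is the plain composition (a sketch of the relation, not additional API):
//
//   p_target ~ final_transformation_ * p_source
//            = previous_transformation_ * (guess * p_source)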
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception, Inc.
+ * Copyright (C) 2008 Ben Gurion University of the Negev, Beer Sheva, Israel.
+ *
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_REGISTRATION_IMPL_IA_FPCS_H_
+#define PCL_REGISTRATION_IMPL_IA_FPCS_H_
+
+#include <pcl/registration/ia_fpcs.h>
+#include <pcl/common/time.h>
+#include <pcl/common/distances.h>
+#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/registration/transformation_estimation_3point.h>
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT> inline float
+pcl::getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, float max_dist, int nr_threads)
+{
+ const float max_dist_sqr = max_dist * max_dist;
+ const std::size_t s = cloud->size ();
+
+ pcl::search::KdTree <PointT> tree;
+ tree.setInputCloud (cloud);
+
+ float mean_dist = 0.f;
+ int num = 0;
+ std::vector <int> ids (2);
+ std::vector <float> dists_sqr (2);
+
+#ifdef _OPENMP
+#pragma omp parallel for \
+ reduction (+:mean_dist, num) \
+ private (ids, dists_sqr) shared (tree, cloud) \
+ default (none) num_threads (nr_threads)
+#endif
+
+ for (int i = 0; i < 1000; i++)
+ {
+ tree.nearestKSearch (cloud->points[rand () % s], 2, ids, dists_sqr);
+ if (dists_sqr[1] < max_dist_sqr)
+ {
+ mean_dist += std::sqrt (dists_sqr[1]);
+ num++;
+ }
+ }
+
+ return (mean_dist / num);
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT> inline float
+pcl::getMeanPointDensity (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, const std::vector <int> &indices,
+ float max_dist, int nr_threads)
+{
+ const float max_dist_sqr = max_dist * max_dist;
+ const std::size_t s = indices.size ();
+
+ pcl::search::KdTree <PointT> tree;
+ tree.setInputCloud (cloud);
+
+ float mean_dist = 0.f;
+ int num = 0;
+ std::vector <int> ids (2);
+ std::vector <float> dists_sqr (2);
+
+#ifdef _OPENMP
+#pragma omp parallel for \
+ reduction (+:mean_dist, num) \
+ private (ids, dists_sqr) shared (tree, cloud, indices) \
+ default (none) num_threads (nr_threads)
+#endif
+
+ for (int i = 0; i < 1000; i++)
+ {
+ tree.nearestKSearch (cloud->points[indices[rand () % s]], 2, ids, dists_sqr);
+ if (dists_sqr[1] < max_dist_sqr)
+ {
+ mean_dist += std::sqrt (dists_sqr[1]);
+ num++;
+ }
+ }
+
+ return (mean_dist / num);
+};
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::FPCSInitialAlignment () :
+ source_normals_ (),
+ target_normals_ (),
+ nr_threads_ (1),
+ approx_overlap_ (0.5f),
+ delta_ (1.f),
+ score_threshold_ (FLT_MAX),
+ nr_samples_ (0),
+ max_norm_diff_ (90.f),
+ max_runtime_ (0),
+ fitness_score_ (FLT_MAX),
+ diameter_ (),
+ max_base_diameter_sqr_ (),
+ use_normals_ (false),
+ normalize_delta_ (true),
+ max_pair_diff_ (),
+ max_edge_diff_ (),
+ coincidation_limit_ (),
+ max_mse_ (),
+ max_inlier_dist_sqr_ (),
+ small_error_ (0.00001f)
+{
+ reg_name_ = "pcl::registration::FPCSInitialAlignment";
+ max_iterations_ = 0;
+ ransac_iterations_ = 1000;
+ transformation_estimation_.reset (new pcl::registration::TransformationEstimation3Point <PointSource, PointTarget>);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::computeTransformation (
+ PointCloudSource &output,
+ const Eigen::Matrix4f &guess)
+{
+ if (!initCompute ())
+ return;
+
+ final_transformation_ = guess;
+ bool abort = false;
+ std::vector <MatchingCandidates> all_candidates (max_iterations_);
+ pcl::StopWatch timer;
+
+ #ifdef _OPENMP
+ #pragma omp parallel num_threads (nr_threads_)
+ #endif
+ {
+ #ifdef _OPENMP
+ std::srand (static_cast <unsigned int> (std::time (NULL)) ^ omp_get_thread_num ());
+ #pragma omp for schedule (dynamic)
+ #endif
+ for (int i = 0; i < max_iterations_; i++)
+ {
+
+ #ifdef _OPENMP
+ #pragma omp flush (abort)
+ #endif
+
+ MatchingCandidates candidates (1);
+ std::vector <int> base_indices (4);
+ float ratio[2];
+ all_candidates[i] = candidates;
+
+ if (!abort)
+ {
+ // select four coplanar point base
+ if (selectBase (base_indices, ratio) == 0)
+ {
+ // calculate candidate pair correspondences using diagonal lengths of base
+ pcl::Correspondences pairs_a, pairs_b;
+ if (bruteForceCorrespondences (base_indices[0], base_indices[1], pairs_a) == 0 &&
+ bruteForceCorrespondences (base_indices[2], base_indices[3], pairs_b) == 0)
+ {
+ // determine candidate matches by combining pair correspondences based on segment distances
+ std::vector <std::vector <int> > matches;
+ if (determineBaseMatches (base_indices, matches, pairs_a, pairs_b, ratio) == 0)
+ {
+ // check and evaluate candidate matches and store them
+ handleMatches (base_indices, matches, candidates);
+ if (candidates.size () != 0)
+ all_candidates[i] = candidates;
+ }
+ }
+ }
+
+ // check whether to terminate early (time or fitness_score threshold reached)
+ abort = (candidates.size () > 0 ? candidates[0].fitness_score < score_threshold_ : abort);
+ abort = (abort ? abort : timer.getTimeSeconds () > max_runtime_);
+
+
+ #ifdef _OPENMP
+ #pragma omp flush (abort)
+ #endif
+ }
+ }
+ }
+
+
+ // determine the best match over all tries
+ finalCompute (all_candidates);
+
+ // apply the final transformation
+ pcl::transformPointCloud (*input_, output, final_transformation_);
+
+ deinitCompute ();
+}
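// A self-contained sketch of the shared-abort pattern used in the loop above: each thread
// polls a flag that any thread may raise, and omp flush keeps the flag visible across
// threads. The trial count, the fake score and the 0.5 threshold are illustrative
// stand-ins for one FPCS try and for the score threshold.
#include <cmath>

static void
abortableTrialsSketch (int max_trials)
{
  bool abort = false;
#ifdef _OPENMP
  #pragma omp parallel for schedule (dynamic) shared (abort)
#endif
  for (int i = 0; i < max_trials; ++i)
  {
#ifdef _OPENMP
    #pragma omp flush (abort)
#endif
    if (!abort)
    {
      double score = std::exp (-0.01 * i);  // stand-in for evaluating one candidate match
      if (score < 0.5)                      // stand-in for fitness_score < score_threshold_
        abort = true;
#ifdef _OPENMP
      #pragma omp flush (abort)
#endif
    }
  }
}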
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+{
+ std::srand (static_cast <unsigned int> (std::time (NULL)));
+
+ // basic pcl initialization
+ if (!pcl::PCLBase <PointSource>::initCompute ())
+ return (false);
+
+ // check if source and target are given
+ if (!input_ || !target_)
+ {
+ PCL_ERROR ("[%s::initCompute] Source or target dataset not given!\n", reg_name_.c_str ());
+ return (false);
+ }
+
+ if (!target_indices_ || target_indices_->size () == 0)
+ {
+ target_indices_.reset (new std::vector <int> (static_cast <int> (target_->size ())));
+ int index = 0;
+ for (std::vector <int>::iterator it = target_indices_->begin (), it_e = target_indices_->end (); it != it_e; it++)
+ *it = index++;
+ target_cloud_updated_ = true;
+ }
+
+ // if a sample size for the point clouds is given; preferably no sampling of the target cloud
+ if (nr_samples_ != 0)
+ {
+ const int ss = static_cast <int> (indices_->size ());
+ const int sample_fraction_src = std::max (1, static_cast <int> (ss / nr_samples_));
+
+ source_indices_ = pcl::IndicesPtr (new std::vector <int>);
+ for (int i = 0; i < ss; i++)
+ if (rand () % sample_fraction_src == 0)
+ source_indices_->push_back ((*indices_) [i]);
+ }
+ else
+ source_indices_ = indices_;
+
+ // check usage of normals
+ if (source_normals_ && target_normals_ && source_normals_->size () == input_->size () && target_normals_->size () == target_->size ())
+ use_normals_ = true;
+
+ // set up tree structures
+ if (target_cloud_updated_)
+ {
+ tree_->setInputCloud (target_, target_indices_);
+ target_cloud_updated_ = false;
+ }
+
+ // set predefined variables
+ const int min_iterations = 4;
+ const float diameter_fraction = 0.3f;
+
+ // get diameter of input cloud (distance between farthest points)
+ Eigen::Vector4f pt_min, pt_max;
+ pcl::getMinMax3D (*target_, *target_indices_, pt_min, pt_max);
+ diameter_ = (pt_max - pt_min).norm ();
+
+ // derive the limits for the random base selection
+ float max_base_diameter = diameter_* approx_overlap_ * 2.f;
+ max_base_diameter_sqr_ = max_base_diameter * max_base_diameter;
+
+ // normalize the delta
+ if (normalize_delta_)
+ {
+ float mean_dist = getMeanPointDensity <PointTarget> (target_, *target_indices_, 0.05f * diameter_, nr_threads_);
+ delta_ *= mean_dist;
+ }
+
+ // heuristic determination of the number of trials to have a high probability of finding a good solution
+ if (max_iterations_ == 0)
+ {
+ float first_est = std::log (small_error_) / std::log (1.0 - std::pow ((double) approx_overlap_, (double) min_iterations));
+ max_iterations_ = static_cast <int> (first_est / (diameter_fraction * approx_overlap_ * 2.f));
+ }
+
+ // set further parameter
+ if (score_threshold_ == FLT_MAX)
+ score_threshold_ = 1.f - approx_overlap_;
+
+ if (max_iterations_ < 4)
+ max_iterations_ = 4;
+
+ if (max_runtime_ < 1)
+ max_runtime_ = INT_MAX;
+
+ // calculate internal parameters based on the estimated point density
+ max_pair_diff_ = delta_ * 2.f;
+ max_edge_diff_ = delta_ * 4.f;
+ coincidation_limit_ = delta_ * 2.f; // EDITED: originally std::sqrt (delta_ * 2.f)
+ max_mse_ = powf (delta_* 2.f, 2.f);
+ max_inlier_dist_sqr_ = powf (delta_ * 2.f, 2.f);
+
+ // reset fitness_score
+ fitness_score_ = FLT_MAX;
+
+ return (true);
+}
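// A worked instance of the trial-count heuristic above, with illustrative inputs:
// for approx_overlap_ = 0.5, small_error_ = 1e-5 and min_iterations = 4,
//
//   first_est = log (1e-5) / log (1 - 0.5^4) = -11.513 / -0.0645 ~ 178.4
//
// and with diameter_fraction = 0.3 the number of trials becomes
//
//   max_iterations_ = 178.4 / (0.3 * 0.5 * 2) ~ 594 (after truncation to int).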
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::selectBase (
+ std::vector <int> &base_indices,
+ float (&ratio)[2])
+{
+ const float too_close_sqr = max_base_diameter_sqr_*0.01;
+
+ Eigen::VectorXf coefficients (4);
+ pcl::SampleConsensusModelPlane <PointTarget> plane (target_);
+ plane.setIndices (target_indices_);
+ Eigen::Vector4f centre_pt;
+ float nearest_to_plane = FLT_MAX;
+
+ // repeat base search until a valid quadruple is found or ransac_iterations_ tries were unsuccessful
+ for (int i = 0; i < ransac_iterations_; i++)
+ {
+ // randomly select an appropriate point triple
+ if (selectBaseTriangle (base_indices) < 0)
+ continue;
+
+ std::vector <int> base_triple (base_indices.begin (), base_indices.end () - 1);
+ plane.computeModelCoefficients (base_triple, coefficients);
+ pcl::compute3DCentroid (*target_, base_triple, centre_pt);
+
+ // loop over all points in the target cloud to find the most suitable fourth point
+ const PointTarget *pt1 = &(target_->points[base_indices[0]]);
+ const PointTarget *pt2 = &(target_->points[base_indices[1]]);
+ const PointTarget *pt3 = &(target_->points[base_indices[2]]);
+
+ for (std::vector <int>::iterator it = target_indices_->begin (), it_e = target_indices_->end (); it != it_e; it++)
+ {
+ const PointTarget *pt4 = &(target_->points[*it]);
+
+ float d1 = pcl::squaredEuclideanDistance (*pt4, *pt1);
+ float d2 = pcl::squaredEuclideanDistance (*pt4, *pt2);
+ float d3 = pcl::squaredEuclideanDistance (*pt4, *pt3);
+ float d4 = (pt4->getVector3fMap () - centre_pt.head (3)).squaredNorm ();
+
+ // check distance between points w.r.t minimum sampling distance; EDITED -> 4th point now also limited by max base line
+ if (d1 < too_close_sqr || d2 < too_close_sqr || d3 < too_close_sqr || d4 < too_close_sqr ||
+ d1 > max_base_diameter_sqr_ || d2 > max_base_diameter_sqr_ || d3 > max_base_diameter_sqr_)
+ continue;
+
+ // check distance to plane to get point closest to plane
+ float dist_to_plane = pcl::pointToPlaneDistance (*pt4, coefficients);
+ if (dist_to_plane < nearest_to_plane)
+ {
+ base_indices[3] = *it;
+ nearest_to_plane = dist_to_plane;
+ }
+ }
+
+ // check if at least one point fulfilled the conditions
+ if (nearest_to_plane != FLT_MAX)
+ {
+ // order points to build the largest quadrangle and calculate the intersection ratios of the diagonals
+ setupBase (base_indices, ratio);
+ return (0);
+ }
+ }
+
+ // return unsuccessful if no quadruple was selected
+ return (-1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::selectBaseTriangle (std::vector <int> &base_indices)
+{
+ int nr_points = static_cast <int> (target_indices_->size ());
+ float best_t = 0.f;
+
+ // choose random first point
+ base_indices[0] = (*target_indices_)[rand () % nr_points];
+ int *index1 = &base_indices[0];
+
+ // random search for 2 other points (as far away as overlap allows)
+ for (int i = 0; i < ransac_iterations_; i++)
+ {
+ int *index2 = &(*target_indices_)[rand () % nr_points];
+ int *index3 = &(*target_indices_)[rand () % nr_points];
+
+ Eigen::Vector3f u = target_->points[*index2].getVector3fMap () - target_->points[*index1].getVector3fMap ();
+ Eigen::Vector3f v = target_->points[*index3].getVector3fMap () - target_->points[*index1].getVector3fMap ();
+ float t = u.cross (v).squaredNorm (); // triangle area (0.5 * sqrt(t)) should be maximal
+
+ // check for most suitable point triple
+ if (t > best_t && u.squaredNorm () < max_base_diameter_sqr_ && v.squaredNorm () < max_base_diameter_sqr_)
+ {
+ best_t = t;
+ base_indices[1] = *index2;
+ base_indices[2] = *index3;
+ }
+ }
+
+ // return if a triplet could be selected
+ return (best_t == 0.f ? -1 : 0);
+}
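// selectBaseTriangle maximizes |u x v|^2, i.e. four times the squared triangle area, so the
// chosen triple spans the widest triangle the overlap limit allows. A tiny standalone check
// of that relation (the corner coordinates in the comment are illustrative):
#include <Eigen/Core>
#include <Eigen/Geometry>

static float
triangleAreaSketch (const Eigen::Vector3f &a, const Eigen::Vector3f &b, const Eigen::Vector3f &c)
{
  // e.g. (0,0,0), (1,0,0), (0,1,0) yields an area of 0.5
  return (0.5f * (b - a).cross (c - a).norm ());
}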
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::setupBase (
+ std::vector <int> &base_indices,
+ float (&ratio)[2])
+{
+ float best_t = FLT_MAX;
+ const std::vector <int> copy (base_indices.begin (), base_indices.end ());
+ std::vector <int> temp (base_indices.begin (), base_indices.end ());
+
+ // loop over all combinations of base points
+ for (std::vector <int>::const_iterator i = copy.begin (), i_e = copy.end (); i != i_e; i++)
+ for (std::vector <int>::const_iterator j = copy.begin (), j_e = copy.end (); j != j_e; j++)
+ {
+ if (i == j)
+ continue;
+
+ for (std::vector <int>::const_iterator k = copy.begin (), k_e = copy.end (); k != k_e; k++)
+ {
+ if (k == j || k == i)
+ continue;
+
+ std::vector <int>::const_iterator l = copy.begin ();
+ while (l == i || l == j || l == k)
+ l++;
+
+ temp[0] = *i;
+ temp[1] = *j;
+ temp[2] = *k;
+ temp[3] = *l;
+
+ // calculate diagonal intersection ratios and check for suitable segment to segment distances
+ float ratio_temp[2];
+ float t = segmentToSegmentDist (temp, ratio_temp);
+ if (t < best_t)
+ {
+ best_t = t;
+ ratio[0] = ratio_temp[0];
+ ratio[1] = ratio_temp[1];
+ base_indices = temp;
+ }
+ }
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> float
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::segmentToSegmentDist (
+ const std::vector <int> &base_indices,
+ float (&ratio)[2])
+{
+ // get point vectors
+ Eigen::Vector3f u = target_->points[base_indices[1]].getVector3fMap () - target_->points[base_indices[0]].getVector3fMap ();
+ Eigen::Vector3f v = target_->points[base_indices[3]].getVector3fMap () - target_->points[base_indices[2]].getVector3fMap ();
+ Eigen::Vector3f w = target_->points[base_indices[0]].getVector3fMap () - target_->points[base_indices[2]].getVector3fMap ();
+
+ // calculate segment distances
+ float a = u.dot (u);
+ float b = u.dot (v);
+ float c = v.dot (v);
+ float d = u.dot (w);
+ float e = v.dot (w);
+ float D = a * c - b * b;
+ float sN = 0.f, sD = D;
+ float tN = 0.f, tD = D;
+
+ // check segments
+ if (D < small_error_)
+ {
+ sN = 0.f;
+ sD = 1.f;
+ tN = e;
+ tD = c;
+ }
+ else
+ {
+ sN = (b * e - c * d);
+ tN = (a * e - b * d);
+
+ if (sN < 0.f)
+ {
+ sN = 0.f;
+ tN = e;
+ tD = c;
+ }
+ else if (sN > sD)
+ {
+ sN = sD;
+ tN = e + b;
+ tD = c;
+ }
+ }
+
+ if (tN < 0.f)
+ {
+ tN = 0.f;
+
+ if (-d < 0.f)
+ sN = 0.f;
+
+ else if (-d > a)
+ sN = sD;
+
+ else
+ {
+ sN = -d;
+ sD = a;
+ }
+ }
+
+ else if (tN > tD)
+ {
+ tN = tD;
+
+ if ((-d + b) < 0.f)
+ sN = 0.f;
+
+ else if ((-d + b) > a)
+ sN = sD;
+
+ else
+ {
+ sN = (-d + b);
+ sD = a;
+ }
+ }
+
+ // set intersection ratios
+ ratio[0] = (std::abs (sN) < small_error_) ? 0.f : sN / sD;
+ ratio[1] = (std::abs (tN) < small_error_) ? 0.f : tN / tD;
+
+ Eigen::Vector3f x = w + (ratio[0] * u) - (ratio[1] * v);
+ return (x.norm ());
+}
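// A worked example for the routine above, with illustrative coordinates: for a planar base
// forming a unit square, with diagonals p0 = (0,0,0) -> p1 = (1,1,0) and
// p2 = (1,0,0) -> p3 = (0,1,0), the diagonals intersect at (0.5, 0.5, 0), so the routine
// returns ratio[0] = ratio[1] = 0.5 and a segment-to-segment distance of 0. For a slightly
// non-planar base the returned value is the gap between the two diagonals at their closest
// points, which setupBase tries to minimize when ordering the base points.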
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::bruteForceCorrespondences (
+ int idx1,
+ int idx2,
+ pcl::Correspondences &pairs)
+{
+ const float max_norm_diff = 0.5f * max_norm_diff_ * M_PI / 180.f;
+
+ // calculate reference segment distance and normal angle
+ float ref_dist = pcl::euclideanDistance (target_->points[idx1], target_->points[idx2]);
+ float ref_norm_angle = (use_normals_ ? (target_normals_->points[idx1].getNormalVector3fMap () -
+ target_normals_->points[idx2].getNormalVector3fMap ()).norm () : 0.f);
+
+ // loop over all pairs of points in source point cloud
+ std::vector <int>::iterator it_out = source_indices_->begin (), it_out_e = source_indices_->end () - 1;
+ std::vector <int>::iterator it_in, it_in_e = source_indices_->end ();
+ for ( ; it_out != it_out_e; it_out++)
+ {
+ it_in = it_out + 1;
+ const PointSource *pt1 = &(*input_)[*it_out];
+ for ( ; it_in != it_in_e; it_in++)
+ {
+ const PointSource *pt2 = &(*input_)[*it_in];
+
+ // check point distance compared to reference dist (from base)
+ float dist = pcl::euclideanDistance (*pt1, *pt2);
+ if (std::abs(dist - ref_dist) < max_pair_diff_)
+ {
+ // add here normal evaluation if normals are given
+ if (use_normals_)
+ {
+ const NormalT *pt1_n = &(source_normals_->points[*it_out]);
+ const NormalT *pt2_n = &(source_normals_->points[*it_in]);
+
+ float norm_angle_1 = (pt1_n->getNormalVector3fMap () - pt2_n->getNormalVector3fMap ()).norm ();
+ float norm_angle_2 = (pt1_n->getNormalVector3fMap () + pt2_n->getNormalVector3fMap ()).norm ();
+
+ float norm_diff = std::min <float> (std::abs (norm_angle_1 - ref_norm_angle), std::abs (norm_angle_2 - ref_norm_angle));
+ if (norm_diff > max_norm_diff)
+ continue;
+ }
+
+ pairs.push_back (pcl::Correspondence (*it_in, *it_out, dist));
+ pairs.push_back (pcl::Correspondence (*it_out, *it_in, dist));
+ }
+ }
+ }
+
+ // return success if at least one correspondence was found
+ return (pairs.size () == 0 ? -1 : 0);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::determineBaseMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ const pcl::Correspondences &pairs_a,
+ const pcl::Correspondences &pairs_b,
+ const float (&ratio)[2])
+{
+ // calculate edge lengths of base
+ float dist_base[4];
+ dist_base[0] = pcl::euclideanDistance (target_->points[base_indices[0]], target_->points[base_indices[2]]);
+ dist_base[1] = pcl::euclideanDistance (target_->points[base_indices[0]], target_->points[base_indices[3]]);
+ dist_base[2] = pcl::euclideanDistance (target_->points[base_indices[1]], target_->points[base_indices[2]]);
+ dist_base[3] = pcl::euclideanDistance (target_->points[base_indices[1]], target_->points[base_indices[3]]);
+
+ // loop over first point pair correspondences and store intermediate points 'e' in new point cloud
+ PointCloudSourcePtr cloud_e (new PointCloudSource);
+ cloud_e->resize (pairs_a.size () * 2);
+ PointCloudSourceIterator it_pt = cloud_e->begin ();
+ for (pcl::Correspondences::const_iterator it_pair = pairs_a.begin (), it_pair_e = pairs_a.end () ; it_pair != it_pair_e; it_pair++)
+ {
+ const PointSource *pt1 = &(input_->points[it_pair->index_match]);
+ const PointSource *pt2 = &(input_->points[it_pair->index_query]);
+
+ // calculate intermediate points using both ratios from base (r1,r2)
+ for (int i = 0; i < 2; i++, it_pt++)
+ {
+ it_pt->x = pt1->x + ratio[i] * (pt2->x - pt1->x);
+ it_pt->y = pt1->y + ratio[i] * (pt2->y - pt1->y);
+ it_pt->z = pt1->z + ratio[i] * (pt2->z - pt1->z);
+ }
+ }
+
+ // initialize new kd tree of intermediate points from first point pair correspondences
+ KdTreeReciprocalPtr tree_e (new KdTreeReciprocal);
+ tree_e->setInputCloud (cloud_e);
+
+ std::vector <int> ids;
+ std::vector <float> dists_sqr;
+
+ // loop over second point pair correspondences
+ for (pcl::Correspondences::const_iterator it_pair = pairs_b.begin (), it_pair_e = pairs_b.end () ; it_pair != it_pair_e; it_pair++)
+ {
+ const PointTarget *pt1 = &(input_->points[it_pair->index_match]);
+ const PointTarget *pt2 = &(input_->points[it_pair->index_query]);
+
+ // calculate intermediate points using both ratios from base (r1,r2)
+ for (int i = 0; i < 2; i++)
+ {
+ PointTarget pt_e;
+ pt_e.x = pt1->x + ratio[i] * (pt2->x - pt1->x);
+ pt_e.y = pt1->y + ratio[i] * (pt2->y - pt1->y);
+ pt_e.z = pt1->z + ratio[i] * (pt2->z - pt1->z);
+
+ // search for corresponding intermediate points
+ tree_e->radiusSearch (pt_e, coincidation_limit_, ids, dists_sqr);
+ for (std::vector <int>::iterator it = ids.begin (), it_e = ids.end (); it != it_e; it++)
+ {
+ std::vector <int> match_indices (4);
+
+ match_indices[0] = pairs_a[static_cast <int> (std::floor ((float)(*it/2.f)))].index_match;
+ match_indices[1] = pairs_a[static_cast <int> (std::floor ((float)(*it/2.f)))].index_query;
+ match_indices[2] = it_pair->index_match;
+ match_indices[3] = it_pair->index_query;
+
+        // coarse check of the match based on edge lengths (valid due to rigid-body motion)
+ if (checkBaseMatch (match_indices, dist_base) < 0)
+ continue;
+
+ matches.push_back (match_indices);
+ }
+ }
+ }
+
+  // return unsuccessful if no match was found
+ return (matches.size () > 0 ? 0 : -1);
+}
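
determineBaseMatches applies the core 4PCS idea: each candidate point pair, interpolated with the two base ratios, yields intermediate points, and a pair from the first correspondence set together with a pair from the second set forms a candidate quadruple when their intermediate points nearly coincide. The following brute-force sketch of that coincidence test (hypothetical helper names, a plain double loop instead of the kd-tree) is only meant to illustrate the principle.

#include <Eigen/Core>
#include <utility>
#include <vector>

// intermediate point e = p1 + r * (p2 - p1) for a candidate pair and base ratio r
inline Eigen::Vector3f
intermediatePoint (const Eigen::Vector3f &p1, const Eigen::Vector3f &p2, float r)
{
  return (p1 + r * (p2 - p1));
}

// indices of (pair_a, pair_b) combinations whose intermediate points coincide within 'limit'
std::vector<std::pair<std::size_t, std::size_t> >
coincidentPairs (const std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > &pairs_a,
                 const std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > &pairs_b,
                 float ratio,
                 float limit)
{
  std::vector<std::pair<std::size_t, std::size_t> > matches;
  for (std::size_t i = 0; i < pairs_a.size (); ++i)
  {
    const Eigen::Vector3f e_a = intermediatePoint (pairs_a[i].first, pairs_a[i].second, ratio);
    for (std::size_t j = 0; j < pairs_b.size (); ++j)
    {
      const Eigen::Vector3f e_b = intermediatePoint (pairs_b[j].first, pairs_b[j].second, ratio);
      if ((e_a - e_b).norm () < limit)   // intermediate points coincide -> candidate quadruple
        matches.push_back (std::make_pair (i, j));
    }
  }
  return (matches);
}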
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::checkBaseMatch (
+ const std::vector <int> &match_indices,
+ const float (&dist_ref)[4])
+{
+ float d0 = pcl::euclideanDistance (input_->points[match_indices[0]], input_->points[match_indices[2]]);
+ float d1 = pcl::euclideanDistance (input_->points[match_indices[0]], input_->points[match_indices[3]]);
+ float d2 = pcl::euclideanDistance (input_->points[match_indices[1]], input_->points[match_indices[2]]);
+ float d3 = pcl::euclideanDistance (input_->points[match_indices[1]], input_->points[match_indices[3]]);
+
+ // check edge distances of match w.r.t the base
+ return (std::abs (d0 - dist_ref[0]) < max_edge_diff_ && std::abs (d1 - dist_ref[1]) < max_edge_diff_ &&
+ std::abs (d2 - dist_ref[2]) < max_edge_diff_ && std::abs (d3 - dist_ref[3]) < max_edge_diff_) ? 0 : -1;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::handleMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ MatchingCandidates &candidates)
+{
+ candidates.resize (1);
+ float fitness_score = FLT_MAX;
+
+  // loop over all candidate matches
+ for (std::vector <std::vector <int> >::iterator match_indices = matches.begin (), it_e = matches.end (); match_indices != it_e; match_indices++)
+ {
+ Eigen::Matrix4f transformation_temp;
+ pcl::Correspondences correspondences_temp;
+
+    // determine correspondences between base and match according to their distance to the centroid
+ linkMatchWithBase (base_indices, *match_indices, correspondences_temp);
+
+    // check match based on residuals of the corresponding points after transformation
+ if (validateMatch (base_indices, *match_indices, correspondences_temp, transformation_temp) < 0)
+ continue;
+
+    // check the resulting transformation using a sub-sample of the source point cloud and compare to previous matches
+ if (validateTransformation (transformation_temp, fitness_score) < 0)
+ continue;
+
+ // store best match as well as associated fitness_score and transformation
+ candidates[0].fitness_score = fitness_score;
+ candidates [0].transformation = transformation_temp;
+ correspondences_temp.erase (correspondences_temp.end () - 1);
+ candidates[0].correspondences = correspondences_temp;
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::linkMatchWithBase (
+ const std::vector <int> &base_indices,
+ std::vector <int> &match_indices,
+ pcl::Correspondences &correspondences)
+{
+ // calculate centroid of base and target
+ Eigen::Vector4f centre_base, centre_match;
+ pcl::compute3DCentroid (*target_, base_indices, centre_base);
+ pcl::compute3DCentroid (*input_, match_indices, centre_match);
+
+ PointTarget centre_pt_base;
+ centre_pt_base.x = centre_base[0];
+ centre_pt_base.y = centre_base[1];
+ centre_pt_base.z = centre_base[2];
+
+ PointSource centre_pt_match;
+ centre_pt_match.x = centre_match[0];
+ centre_pt_match.y = centre_match[1];
+ centre_pt_match.z = centre_match[2];
+
+ // find corresponding points according to their distance to the centroid
+ std::vector <int> copy = match_indices;
+
+ std::vector <int>::const_iterator it_base = base_indices.begin (), it_base_e = base_indices.end ();
+ std::vector <int>::iterator it_match, it_match_e = copy.end ();
+ std::vector <int>::iterator it_match_orig = match_indices.begin ();
+ for (; it_base != it_base_e; it_base++, it_match_orig++)
+ {
+ float dist_sqr_1 = pcl::squaredEuclideanDistance (target_->points[*it_base], centre_pt_base);
+ float best_diff_sqr = FLT_MAX;
+ int best_index;
+
+ for (it_match = copy.begin (); it_match != it_match_e; it_match++)
+ {
+ // calculate difference of distances to centre point
+ float dist_sqr_2 = pcl::squaredEuclideanDistance (input_->points[*it_match], centre_pt_match);
+ float diff_sqr = std::abs(dist_sqr_1 - dist_sqr_2);
+
+ if (diff_sqr < best_diff_sqr)
+ {
+ best_diff_sqr = diff_sqr;
+ best_index = *it_match;
+ }
+ }
+
+ // assign new correspondence and update indices of matched targets
+ correspondences.push_back (pcl::Correspondence (best_index, *it_base, best_diff_sqr));
+ *it_match_orig = best_index;
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateMatch (
+ const std::vector <int> &base_indices,
+ const std::vector <int> &match_indices,
+ const pcl::Correspondences &correspondences,
+ Eigen::Matrix4f &transformation)
+{
+  // only use a triplet of points to simplify the process (possible due to the planar case)
+ pcl::Correspondences correspondences_temp = correspondences;
+ correspondences_temp.erase (correspondences_temp.end () - 1);
+
+ // estimate transformation between correspondence set
+ transformation_estimation_->estimateRigidTransformation (*input_, *target_, correspondences_temp, transformation);
+
+  // transform the match points
+ PointCloudSource match_transformed;
+ pcl::transformPointCloud (*input_, match_indices, match_transformed, transformation);
+
+ // calculate residuals of transformation and check against maximum threshold
+ std::size_t nr_points = correspondences_temp.size ();
+ float mse = 0.f;
+ for (std::size_t i = 0; i < nr_points; i++)
+ mse += pcl::squaredEuclideanDistance (match_transformed.points [i], target_->points [base_indices[i]]);
+
+ mse /= nr_points;
+ return (mse < max_mse_ ? 0 : -1);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateTransformation (
+ Eigen::Matrix4f &transformation,
+ float &fitness_score)
+{
+ // transform source point cloud
+ PointCloudSource source_transformed;
+ pcl::transformPointCloud (*input_, *source_indices_, source_transformed, transformation);
+
+ std::size_t nr_points = source_transformed.size ();
+ std::size_t terminate_value = fitness_score > 1 ? 0 : static_cast <std::size_t> ((1.f - fitness_score) * nr_points);
+
+ float inlier_score_temp = 0;
+ std::vector <int> ids;
+ std::vector <float> dists_sqr;
+ PointCloudSourceIterator it = source_transformed.begin ();
+
+ for (std::size_t i = 0; i < nr_points; it++, i++)
+ {
+ // search for nearest point using kd tree search
+ tree_->nearestKSearch (*it, 1, ids, dists_sqr);
+ inlier_score_temp += (dists_sqr[0] < max_inlier_dist_sqr_ ? 1 : 0);
+
+    // early termination
+ if (nr_points - i + inlier_score_temp < terminate_value)
+ break;
+ }
+
+  // check current costs and return unsuccessful if larger than the previous ones
+ inlier_score_temp /= static_cast <float> (nr_points);
+ float fitness_score_temp = 1.f - inlier_score_temp;
+
+ if (fitness_score_temp > fitness_score)
+ return (-1);
+
+ fitness_score = fitness_score_temp;
+ return (0);
+}
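
The fitness score above is one minus the inlier fraction of the transformed source subsample, and the loop terminates early once even counting all remaining points as inliers could no longer beat the previous best score. A self-contained sketch of that scoring scheme with a brute-force nearest-neighbour search (hypothetical function, illustrative only):

#include <Eigen/Core>
#include <algorithm>
#include <limits>
#include <vector>

float
inlierFitness (const std::vector<Eigen::Vector3f> &source_transformed,
               const std::vector<Eigen::Vector3f> &target,
               float max_inlier_dist_sqr,
               float previous_best)   // best (smallest) fitness score found so far
{
  const std::size_t nr_points = source_transformed.size ();
  if (nr_points == 0)
    return (1.f);

  // number of inliers needed to improve on the previous best score
  std::size_t needed = 0;
  if (previous_best <= 1.f)
    needed = static_cast<std::size_t> ((1.f - previous_best) * static_cast<float> (nr_points));

  float inliers = 0.f;
  for (std::size_t i = 0; i < nr_points; ++i)
  {
    // brute-force nearest neighbour in the target cloud
    float best_sqr = std::numeric_limits<float>::max ();
    for (std::size_t j = 0; j < target.size (); ++j)
      best_sqr = std::min (best_sqr, (source_transformed[i] - target[j]).squaredNorm ());

    inliers += (best_sqr < max_inlier_dist_sqr) ? 1.f : 0.f;

    // even if all remaining points were inliers, 'needed' could not be reached -> stop
    if (static_cast<float> (nr_points - i) + inliers < static_cast<float> (needed))
      break;
  }
  return (1.f - inliers / static_cast<float> (nr_points));
}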
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::finalCompute (
+ const std::vector <MatchingCandidates > &candidates)
+{
+  // get the best fitness_score over all tries
+ int nr_candidates = static_cast <int> (candidates.size ());
+ int best_index = -1;
+ float best_score = FLT_MAX;
+ for (int i = 0; i < nr_candidates; i++)
+ {
+ const float &fitness_score = candidates [i][0].fitness_score;
+ if (fitness_score < best_score)
+ {
+ best_score = fitness_score;
+ best_index = i;
+ }
+ }
+
+ // check if a valid candidate was available
+ if (!(best_index < 0))
+ {
+ fitness_score_ = candidates [best_index][0].fitness_score;
+ final_transformation_ = candidates [best_index][0].transformation;
+ *correspondences_ = candidates [best_index][0].correspondences;
+
+    // here we define convergence as the resulting fitness_score being below the score threshold
+ converged_ = fitness_score_ < score_threshold_;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // PCL_REGISTRATION_IMPL_IA_4PCS_H_
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception, Inc.
+ *
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_REGISTRATION_IMPL_IA_KFPCS_H_
+#define PCL_REGISTRATION_IMPL_IA_KFPCS_H_
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar>
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::KFPCSInitialAlignment () :
+ lower_trl_boundary_ (-1.f),
+ upper_trl_boundary_ (-1.f),
+ lambda_ (0.5f),
+ candidates_ (),
+ use_trl_score_ (false),
+ indices_validation_ (new std::vector <int>)
+{
+ reg_name_ = "pcl::registration::KFPCSInitialAlignment";
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> bool
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ()
+{
+ // due to sparse keypoint cloud, do not normalize delta with estimated point density
+ if (normalize_delta_)
+ {
+ PCL_WARN ("[%s::initCompute] Delta should be set according to keypoint precision! Normalization according to point cloud density is ignored.", reg_name_.c_str ());
+ normalize_delta_ = false;
+ }
+
+ // initialize as in fpcs
+ pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::initCompute ();
+
+  // set the threshold values with respect to keypoint characteristics
+ max_pair_diff_ = delta_ * 1.414f; // diff between 2 points of delta_ accuracy
+ coincidation_limit_ = delta_ * 2.828f; // diff between diff of 2 points
+ max_edge_diff_ = delta_ * 3.f; // diff between 2 points + some inaccuracy due to quadruple orientation
+ max_mse_ = powf (delta_ * 4.f, 2.f); // diff between 2 points + some registration inaccuracy
+ max_inlier_dist_sqr_ = powf (delta_ * 8.f, 2.f); // set rel. high, because MSAC is used (residual based score function)
+
+ // check use of translation costs and calculate upper boundary if not set by user
+ if (upper_trl_boundary_ < 0)
+ upper_trl_boundary_ = diameter_ * (1.f - approx_overlap_) * 0.5f;
+
+ if (!(lower_trl_boundary_ < 0) && upper_trl_boundary_ > lower_trl_boundary_)
+ use_trl_score_ = true;
+ else
+ lambda_ = 0.f;
+
+  // generate a subset of indices of size ransac_iterations_ on which to evaluate candidates
+ std::size_t nr_indices = indices_->size ();
+ if (nr_indices < ransac_iterations_)
+ indices_validation_ = indices_;
+ else
+ for (int i = 0; i < ransac_iterations_; i++)
+ indices_validation_->push_back ((*indices_)[rand () % nr_indices]);
+
+ return (true);
+}
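
The thresholds above are simple multiples of the keypoint accuracy delta_ (1.414 ≈ √2, 2.828 ≈ 2·√2). A small worked example with an assumed accuracy of 1 cm, purely illustrative:

#include <cmath>
#include <iostream>

int main ()
{
  const float delta = 0.01f;   // assumed keypoint accuracy of 1 cm

  const float max_pair_diff       = delta * std::sqrt (2.f);        // ~0.0141: distance error between two points of delta accuracy
  const float coincidation_limit  = delta * 2.f * std::sqrt (2.f);  // ~0.0283: error of the difference of two such distances
  const float max_edge_diff       = delta * 3.f;                    //  0.03  : edge-length tolerance of a candidate quadruple
  const float max_mse             = std::pow (delta * 4.f, 2.f);    //  0.0016: residual threshold after alignment
  const float max_inlier_dist_sqr = std::pow (delta * 8.f, 2.f);    //  0.0064: generous inlier radius for the MSAC score

  std::cout << max_pair_diff << " " << coincidation_limit << " " << max_edge_diff
            << " " << max_mse << " " << max_inlier_dist_sqr << std::endl;
  return (0);
}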
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::handleMatches (
+ const std::vector <int> &base_indices,
+ std::vector <std::vector <int> > &matches,
+ MatchingCandidates &candidates)
+{
+ candidates.clear ();
+ float fitness_score = FLT_MAX;
+
+  // loop over all candidate matches
+ for (std::vector <std::vector <int> >::iterator match_indices = matches.begin (), it_e = matches.end (); match_indices != it_e; match_indices++)
+ {
+ Eigen::Matrix4f transformation_temp;
+ pcl::Correspondences correspondences_temp;
+ fitness_score = FLT_MAX; // reset to FLT_MAX to accept all candidates and not only best
+
+    // determine correspondences between base and match according to their distance to the centroid
+ linkMatchWithBase (base_indices, *match_indices, correspondences_temp);
+
+ // check match based on residuals of the corresponding points after transformation
+ if (validateMatch (base_indices, *match_indices, correspondences_temp, transformation_temp) < 0)
+ continue;
+
+ // check resulting transformation using a sub sample of the source point cloud
+ // all candidates are stored and later sorted according to their fitness score
+ validateTransformation (transformation_temp, fitness_score);
+
+    // store every valid match together with its associated score and transformation
+ candidates.push_back (MatchingCandidate (fitness_score, correspondences_temp, transformation_temp));
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::validateTransformation (
+ Eigen::Matrix4f &transformation,
+ float &fitness_score)
+{
+ // transform sub sampled source cloud
+ PointCloudSource source_transformed;
+ pcl::transformPointCloud (*input_, *indices_validation_, source_transformed, transformation);
+
+ const std::size_t nr_points = source_transformed.size ();
+ float score_a = 0.f, score_b = 0.f;
+
+ // residual costs based on mse
+ std::vector <int> ids;
+ std::vector <float> dists_sqr;
+ for (PointCloudSourceIterator it = source_transformed.begin (), it_e = source_transformed.end (); it != it_e; ++it)
+ {
+ // search for nearest point using kd tree search
+ tree_->nearestKSearch (*it, 1, ids, dists_sqr);
+ score_a += (dists_sqr[0] < max_inlier_dist_sqr_ ? dists_sqr[0] : max_inlier_dist_sqr_); // MSAC
+ }
+
+ score_a /= (max_inlier_dist_sqr_ * nr_points); // MSAC
+ //score_a = 1.f - (1.f - score_a) / (1.f - approx_overlap_); // make score relative to estimated overlap
+
+ // translation score (solutions with small translation are down-voted)
+ float scale = 1.f;
+ if (use_trl_score_)
+ {
+ float trl = transformation.rightCols <1> ().head (3).norm ();
+ float trl_ratio = (trl - lower_trl_boundary_) / (upper_trl_boundary_ - lower_trl_boundary_);
+
+ score_b = (trl_ratio < 0.f ? 1.f : (trl_ratio > 1.f ? 0.f : 0.5f * sin (M_PI * trl_ratio + M_PI_2) + 0.5f)); // sinusoidal costs
+ scale += lambda_;
+ }
+
+  // calculate the fitness and return unsuccessful if larger than the previous one
+ float fitness_score_temp = (score_a + lambda_ * score_b) / scale;
+ if (fitness_score_temp > fitness_score)
+ return (-1);
+
+ fitness_score = fitness_score_temp;
+ return (0);
+}
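
The combined cost blends the normalized MSAC residual (score_a) with a sinusoidal penalty on small translations (score_b), weighted by lambda_; note that 0.5·sin(π·r + π/2) + 0.5 equals 0.5·(1 + cos(π·r)). A standalone sketch of that blend, assuming the translation term is enabled (hypothetical function, not part of the PCL API):

#include <cmath>

float
blendedScore (float msac_score,         // score_a in [0, 1], already normalized
              float translation_norm,   // norm of the candidate translation
              float lower,              // lower translation boundary
              float upper,              // upper translation boundary
              float lambda)             // weight of the translation term
{
  const float r = (translation_norm - lower) / (upper - lower);

  // sinusoidal penalty: 1 for translations below 'lower', 0 above 'upper'
  float trl_score;
  if (r < 0.f)
    trl_score = 1.f;
  else if (r > 1.f)
    trl_score = 0.f;
  else
    trl_score = 0.5f * std::sin (static_cast<float> (M_PI) * r + static_cast<float> (M_PI_2)) + 0.5f;

  return ((msac_score + lambda * trl_score) / (1.f + lambda));
}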
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::finalCompute (
+ const std::vector <MatchingCandidates > &candidates)
+{
+ // reorganize candidates into single vector
+ size_t total_size = 0;
+ std::vector <MatchingCandidates>::const_iterator it, it_e = candidates.end ();
+ for (it = candidates.begin (); it != it_e; it++)
+ total_size += it->size ();
+
+ candidates_.clear ();
+ candidates_.reserve (total_size);
+
+ MatchingCandidates::const_iterator it_curr, it_curr_e;
+ for (it = candidates.begin (); it != it_e; it++)
+ for (it_curr = it->begin (), it_curr_e = it->end (); it_curr != it_curr_e; it_curr++)
+ candidates_.push_back (*it_curr);
+
+  // sort according to score value
+ std::sort (candidates_.begin (), candidates_.end (), by_score ());
+
+ // return here if no score was valid, i.e. all scores are FLT_MAX
+ if (candidates_[0].fitness_score == FLT_MAX)
+ {
+ converged_ = false;
+ return;
+ }
+
+ // save best candidate as output result
+ // note, all other candidates are accessible via getNBestCandidates () and getTBestCandidates ()
+ fitness_score_ = candidates_ [0].fitness_score;
+ final_transformation_ = candidates_ [0].transformation;
+ *correspondences_ = candidates_ [0].correspondences;
+
+  // here we define convergence as the resulting score being below the threshold
+ converged_ = fitness_score_ < score_threshold_;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::getNBestCandidates (
+ int n,
+ float min_angle3d,
+ float min_translation3d,
+ MatchingCandidates &candidates)
+{
+ candidates.clear ();
+
+ // loop over all candidates starting from the best one
+ for (MatchingCandidates::iterator it_candidate = candidates_.begin (), it_e = candidates_.end (); it_candidate != it_e; it_candidate++)
+ {
+ // stop if current candidate has no valid score
+ if (it_candidate->fitness_score == FLT_MAX)
+ return;
+
+    // check whether the current candidate is unique compared to the previously accepted ones, using the minimum angle and translation thresholds
+ bool unique = true;
+ MatchingCandidates::iterator it = candidates.begin (), it_e2 = candidates.end ();
+ while (unique && it != it_e2)
+ {
+ Eigen::Matrix4f diff = it_candidate->transformation.colPivHouseholderQr ().solve (it->transformation);
+ const float angle3d = Eigen::AngleAxisf (diff.block <3, 3> (0, 0)).angle ();
+ const float translation3d = diff.block <3, 1> (0, 3).norm ();
+ unique = angle3d > min_angle3d && translation3d > min_translation3d;
+ it++;
+ }
+
+ // add candidate to best candidates
+ if (unique)
+ candidates.push_back (*it_candidate);
+
+ // stop if n candidates are reached
+ if (candidates.size () == n)
+ return;
+ }
+}
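
Uniqueness of a candidate is decided from the relative transformation between two candidates: solving T_a · X = T_b for X (i.e. X = T_a⁻¹ · T_b) and comparing its rotation angle and translation norm against the minimum thresholds. A minimal sketch of that test in plain Eigen (hypothetical function name):

#include <Eigen/Dense>

bool
isDistinct (const Eigen::Matrix4f &t_a,
            const Eigen::Matrix4f &t_b,
            float min_angle3d,
            float min_translation3d)
{
  // relative transformation: solve t_a * diff = t_b, i.e. diff = t_a^-1 * t_b
  const Eigen::Matrix4f diff = t_a.colPivHouseholderQr ().solve (t_b);

  const float angle3d       = Eigen::AngleAxisf (diff.block<3, 3> (0, 0)).angle ();
  const float translation3d = diff.block<3, 1> (0, 3).norm ();

  // candidates count as distinct only if they differ in both rotation and translation
  return (angle3d > min_angle3d && translation3d > min_translation3d);
}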
+
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> void
+pcl::registration::KFPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::getTBestCandidates (
+ float t,
+ float min_angle3d,
+ float min_translation3d,
+ MatchingCandidates &candidates)
+{
+ candidates.clear ();
+
+ // loop over all candidates starting from the best one
+ for (MatchingCandidates::iterator it_candidate = candidates_.begin (), it_e = candidates_.end (); it_candidate != it_e; it_candidate++)
+ {
+    // stop if the current candidate's score exceeds the threshold
+ if (it_candidate->fitness_score > t)
+ return;
+
+    // check whether the current candidate is unique compared to the previously accepted ones, using the minimum angle and translation thresholds
+ bool unique = true;
+ MatchingCandidates::iterator it = candidates.begin (), it_e2 = candidates.end ();
+ while (unique && it != it_e2)
+ {
+ Eigen::Matrix4f diff = it_candidate->transformation.colPivHouseholderQr ().solve (it->transformation);
+ const float angle3d = Eigen::AngleAxisf (diff.block <3, 3> (0, 0)).angle ();
+ const float translation3d = diff.block <3, 1> (0, 3).norm ();
+ unique = angle3d > min_angle3d && translation3d > min_translation3d;
+ it++;
+ }
+
+ // add candidate to best candidates
+ if (unique)
+ candidates.push_back (*it_candidate);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // PCL_REGISTRATION_IMPL_IA_KFPCS_H_
template<typename PointT> struct NumTraits<pcl::ndt2d::NormalDist<PointT> >
{
typedef double Real;
+ typedef double Literal;
static Real dummy_precision () { return 1.0; }
enum {
IsComplex = 0,
{
PointCloudSource intm_cloud = output;
+ nr_iterations_ = 0;
+ converged_ = false;
+
if (guess != Eigen::Matrix4f::Identity ())
{
transformation_ = guess;
float self_similarity_a = static_cast<float> (pyramid_a->nr_features),
self_similarity_b = static_cast<float> (pyramid_b->nr_features);
PCL_DEBUG ("[pcl::PyramidFeatureMatching::comparePyramidFeatureHistograms] Self similarity measures: %f, %f\n", self_similarity_a, self_similarity_b);
- match_count /= sqrtf (self_similarity_a * self_similarity_b);
+ match_count /= std::sqrt (self_similarity_a * self_similarity_b);
return match_count;
}
float aux = range_it->first - range_it->second;
D += aux * aux;
}
- D = sqrtf (D);
+ D = std::sqrt (D);
nr_levels = static_cast<size_t> (ceilf (Log2 (D)));
PCL_DEBUG ("[pcl::PyramidFeatureHistogram::initializeHistogram] Pyramid will have %u levels with a hyper-parallelepiped diagonal size of %f\n", nr_levels, D);
for (size_t dim_i = 0; dim_i < nr_dimensions; ++dim_i)
{
bins_per_dimension[dim_i] =
- static_cast<size_t> (ceilf ((dimension_range_target_[dim_i].second - dimension_range_target_[dim_i].first) / (powf (2.0f, static_cast<float> (level_i)) * sqrtf (static_cast<float> (nr_dimensions)))));
- bin_step[dim_i] = powf (2.0f, static_cast<float> (level_i)) * sqrtf (static_cast<float> (nr_dimensions));
+ static_cast<size_t> (ceilf ((dimension_range_target_[dim_i].second - dimension_range_target_[dim_i].first) / (powf (2.0f, static_cast<float> (level_i)) * std::sqrt (static_cast<float> (nr_dimensions)))));
+ bin_step[dim_i] = powf (2.0f, static_cast<float> (level_i)) * std::sqrt (static_cast<float> (nr_dimensions));
}
hist_levels[level_i] = PyramidFeatureHistogramLevel (bins_per_dimension, bin_step);
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Open Perception, Inc.
+ * Copyright (C) 2008 Ben Gurion University of the Negev, Beer Sheva, Israel.
+ *
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_REGISTRATION_MATCHING_CANDIDATE_H_
+#define PCL_REGISTRATION_MATCHING_CANDIDATE_H_
+
+#include <pcl/registration/registration.h>
+#include <pcl/common/common.h>
+
+namespace pcl
+{
+ namespace registration
+ {
+    /** \brief Container for a matching candidate consisting of
+ *
+ * * fitness score value as a result of the matching algorithm
+ * * correspondences between source and target data set
+ * * transformation matrix calculated based on the correspondences
+ *
+ */
+ struct MatchingCandidate
+ {
+ /** \brief Constructor. */
+ MatchingCandidate () :
+ fitness_score (FLT_MAX),
+ correspondences (),
+ transformation (Eigen::Matrix4f::Identity ())
+ {};
+
+ /** \brief Value constructor. */
+ MatchingCandidate (float s, const pcl::Correspondences &c, const Eigen::Matrix4f &m) :
+ fitness_score (s),
+ correspondences (c),
+ transformation (m)
+ {};
+
+ /** \brief Destructor. */
+ ~MatchingCandidate ()
+ {};
+
+
+ /** \brief Fitness score of current candidate resulting from matching algorithm. */
+ float fitness_score;
+
+ /** \brief Correspondences between source <-> target. */
+ pcl::Correspondences correspondences;
+
+      /** \brief Corresponding transformation matrix retrieved using \a correspondences. */
+ Eigen::Matrix4f transformation;
+
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ };
+
+ typedef std::vector<MatchingCandidate, Eigen::aligned_allocator<MatchingCandidate> > MatchingCandidates;
+
+ /** \brief Sorting of candidates based on fitness score value. */
+ struct by_score
+ {
+ /** \brief Operator used to sort candidates based on fitness score. */
+ bool operator () (MatchingCandidate const &left, MatchingCandidate const &right)
+ {
+ return (left.fitness_score < right.fitness_score);
+ }
+ };
+
+ }; // namespace registration
+}; // namespace pcl
+
+
+#endif // PCL_REGISTRATION_MATCHING_CANDIDATE_H_
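
A short usage example of the container defined above, assuming the new header ships as <pcl/registration/matching_candidate.h> (illustrative only):

#include <pcl/registration/matching_candidate.h>
#include <algorithm>
#include <iostream>

int main ()
{
  using pcl::registration::MatchingCandidate;
  using pcl::registration::MatchingCandidates;

  MatchingCandidates candidates;
  candidates.push_back (MatchingCandidate (0.42f, pcl::Correspondences (), Eigen::Matrix4f::Identity ()));
  candidates.push_back (MatchingCandidate (0.17f, pcl::Correspondences (), Eigen::Matrix4f::Identity ()));

  // sort ascending by fitness score, so the best candidate comes first
  std::sort (candidates.begin (), candidates.end (), pcl::registration::by_score ());
  std::cout << "best fitness score: " << candidates[0].fitness_score << std::endl;
  return (0);
}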
for (size_t i = 0; i < indices_->size (); ++i)
// Calculate the distance from the point to the circle as the difference between
// dist(point,circle_origin) and circle_radius
- distances[i] = fabsf (sqrtf (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ distances[i] = fabsf (std::sqrt (
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
- ) - model_coefficients[2]);
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ) - model_coefficients[2]);
}
//////////////////////////////////////////////////////////////////////////
{
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
- float distance = fabsf (sqrtf (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ float distance = fabsf (std::sqrt (
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
- ) - model_coefficients[2]);
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ) - model_coefficients[2]);
if (distance < threshold)
{
// Returns the indices of the points whose distances are smaller than the threshold
{
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
- float distance = fabsf (sqrtf (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ float distance = fabsf (std::sqrt (
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
- ) - model_coefficients[2]);
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ) - model_coefficients[2]);
if (distance < threshold)
nr_p++;
}
{
float dx = input_->points[inliers[i]].x - model_coefficients[0];
float dy = input_->points[inliers[i]].y - model_coefficients[1];
- float a = sqrtf ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
+ float a = std::sqrt ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
projected_points.points[inliers[i]].x = a * dx + model_coefficients[0];
projected_points.points[inliers[i]].y = a * dy + model_coefficients[1];
{
float dx = input_->points[inliers[i]].x - model_coefficients[0];
float dy = input_->points[inliers[i]].y - model_coefficients[1];
- float a = sqrtf ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
+ float a = std::sqrt ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
projected_points.points[i].x = a * dx + model_coefficients[0];
projected_points.points[i].y = a * dy + model_coefficients[1];
for (std::set<int>::const_iterator it = indices.begin (); it != indices.end (); ++it)
// Calculate the distance from the point to the sphere as the difference between
//dist(point,sphere_origin) and sphere_radius
- if (fabsf (sqrtf (
- ( input_->points[*it].x - model_coefficients[0] ) *
- ( input_->points[*it].x - model_coefficients[0] ) +
- ( input_->points[*it].y - model_coefficients[1] ) *
- ( input_->points[*it].y - model_coefficients[1] )
- ) - model_coefficients[2]) > threshold)
+ if (fabsf (std::sqrt (
+ ( input_->points[*it].x - model_coefficients[0] ) *
+ ( input_->points[*it].x - model_coefficients[0] ) +
+ ( input_->points[*it].y - model_coefficients[1] ) *
+ ( input_->points[*it].y - model_coefficients[1] )
+ ) - model_coefficients[2]) > threshold)
return (false);
return (true);
const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
{
// Need 3 samples
- if (samples.size () != 3)
+ if (samples.size () != sample_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::computeModelCoefficients] Invalid set of samples given (%lu)!\n", samples.size ());
return (false);
const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::getDistancesToModel] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
return;
const Eigen::VectorXf &model_coefficients, const double threshold, std::vector<int> &inliers)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
return;
const Eigen::VectorXf &model_coefficients, const double threshold)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
return (0);
const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::optimizeModelCoefficients] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
optimized_coefficients = model_coefficients;
return;
}
- // Need at least 3 points to estimate a plane
- if (inliers.size () < 4)
+ // Need more than the minimum sample size to make a difference
+ if (inliers.size () <= sample_size_)
{
- PCL_ERROR ("[pcl::SampleConsensusModelPlane::optimizeModelCoefficients] Not enough inliers found to support a model (%lu)! Returning the same coefficients.\n", inliers.size ());
+ PCL_ERROR ("[pcl::SampleConsensusModelPlane::optimizeModelCoefficients] Not enough inliers found to optimize model coefficients (%lu)! Returning the same coefficients.\n", inliers.size ());
optimized_coefficients = model_coefficients;
return;
}
const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::projectPoints] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
return;
const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
{
// Needs a valid set of model coefficients
- if (model_coefficients.size () != 4)
+ if (model_coefficients.size () != model_size_)
{
PCL_ERROR ("[pcl::SampleConsensusModelPlane::doSamplesVerifyModel] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
return (false);
model_coefficients[1] = 0.5f * m13 / m11;
model_coefficients[2] = 0.5f * m14 / m11;
// Radius
- model_coefficients[3] = sqrtf (
- model_coefficients[0] * model_coefficients[0] +
- model_coefficients[1] * model_coefficients[1] +
- model_coefficients[2] * model_coefficients[2] - m15 / m11);
+ model_coefficients[3] = std::sqrt (model_coefficients[0] * model_coefficients[0] +
+ model_coefficients[1] * model_coefficients[1] +
+ model_coefficients[2] * model_coefficients[2] - m15 / m11);
return (true);
}
for (size_t i = 0; i < indices_->size (); ++i)
// Calculate the distance from the point to the sphere as the difference between
//dist(point,sphere_origin) and sphere_radius
- distances[i] = fabs (sqrtf (
+ distances[i] = fabs (std::sqrt (
( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
// Iterate through the 3d points and calculate the distances from them to the sphere
for (size_t i = 0; i < indices_->size (); ++i)
{
- double distance = fabs (sqrtf (
+ double distance = fabs (std::sqrt (
( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
{
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
- if (fabs (sqrtf (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ if (fabs (std::sqrt (
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) +
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) +
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] ) *
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] )
- ) - model_coefficients[3]) < threshold)
+ ( input_->points[(*indices_)[i]].z - model_coefficients[2] ) *
+ ( input_->points[(*indices_)[i]].z - model_coefficients[2] )
+ ) - model_coefficients[3]) < threshold)
nr_p++;
}
return (nr_p);
float yt = model_->input_->points[(*model_->tmp_inliers_)[i]].y - x[1];
// g = sqrt ((x-a)^2 + (y-b)^2) - R
- fvec[i] = sqrtf (xt * xt + yt * yt) - x[2];
+ fvec[i] = std::sqrt (xt * xt + yt * yt) - x[2];
}
return (0);
}
cen_t[2] = model_->input_->points[(*model_->tmp_inliers_)[i]].z - x[2];
// g = sqrt ((x-a)^2 + (y-b)^2 + (z-c)^2) - R
- fvec[i] = sqrtf (cen_t.dot (cen_t)) - x[3];
+ fvec[i] = std::sqrt (cen_t.dot (cen_t)) - x[3];
}
return (0);
}
// 2 * tan (85 degree) ~ 22.86
float min_f = 0.043744332f * static_cast<float>(input_->width);
//std::cout << "isValid: " << determinant3x3Matrix<Eigen::Matrix3f> (KR_ / sqrt (KR_KRT_.coeff (8))) << " >= " << (min_f * min_f) << std::endl;
- return (determinant3x3Matrix<Eigen::Matrix3f> (KR_ / sqrtf (KR_KRT_.coeff (8))) >= (min_f * min_f));
+ return (determinant3x3Matrix<Eigen::Matrix3f> (KR_ / std::sqrt (KR_KRT_.coeff (8))) >= (min_f * min_f));
}
/** \brief Compute the camera matrix
float dx = input_->points[idx1].x - input_->points[idx2].x;
float dy = input_->points[idx1].y - input_->points[idx2].y;
float dz = input_->points[idx1].z - input_->points[idx2].z;
- float dist = sqrtf (dx*dx + dy*dy + dz*dz);
+ float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
bool normal_ok = (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ );
bool dist_ok = (dist < euclidean_dist_threshold);
float dx = input_->points[idx1].x - input_->points[idx2].x;
float dy = input_->points[idx1].y - input_->points[idx2].y;
float dz = input_->points[idx1].z - input_->points[idx2].z;
- float dist = sqrtf (dx*dx + dy*dy + dz*dz);
+ float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
return (dist < dist_threshold);
}
float dx = input_->points[idx1].x - input_->points[idx2].x;
float dy = input_->points[idx1].y - input_->points[idx2].y;
float dz = input_->points[idx1].z - input_->points[idx2].z;
- float dist = sqrtf (dx*dx + dy*dy + dz*dz);
+ float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
return ( (dist < distance_threshold_)
&& (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ ) );
#define PCL_SEGMENTATION_IMPL_LCCP_SEGMENTATION_HPP_
#include <pcl/segmentation/lccp_segmentation.h>
+#include <pcl/common/common.h>
//////////////////////////////////////////////////////////
x = curr_x + directions [nIdx].d_x;
y = curr_y + directions [nIdx].d_y;
index = curr_idx + directions [nIdx].d_index;
- if (x >= 0 && y < int(labels->width) && y >= 0 && y < int(labels->height) && labels->points[index].label == label)
+ if (x >= 0 && x < int(labels->width) && y >= 0 && y < int(labels->height) && labels->points[index].label == label)
break;
}
{
//test1 = true;
labels->points[current_row+colIdx+1].label = current_label;
- label_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx+1);
+ label_indices[current_label].indices.push_back (current_row+colIdx+1);
inlier_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx+1);
}
if (refinement_compare_->compare (current_row+colIdx, next_row+colIdx))
{
labels->points[next_row+colIdx].label = current_label;
- label_indices[label_to_model[current_label]].indices.push_back (next_row+colIdx);
+ label_indices[current_label].indices.push_back (next_row+colIdx);
inlier_indices[label_to_model[current_label]].indices.push_back (next_row+colIdx);
}
if (refinement_compare_->compare (current_row+colIdx, current_row+colIdx-1))
{
labels->points[current_row+colIdx-1].label = current_label;
- label_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx-1);
+ label_indices[current_label].indices.push_back (current_row+colIdx-1);
inlier_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx-1);
}
if (refinement_compare_->compare (current_row+colIdx, prev_row+colIdx))
{
labels->points[prev_row+colIdx].label = current_label;
- label_indices[label_to_model[current_label]].indices.push_back (prev_row+colIdx);
+ label_indices[current_label].indices.push_back (prev_row+colIdx);
inlier_indices[label_to_model[current_label]].indices.push_back (prev_row+colIdx);
}
}//col
if (point_labels_[index] == -1)
{
seed = index;
+ seed_counter = i_seed;
break;
}
}
if ( !pcl::isFinite<PointT> (*input_itr))
continue;
//Otherwise look up its leaf container
- LeafContainerT* leaf = adjacency_octree_->getLeafContainerAtPoint (*input_itr);
-
- //Get the voxel data object
- VoxelData& voxel_data = leaf->getData ();
- //Add this normal in (we will normalize at the end)
- voxel_data.normal_ += normal_itr->getNormalVector4fMap ();
- voxel_data.curvature_ += normal_itr->curvature;
+ LeafContainerT* leaf = adjacency_octree_->getLeafContainerAtPoint (*input_itr);
+
+ //Get the voxel data object
+ VoxelData& voxel_data = leaf->getData ();
+ //Add this normal in (we will normalize at the end)
+ voxel_data.normal_ += normal_itr->getNormalVector4fMap ();
+ voxel_data.curvature_ += normal_itr->curvature;
}
//Now iterate through the leaves and normalize
for (leaf_itr = adjacency_octree_->begin (); leaf_itr != adjacency_octree_->end (); ++leaf_itr)
std::vector<float> sqr_distances;
seed_indices.reserve (seed_indices_orig.size ());
float search_radius = 0.5f*seed_resolution_;
- // This is number of voxels which fit in a planar slice through search volume
- // Area of planar slice / area of voxel side
+ // This is 1/20th of the number of voxels which fit in a planar slice through search volume
+ // Area of planar slice / area of voxel side. (Note: This is smaller than the value mentioned in the original paper)
float min_points = 0.05f * (search_radius)*(search_radius) * 3.1415926536f / (resolution_*resolution_);
for (size_t i = 0; i < seed_indices_orig.size (); ++i)
{
/** \brief Get map<Supervoxel_ID, Segment_ID>
* \param[out] supervoxel_segment_map_arg The output container. On error the map is empty. */
inline void
- getSupervoxelToSegmentMap (std::map<uint32_t, uint32_t> supervoxel_segment_map_arg) const
+ getSupervoxelToSegmentMap (std::map<uint32_t, uint32_t>& supervoxel_segment_map_arg) const
{
if (grouping_data_valid_)
{
float dx = input_->points[idx1].x - input_->points[idx2].x;
float dy = input_->points[idx1].y - input_->points[idx2].y;
float dz = input_->points[idx1].z - input_->points[idx2].z;
- float dist = sqrtf (dx*dx + dy*dy + dz*dz);
+ float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
int dr = input_->points[idx1].r - input_->points[idx2].r;
int dg = input_->points[idx1].g - input_->points[idx2].g;
int db = input_->points[idx1].b - input_->points[idx2].b;
#include <pcl/pcl_base.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_search.h>
#include <pcl/octree/octree_pointcloud_adjacency.h>
#include <pcl/search/search.h>
#include <pcl/segmentation/boost.h>
typename pcl::PointCloud<PointXYZRGBA>::Ptr
getColoredCloud () const
{
- return boost::make_shared<pcl::PointCloud<PointXYZRGBA> > ();
+ return boost::shared_ptr<pcl::PointCloud<PointXYZRGBA> > (new pcl::PointCloud<PointXYZRGBA>);
}
/** \brief Returns a deep copy of the voxel centroid cloud */
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr
getColoredVoxelCloud () const
{
- return boost::make_shared<pcl::PointCloud<PointXYZRGBA> > ();
+ return boost::shared_ptr<pcl::PointCloud<PointXYZRGBA> > (new pcl::PointCloud<PointXYZRGBA>);
}
/** \brief Returns labeled voxelized cloud
*
*/
+/*
+ * Do not use pre-compiled versions in this compilation unit (cpp-file),
+ * especially for the octree classes. This way the OctreePointCloudAdjacency
+ * class is instantiated with the custom leaf container SupervoxelClustering.
+ */
+#define PCL_NO_PRECOMPILE
+
#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
#include <pcl/segmentation/impl/supervoxel_clustering.hpp>
template class pcl::octree::OctreePointCloudAdjacency<pcl::PointXYZ, AdjacencyContainerT>;
template class pcl::octree::OctreePointCloudAdjacency<pcl::PointXYZRGB, AdjacencyContainerRGBT>;
-template class pcl::octree::OctreePointCloudAdjacency<pcl::PointXYZRGBA, AdjacencyContainerRGBAT>;
\ No newline at end of file
+template class pcl::octree::OctreePointCloudAdjacency<pcl::PointXYZRGBA, AdjacencyContainerRGBAT>;
abs (input_->points[y_w * input_->width + x_w].r - input_->points[y * input_->width + x].r) +
abs (input_->points[y_w * input_->width + x_w].g - input_->points[y * input_->width + x].g) +
abs (input_->points[y_w * input_->width + x_w].b - input_->points[y * input_->width + x].b));
-
- float val_exp_rgb = val_exp_rgb_vector(d_color);
+
+ float val_exp_rgb = val_exp_rgb_vector (static_cast<Eigen::VectorXf::Index> (d_color));
if (pcl_isfinite (input_->points[y_w*input_->width + x_w].z))
{
template <typename PointInT> void
pcl::ConcaveHull<PointInT>::performReconstruction (PointCloud &alpha_shape, std::vector<pcl::Vertices> &polygons)
{
- EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
Eigen::Vector4d xyz_centroid;
- computeMeanAndCovarianceMatrix (*input_, *indices_, covariance_matrix, xyz_centroid);
+ compute3DCentroid (*input_, *indices_, xyz_centroid);
+ EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
+ computeCovarianceMatrixNormalized (*input_, *indices_, xyz_centroid, covariance_matrix);
// Check if the covariance matrix is finite or not.
for (int i = 0; i < 3; ++i)
if (dim_ == 0)
{
PCL_DEBUG ("[pcl::%s] WARNING: Input dimension not specified. Automatically determining input dimension.\n", getClassName ().c_str ());
- if (eigen_values[0] / eigen_values[2] < 1.0e-3)
+ if (std::abs (eigen_values[0]) < std::numeric_limits<double>::epsilon () || std::abs (eigen_values[0] / eigen_values[2]) < 1.0e-3)
dim_ = 2;
else
dim_ = 3;
pcl::ConvexHull<PointInT>::calculateInputDimension ()
{
PCL_DEBUG ("[pcl::%s::calculateInputDimension] WARNING: Input dimension not specified. Automatically determining input dimension.\n", getClassName ().c_str ());
- EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
Eigen::Vector4d xyz_centroid;
- computeMeanAndCovarianceMatrix (*input_, *indices_, covariance_matrix, xyz_centroid);
+ compute3DCentroid (*input_, *indices_, xyz_centroid);
+ EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
+ computeCovarianceMatrixNormalized (*input_, *indices_, xyz_centroid, covariance_matrix);
EIGEN_ALIGN16 Eigen::Vector3d eigen_values;
pcl::eigen33 (covariance_matrix, eigen_values);
- if (eigen_values[0] / eigen_values[2] < 1.0e-3)
+ if (std::abs (eigen_values[0]) < std::numeric_limits<double>::epsilon () || std::abs (eigen_values[0] / eigen_values[2]) < 1.0e-3)
dimension_ = 2;
else
dimension_ = 3;
Eigen::Vector4d normal_calc_centroid;
Eigen::Matrix3d normal_calc_covariance;
- pcl::computeMeanAndCovarianceMatrix (normal_calc_cloud, normal_calc_covariance, normal_calc_centroid);
+ pcl::compute3DCentroid (normal_calc_cloud, normal_calc_centroid);
+ pcl::computeCovarianceMatrixNormalized (normal_calc_cloud, normal_calc_centroid, normal_calc_covariance);
+
// Need to set -1 here. See eigen33 for explanations.
Eigen::Vector3d::Scalar eigen_value;
Eigen::Vector3d plane_params;
break;
if (sfn_[nnIdx[doubleEdges[j].index]] != nnIdx[i])
visibility = isVisible(uvn_nn[i], uvn_nn[doubleEdges[j].index], doubleEdges[j].second, Eigen::Vector2f::Zero());
- if (!visibility == false)
+ if (!visibility)
break;
}
angles_[i].visible = visibility;
angleMax = angle1;
}
double angleR = angles_[i].angle + M_PI;
- if (angleR >= 2*M_PI)
+ if (angleR >= M_PI)
angleR -= 2*M_PI;
if ((source_[nnIdx[i]] == ffn_[nnIdx[i]]) || (source_[nnIdx[i]] == sfn_[nnIdx[i]]))
{
neighbor_update = sfn_[next_index];
/* sfn[next_index] */
- if ((ffn_[sfn_[next_index]] = ffn_[current_index_]) || (sfn_[sfn_[next_index]] == ffn_[current_index_]))
+ if ((ffn_[sfn_[next_index]] == ffn_[current_index_]) || (sfn_[sfn_[next_index]] == ffn_[current_index_]))
{
state_[sfn_[next_index]] = COMPLETED;
}
for (size_t i=0; i < input.polygons.size (); ++i)
for (size_t j=0; j < input.polygons[i].vertices.size (); ++j)
- triangleList[j].push_back (i);
+ triangleList[input.polygons[i].vertices[j]].push_back (i);
return (triangleList);
}
#include <vtkPointData.h>
#include <vtkFloatArray.h>
+// Support for VTK 7.1 upwards
+#ifdef vtkGenericDataArray_h
+#define SetTupleValue SetTypedTuple
+#define InsertNextTupleValue InsertNextTypedTuple
+#define GetTupleValue GetTypedTuple
+#endif
//////////////////////////////////////////////////////////////////////////////////////////////
int
-PCL_ADD_TEST(test_2d test_2d FILES test_2d.cpp
- LINK_WITH pcl_io pcl_gtest
- ARGUMENTS "${PCL_SOURCE_DIR}/test/2d/lena.pcd"
- "${PCL_SOURCE_DIR}/test/2d/gauss_smooth.pcd"
- "${PCL_SOURCE_DIR}/test/2d/erosion.pcd"
- "${PCL_SOURCE_DIR}/test/2d/dilation.pcd"
- "${PCL_SOURCE_DIR}/test/2d/opening.pcd"
- "${PCL_SOURCE_DIR}/test/2d/closing.pcd"
- "${PCL_SOURCE_DIR}/test/2d/erosion_binary.pcd"
- "${PCL_SOURCE_DIR}/test/2d/dilation_binary.pcd"
- "${PCL_SOURCE_DIR}/test/2d/opening_binary.pcd"
- "${PCL_SOURCE_DIR}/test/2d/closing_binary.pcd"
- "${PCL_SOURCE_DIR}/test/2d/canny.pcd")
+set(SUBSYS_NAME tests_2d)
+set(SUBSYS_DESC "Point cloud library 2d module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS 2d)
+
+
+set(OPT_DEPS)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if(build)
+ PCL_ADD_TEST(test_2d test_2d FILES test_2d.cpp
+ LINK_WITH pcl_io pcl_gtest
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/2d/lena.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/gauss_smooth.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/erosion.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/dilation.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/opening.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/closing.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/erosion_binary.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/dilation_binary.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/opening_binary.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/closing_binary.pcd"
+ "${PCL_SOURCE_DIR}/test/2d/canny.pcd")
+endif(build)
set(SUBSYS_NAME global_tests)
set(SUBSYS_DESC "Point cloud library global unit tests")
-if(BUILD_visualization)
- include("${VTK_USE_FILE}")
- set(SUBSYS_DEPS 2d common sample_consensus io kdtree features filters geometry keypoints search surface registration segmentation octree recognition people outofcore visualization)
- set(OPT_DEPS vtk)
-else()
- set(SUBSYS_DEPS 2d common sample_consensus io kdtree features filters geometry keypoints search surface registration segmentation octree recognition people outofcore)
-endif()
-
set(DEFAULT OFF)
set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
endif()
enable_testing()
- include_directories(${PCL_INCLUDE_DIRS})
-
add_custom_target(tests "${CMAKE_CTEST_COMMAND}" "-V" VERBATIM)
add_subdirectory(2d)
add_subdirectory(geometry)
add_subdirectory(io)
add_subdirectory(kdtree)
+ add_subdirectory(keypoints)
+ add_subdirectory(people)
add_subdirectory(octree)
add_subdirectory(outofcore)
+ add_subdirectory(recognition)
add_subdirectory(registration)
add_subdirectory(search)
- add_subdirectory(keypoints)
add_subdirectory(surface)
add_subdirectory(segmentation)
add_subdirectory(sample_consensus)
-
- PCL_ADD_TEST(a_recognition_ism_test test_recognition_ism
- FILES test_recognition_ism.cpp
- LINK_WITH pcl_gtest pcl_io pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/ism_train.pcd" "${PCL_SOURCE_DIR}/test/ism_test.pcd")
-
- PCL_ADD_TEST(search test_search
- FILES test_search.cpp
- LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd")
-
- PCL_ADD_TEST(a_transforms_test test_transforms
- FILES test_transforms.cpp
- LINK_WITH pcl_gtest pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-
- PCL_ADD_TEST(a_segmentation_test test_segmentation
- FILES test_segmentation.cpp
- LINK_WITH pcl_gtest pcl_io pcl_segmentation pcl_features pcl_kdtree pcl_search pcl_common
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/car6.pcd" "${PCL_SOURCE_DIR}/test/colored_cloud.pcd")
-
- PCL_ADD_TEST(test_non_linear test_non_linear
- FILES test_non_linear.cpp
- LINK_WITH pcl_gtest pcl_common pcl_io pcl_sample_consensus pcl_segmentation pcl_kdtree pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/noisy_slice_displaced.pcd")
-
- PCL_ADD_TEST(a_recognition_cg_test test_recognition_cg
- FILES test_recognition_cg.cpp
- LINK_WITH pcl_gtest pcl_common pcl_io pcl_kdtree pcl_features pcl_recognition pcl_keypoints
- ARGUMENTS "${PCL_SOURCE_DIR}/test/milk.pcd" "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
-
- PCL_ADD_TEST(a_people_detection_test test_people_detection
- FILES test_people_groundBasedPeopleDetectionApp.cpp
- LINK_WITH pcl_gtest pcl_common pcl_io pcl_kdtree pcl_search pcl_features pcl_sample_consensus pcl_filters pcl_io pcl_segmentation pcl_people
- ARGUMENTS "${PCL_SOURCE_DIR}/people/data/trainedLinearSVMForPeopleDetectionWithHOG.yaml" "${PCL_SOURCE_DIR}/test/five_people.pcd")
-
- if(BUILD_visualization AND (NOT UNIX OR (UNIX AND DEFINED ENV{DISPLAY})))
- PCL_ADD_TEST(a_visualization_test test_visualization
- FILES test_visualization.cpp
- LINK_WITH pcl_gtest pcl_io pcl_visualization pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bunny.pcd")
- endif()
+ add_subdirectory(visualization)
endif(build)
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: io.h 5850 2012-06-06 14:04:59Z stfox88 $
- *
- */
-
-#ifndef PCL_TEST_BOOST_H_
-#define PCL_TEST_BOOST_H_
-
-#ifdef __GNUC__
-#pragma GCC system_header
-#endif
-
-// Marking all Boost headers as system headers to remove warnings
-#include <boost/random.hpp>
-#include <boost/thread.hpp>
-#include <boost/smart_ptr/shared_array.hpp>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_int.hpp>
-#include <boost/random/uniform_real.hpp>
-#include <boost/random/variate_generator.hpp>
-
-#endif // PCL_TEST_BOOST_H_
-# Args: name, executable_name
-PCL_ADD_TEST(common_test_wrappers test_wrappers FILES test_wrappers.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_test_macros test_macros FILES test_macros.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_vector_average test_vector_average FILES test_vector_average.cpp LINK_WITH pcl_gtest)
-PCL_ADD_TEST(common_common test_common FILES test_common.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_geometry test_geometry FILES test_geometry.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_copy_point test_copy_point FILES test_copy_point.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_centroid test_centroid FILES test_centroid.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_int test_plane_intersection FILES test_plane_intersection.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_pca test_pca FILES test_pca.cpp LINK_WITH pcl_gtest pcl_common)
-#PCL_ADD_TEST(common_spring test_spring FILES test_spring.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_gaussian test_gaussian FILES test_gaussian.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_operators test_operators FILES test_operators.cpp LINK_WITH pcl_gtest pcl_common)
-#PCL_ADD_TEST(common_convolution test_convolution FILES test_convolution.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_eigen test_eigen FILES test_eigen.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_intensity test_intensity FILES test_intensity.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_generator test_generator FILES test_generator.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_io test_common_io FILES test_io.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_copy_make_borders test_copy_make_borders FILES test_copy_make_borders.cpp LINK_WITH pcl_gtest pcl_common)
-PCL_ADD_TEST(common_bearing_angle_image test_bearing_angle_image FILES test_bearing_angle_image.cpp LINK_WITH pcl_gtest pcl_common)
+set(SUBSYS_NAME tests_common)
+set(SUBSYS_DESC "Point cloud library common module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS common)
+set(OPT_DEPS io features search kdtree octree)
-PCL_ADD_TEST(common_point_type_conversion test_common_point_type_conversion FILES test_point_type_conversion.cpp LINK_WITH pcl_gtest pcl_common)
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ # Args: name, executable_name
+ PCL_ADD_TEST(common_test_wrappers test_wrappers FILES test_wrappers.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_test_macros test_macros FILES test_macros.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_vector_average test_vector_average FILES test_vector_average.cpp LINK_WITH pcl_gtest)
+ PCL_ADD_TEST(common_common test_common FILES test_common.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_geometry test_geometry FILES test_geometry.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_copy_point test_copy_point FILES test_copy_point.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_centroid test_centroid FILES test_centroid.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_int test_plane_intersection FILES test_plane_intersection.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_pca test_pca FILES test_pca.cpp LINK_WITH pcl_gtest pcl_common)
+ #PCL_ADD_TEST(common_spring test_spring FILES test_spring.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_gaussian test_gaussian FILES test_gaussian.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_operators test_operators FILES test_operators.cpp LINK_WITH pcl_gtest pcl_common)
+ #PCL_ADD_TEST(common_convolution test_convolution FILES test_convolution.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_eigen test_eigen FILES test_eigen.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_intensity test_intensity FILES test_intensity.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_generator test_generator FILES test_generator.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_io test_common_io FILES test_io.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_copy_make_borders test_copy_make_borders FILES test_copy_make_borders.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_bearing_angle_image test_bearing_angle_image FILES test_bearing_angle_image.cpp LINK_WITH pcl_gtest pcl_common)
+
+ PCL_ADD_TEST(common_point_type_conversion test_common_point_type_conversion FILES test_point_type_conversion.cpp LINK_WITH pcl_gtest pcl_common)
+
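+ # Tests that require optional modules (and their test data) are guarded by the
+ # corresponding BUILD_<module> flags, so they are skipped when those modules were not built.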
+ if (BUILD_io AND BUILD_features)
+ PCL_ADD_TEST(a_transforms_test test_transforms
+ FILES test_transforms.cpp
+ LINK_WITH pcl_gtest pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ endif (BUILD_io AND BUILD_features)
+endif (build)
#include <pcl/common/centroid.h>
using namespace pcl;
+using pcl::test::EXPECT_EQ_VECTORS;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, compute3DCentroidFloat)
indices [2] = 6;
indices [3] = 7;
- ConstCloudIterator<PointXYZ> it (cloud, indices);
+ // Test finite data
+ {
+ ConstCloudIterator<PointXYZ> it (cloud, indices);
- EXPECT_EQ (compute3DCentroid (it, centroid_f), 4);
+ EXPECT_EQ (compute3DCentroid (it, centroid_f), 4);
- EXPECT_EQ (centroid_f[0], 0.0f);
- EXPECT_EQ (centroid_f[1], 1.0f);
- EXPECT_EQ (centroid_f[2], 0.0f);
- EXPECT_EQ (centroid_f[3], 1.0f);
-
- Eigen::Vector4d centroid_d;
- it.reset ();
- EXPECT_EQ (compute3DCentroid (it, centroid_d), 4);
-
- EXPECT_EQ (centroid_d[0], 0.0);
- EXPECT_EQ (centroid_d[1], 1.0);
- EXPECT_EQ (centroid_d[2], 0.0);
- EXPECT_EQ (centroid_d[3], 1.0);
+ EXPECT_EQ (centroid_f[0], 0.0f);
+ EXPECT_EQ (centroid_f[1], 1.0f);
+ EXPECT_EQ (centroid_f[2], 0.0f);
+ EXPECT_EQ (centroid_f[3], 1.0f);
+
+ Eigen::Vector4d centroid_d;
+ it.reset ();
+ EXPECT_EQ (compute3DCentroid (it, centroid_d), 4);
+
+ EXPECT_EQ (centroid_d[0], 0.0);
+ EXPECT_EQ (centroid_d[1], 1.0);
+ EXPECT_EQ (centroid_d[2], 0.0);
+ EXPECT_EQ (centroid_d[3], 1.0);
+ }
+
+ // Test for non-finite data
+ {
+ point.getVector3fMap() << std::numeric_limits<float>::quiet_NaN (),
+ std::numeric_limits<float>::quiet_NaN (),
+ std::numeric_limits<float>::quiet_NaN ();
+ cloud.push_back (point);
+ cloud.is_dense = false;
+ ConstCloudIterator<PointXYZ> it (cloud);
+
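+ // The NaN point added above is skipped because the cloud is no longer dense,
+ // so only the 8 finite points contribute and the centroid remains finite.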
+ EXPECT_EQ (8, compute3DCentroid (it, centroid_f));
+ EXPECT_EQ_VECTORS (Eigen::Vector4f (0.f, 0.f, 0.f, 1.f), centroid_f);
+ }
}
*/
#include <gtest/gtest.h>
+#include <pcl/pcl_tests.h>
#include <pcl/common/common.h>
#include <pcl/common/distances.h>
#include <pcl/common/intersections.h>
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "rgb", is_rgb, rgb_val));
EXPECT_EQ (is_rgb, true);
int rgb = *reinterpret_cast<int*>(&rgb_val);
- EXPECT_EQ (rgb, 8339710); // alpha is 0
+ EXPECT_EQ (rgb, 0xff7f40fe); // alpha is 255
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "normal_x", is_normal_x, normal_x_val));
EXPECT_EQ (is_normal_x, true);
EXPECT_EQ (normal_x_val, 1.0);
// No indices specified
max_exp_pt = cloud[0].getVector4fMap ();
getMaxDistance (cloud, pivot_pt, max_pt);
- EXPECT_EQ (max_exp_pt, max_pt);
+ test::EXPECT_EQ_VECTORS (max_exp_pt, max_pt);
// Specifying indices
std::vector<int> idx (2);
idx[0] = 1; idx[1] = 2;
max_exp_pt = cloud[2].getVector4fMap ();
getMaxDistance (cloud, idx, pivot_pt, max_pt);
- EXPECT_EQ (max_exp_pt, max_pt);
+ test::EXPECT_EQ_VECTORS (max_exp_pt, max_pt);
}
/* ---[ */
EXPECT_EQ (cloud_all[i].rgba, cloud_xyz_rgba[i].rgba);
}
for (int i = 0; i < int (cloud_xyz_rgb.size ()); ++i)
- {
EXPECT_XYZ_EQ (cloud_all[cloud_xyz_rgba.size () + i], cloud_xyz_rgb[i]);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].r, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].g, 0);
- EXPECT_FLOAT_EQ (cloud_all[cloud_xyz_rgba.size () + i].b, 0);
- EXPECT_EQ (cloud_all[cloud_xyz_rgba.size () + i].rgba, 0);
- }
cloud1.fields[rgb_idx].name = "rgba";
// regular vs _
EXPECT_NEAR_VECTORS (ev1, v2, 2*epsilon);
}
+TEST(MACROS, PCL_VERSION_COMPARE)
+{
+ // PCL_MAJOR_VERSION.PCL_MINOR_VERSION.PCL_REVISION_VERSION : latest released PCL version
+
+ // Current version should be:
+ // * equal (if release version is being tested)
+ // * greater (if development version is being tested)
+#if PCL_VERSION_COMPARE(>=, PCL_MAJOR_VERSION, PCL_MINOR_VERSION, PCL_REVISION_VERSION)
+ SUCCEED();
+#else
+ FAIL();
+#endif
+
+ // If current version is greater, then it must be a development version
+#if PCL_VERSION_COMPARE(>, PCL_MAJOR_VERSION, PCL_MINOR_VERSION, PCL_REVISION_VERSION)
+ EXPECT_TRUE(PCL_DEV_VERSION);
+#else
+ EXPECT_FALSE(PCL_DEV_VERSION);
+#endif
+
+ // If current version is equal, then it must be a release version (not development)
+#if PCL_VERSION_COMPARE(==, PCL_MAJOR_VERSION, PCL_MINOR_VERSION, PCL_REVISION_VERSION)
+ EXPECT_FALSE(PCL_DEV_VERSION);
+#else
+ EXPECT_TRUE(PCL_DEV_VERSION);
+#endif
+
+ // Pretend that current version is 1.7.2-dev
+#undef PCL_MAJOR_VERSION
+#undef PCL_MINOR_VERSION
+#undef PCL_REVISION_VERSION
+#undef PCL_DEV_VERSION
+#define PCL_MAJOR_VERSION 1
+#define PCL_MINOR_VERSION 7
+#define PCL_REVISION_VERSION 2
+#define PCL_DEV_VERSION 1
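+ // A development snapshot is treated as strictly newer than its own release
+ // triple, which is why 1.7.2 itself appears in the "greater than" checks below.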
+ // Should be greater than these:
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 1, 7, 2));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 1, 7, 1));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 1, 6, 3));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 0, 8, 4));
+ // Should be less than these:
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 1, 7, 3));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 1, 8, 0));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 2, 0, 0));
+
+ // Now pretend that current version is 1.7.2 (release)
+#undef PCL_DEV_VERSION
+#define PCL_DEV_VERSION 0
+ // Should be greater than these:
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 1, 7, 1));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 1, 6, 3));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(>, 0, 8, 4));
+ // Should be equal to itself
+ EXPECT_TRUE(PCL_VERSION_COMPARE(==, 1, 7, 2));
+ // Should be less than these:
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 1, 7, 3));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 1, 8, 0));
+ EXPECT_TRUE(PCL_VERSION_COMPARE(<, 2, 0, 0));
+}
+
int
main (int argc, char** argv)
{
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2010, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ */
+#include <gtest/gtest.h>
+
+#include <iostream> // For debug
+
+#include <pcl/PCLPointCloud2.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/features/feature.h>
+#include <pcl/common/eigen.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <pcl/common/transforms.h>
+
+#include <pcl/pcl_tests.h>
+
+using namespace pcl;
+using namespace pcl::io;
+using namespace std;
+
+const float PI = 3.14159265f;
+const float rho = std::sqrt (2.0f) / 2.0f; // cos(PI/4) == sin(PI/4)
+
+PointCloud<PointXYZ> cloud;
+pcl::PCLPointCloud2 cloud_blob;
+
+void
+init ()
+{
+ PointXYZ p0 (0, 0, 0);
+ PointXYZ p1 (1, 0, 0);
+ PointXYZ p2 (0, 1, 0);
+ PointXYZ p3 (0, 0, 1);
+ cloud.points.push_back (p0);
+ cloud.points.push_back (p1);
+ cloud.points.push_back (p2);
+ cloud.points.push_back (p3);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, DeMean)
+{
+ PointCloud<PointXYZ> cloud, cloud_demean;
+ fromPCLPointCloud2 (cloud_blob, cloud);
+
+ Eigen::Vector4f centroid;
+ compute3DCentroid (cloud, centroid);
+ EXPECT_NEAR (centroid[0], -0.0290809, 1e-4);
+ EXPECT_NEAR (centroid[1], 0.102653, 1e-4);
+ EXPECT_NEAR (centroid[2], 0.027302, 1e-4);
+ EXPECT_NEAR (centroid[3], 1, 1e-4);
+
+ // Check standard demean
+ demeanPointCloud (cloud, centroid, cloud_demean);
+ EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
+ EXPECT_EQ (cloud_demean.width, cloud.width);
+ EXPECT_EQ (cloud_demean.height, cloud.height);
+ EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
+
+ EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
+
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
+
+ vector<int> indices (cloud.points.size ());
+ for (int i = 0; i < static_cast<int> (indices.size ()); ++i) { indices[i] = i; }
+
+ // Check standard demean w/ indices
+ demeanPointCloud (cloud, indices, centroid, cloud_demean);
+ EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
+ EXPECT_EQ (cloud_demean.width, cloud.width);
+ EXPECT_EQ (cloud_demean.height, cloud.height);
+ EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
+
+ EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
+
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
+ EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
+
+ // Check eigen demean
+ Eigen::MatrixXf mat_demean;
+ demeanPointCloud (cloud, centroid, mat_demean);
+ EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
+ EXPECT_EQ (mat_demean.rows (), 4);
+
+ EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
+ EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
+ EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
+
+ EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
+ EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
+ EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
+
+ // Check eigen demean + indices
+ demeanPointCloud (cloud, indices, centroid, mat_demean);
+ EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
+ EXPECT_EQ (mat_demean.rows (), 4);
+
+ EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
+ EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
+ EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
+
+ EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
+ EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
+ EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, Transform)
+{
+ Eigen::Vector3f offset (100, 0, 0);
+ float angle = PI/4;
+ Eigen::Quaternionf rotation (cos (angle / 2), 0, 0, sin (angle / 2));
+
+ PointCloud<PointXYZ> cloud_out;
+ const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points (cloud_out.points);
+ transformPointCloud (cloud, cloud_out, offset, rotation);
+
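+ // Expected values, derived from the inputs above: the quaternion rotates by
+ // PI/4 about the z axis, so (1,0,0) maps to (rho, rho, 0), (0,1,0) maps to
+ // (-rho, rho, 0) and (0,0,1) stays put; the offset then shifts x by 100.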
+ EXPECT_EQ (cloud.points.size (), cloud_out.points.size ());
+ EXPECT_EQ (100, points[0].x);
+ EXPECT_EQ (0, points[0].y);
+ EXPECT_EQ (0, points[0].z);
+ EXPECT_NEAR (100+rho, points[1].x, 1e-4);
+ EXPECT_NEAR (rho, points[1].y, 1e-4);
+ EXPECT_EQ (0, points[1].z);
+ EXPECT_NEAR (100-rho, points[2].x, 1e-4);
+ EXPECT_NEAR (rho, points[2].y, 1e-4);
+ EXPECT_EQ (0, points[2].z);
+ EXPECT_EQ (100, points[3].x);
+ EXPECT_EQ (0, points[3].y);
+ EXPECT_EQ (1, points[3].z);
+
+ PointCloud<PointXYZ> cloud_out2;
+ const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points2 (cloud_out2.points);
+ Eigen::Translation3f translation (offset);
+ Eigen::Affine3f transform;
+ transform = translation * rotation;
+ transformPointCloud (cloud, cloud_out2, transform);
+
+ EXPECT_EQ (cloud.points.size (), cloud_out2.points.size ());
+ EXPECT_EQ (100, points2[0].x);
+ EXPECT_EQ (0, points2[0].y);
+ EXPECT_EQ (0, points2[0].z);
+ EXPECT_NEAR (100+rho, points2[1].x, 1e-4);
+ EXPECT_NEAR (rho, points2[1].y, 1e-4);
+ EXPECT_EQ (0, points2[1].z);
+ EXPECT_NEAR (100-rho, points2[2].x, 1e-4);
+ EXPECT_NEAR (rho, points2[2].y, 1e-4);
+ EXPECT_EQ (0, points2[2].z);
+ EXPECT_EQ (100, points2[3].x);
+ EXPECT_EQ (0, points2[3].y);
+ EXPECT_EQ (1, points2[3].z);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, TransformCopyFields)
+{
+ Eigen::Affine3f transform;
+ transform = Eigen::Translation3f (100, 0, 0);
+
+ PointXYZRGBNormal empty_point;
+ std::vector<int> indices (1);
+
+ PointCloud<PointXYZRGBNormal> cloud (2, 1);
+ cloud.points[0].rgba = 0xFF0000;
+ cloud.points[1].rgba = 0x00FF00;
+
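+ // The final boolean argument of the transform calls below controls whether
+ // non-spatial fields (here rgba) are copied to the output; when it is false
+ // the output points keep their default-constructed values (empty_point).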
+ // Preserve data in all fields
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloud (cloud, cloud_out, transform, true);
+ ASSERT_EQ (cloud.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
+ EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
+ }
+ // Preserve data in all fields (with indices)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloud (cloud, indices, cloud_out, transform, true);
+ ASSERT_EQ (indices.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
+ }
+ // Do not preserve data in all fields
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloud (cloud, cloud_out, transform, false);
+ ASSERT_EQ (cloud.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
+ }
+ // Do not preserve data in all fields (with indices)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloud (cloud, indices, cloud_out, transform, false);
+ ASSERT_EQ (indices.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
+ }
+ // Preserve data in all fields (with normals version)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloudWithNormals (cloud, cloud_out, transform, true);
+ ASSERT_EQ (cloud.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
+ EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
+ }
+ // Preserve data in all fields (with normals and indices version)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloudWithNormals (cloud, indices, cloud_out, transform, true);
+ ASSERT_EQ (indices.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
+ }
+ // Do not preserve data in all fields (with normals version)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloudWithNormals (cloud, cloud_out, transform, false);
+ ASSERT_EQ (cloud.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
+ }
+ // Do not preserve data in all fields (with normals and indices version)
+ {
+ PointCloud<PointXYZRGBNormal> cloud_out;
+ transformPointCloudWithNormals (cloud, indices, cloud_out, transform, false);
+ ASSERT_EQ (indices.size (), cloud_out.size ());
+ EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, Matrix4Affine3Transform)
+{
+ float rot_x = 2.8827f;
+ float rot_y = -0.31190f;
+ float rot_z = -0.93058f;
+ Eigen::Affine3f affine;
+ pcl::getTransformation (0, 0, 0, rot_x, rot_y, rot_z, affine);
+
+ EXPECT_NEAR (affine (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (affine (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (affine (0, 2), -0.028107658f, 1e-4);
+ EXPECT_NEAR (affine (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (affine (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (affine (1, 2), -0.39082864f, 1e-4);
+ EXPECT_NEAR (affine (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (affine (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (affine (2, 2), -0.920034f, 1e-4);
+
+ // Approximate only: rotation () uses SVD internally! See http://eigen.tuxfamily.org/dox/Transform_8h_source.html
+ Eigen::Matrix3f rotation = affine.rotation ();
+
+ EXPECT_NEAR (rotation (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (rotation (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (rotation (0, 2), -0.028107658f, 1e-4);
+ EXPECT_NEAR (rotation (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (rotation (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (rotation (1, 2), -0.39082864f, 1e-4);
+ EXPECT_NEAR (rotation (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (rotation (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (rotation (2, 2), -0.920034f, 1e-4);
+
+ float trans_x, trans_y, trans_z;
+ pcl::getTransformation (0.1f, 0.2f, 0.3f, rot_x, rot_y, rot_z, affine);
+ pcl::getTranslationAndEulerAngles (affine, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z);
+ EXPECT_FLOAT_EQ (trans_x, 0.1f);
+ EXPECT_FLOAT_EQ (trans_y, 0.2f);
+ EXPECT_FLOAT_EQ (trans_z, 0.3f);
+ EXPECT_FLOAT_EQ (rot_x, 2.8827f);
+ EXPECT_FLOAT_EQ (rot_y, -0.31190f);
+ EXPECT_FLOAT_EQ (rot_z, -0.93058f);
+
+ Eigen::Matrix4f transformation (Eigen::Matrix4f::Identity ());
+ transformation.block<3, 3> (0, 0) = affine.rotation ();
+ transformation.block<3, 1> (0, 3) = affine.translation ();
+
+ PointXYZ p (1.f, 2.f, 3.f);
+ Eigen::Vector3f v3 = p.getVector3fMap ();
+ Eigen::Vector4f v4 = p.getVector4fMap ();
+
+ Eigen::Vector3f v3t (affine * v3);
+ Eigen::Vector4f v4t (transformation * v4);
+
+ PointXYZ pt = pcl::transformPoint (p, affine);
+
+ EXPECT_NEAR (pt.x, v3t.x (), 1e-4); EXPECT_NEAR (pt.x, v4t.x (), 1e-4);
+ EXPECT_NEAR (pt.y, v3t.y (), 1e-4); EXPECT_NEAR (pt.y, v4t.y (), 1e-4);
+ EXPECT_NEAR (pt.z, v3t.z (), 1e-4); EXPECT_NEAR (pt.z, v4t.z (), 1e-4);
+
+ PointNormal pn;
+ pn.getVector3fMap () = p.getVector3fMap ();
+ pn.getNormalVector3fMap () = Eigen::Vector3f (0.60f, 0.48f, 0.64f);
+ Eigen::Vector3f n3 = pn.getNormalVector3fMap ();
+ Eigen::Vector4f n4 = pn.getNormalVector4fMap ();
+
+ Eigen::Vector3f n3t (affine.rotation() * n3);
+ Eigen::Vector4f n4t (transformation * n4);
+
+ PointNormal pnt = pcl::transformPointWithNormal (pn, affine);
+
+ EXPECT_NEAR (pnt.x, v3t.x (), 1e-4); EXPECT_NEAR (pnt.x, v4t.x (), 1e-4);
+ EXPECT_NEAR (pnt.y, v3t.y (), 1e-4); EXPECT_NEAR (pnt.y, v4t.y (), 1e-4);
+ EXPECT_NEAR (pnt.z, v3t.z (), 1e-4); EXPECT_NEAR (pnt.z, v4t.z (), 1e-4);
+ EXPECT_NEAR (pnt.normal_x, n3t.x (), 1e-4); EXPECT_NEAR (pnt.normal_x, n4t.x (), 1e-4);
+ EXPECT_NEAR (pnt.normal_y, n3t.y (), 1e-4); EXPECT_NEAR (pnt.normal_y, n4t.y (), 1e-4);
+ EXPECT_NEAR (pnt.normal_z, n3t.z (), 1e-4); EXPECT_NEAR (pnt.normal_z, n4t.z (), 1e-4);
+
+ PointCloud<PointXYZ> c, ct;
+ c.push_back (p);
+ pcl::transformPointCloud (c, ct, affine);
+ EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
+ EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
+ EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
+
+ pcl::transformPointCloud (c, ct, transformation);
+ EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
+ EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
+ EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
+
+ affine = transformation;
+
+ std::vector<int> indices (1); indices[0] = 0;
+
+ pcl::transformPointCloud (c, indices, ct, affine);
+ EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
+ EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
+ EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
+
+ pcl::transformPointCloud (c, indices, ct, transformation);
+ EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
+ EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
+ EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, commonTransform)
+{
+ Eigen::Vector3f xaxis (1,0,0), yaxis (0,1,0), zaxis (0,0,1);
+ Eigen::Affine3f trans = pcl::getTransFromUnitVectorsZY (zaxis, yaxis);
+ Eigen::Vector3f xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
+ //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
+ EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
+ EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
+ EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
+
+ trans = pcl::getTransFromUnitVectorsXY (xaxis, yaxis);
+ xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
+ //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
+ EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
+ EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
+ EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
+}
+
+/* ---[ */
+int
+ main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+ if (loadPCDFile (argv[1], cloud_blob) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ testing::InitGoogleTest (&argc, argv);
+ init();
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
-PCL_ADD_TEST(features_ptr test_features_ptr
- FILES test_ptr.cpp
- LINK_WITH pcl_gtest pcl_features)
+set(SUBSYS_NAME tests_features)
+set(SUBSYS_DESC "Point cloud library features module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS features)
+set(OPT_DEPS io keypoints) # optional: tests that need these modules are skipped when they are not built
-PCL_ADD_TEST(feature_base test_base_feature
- FILES test_base_feature.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_cppf_estimation test_cppf_estimation
- FILES test_cppf_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/colored_cloud.pcd")
-PCL_ADD_TEST(feature_normal_estimation test_normal_estimation
- FILES test_normal_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_pfh_estimation test_pfh_estimation
- FILES test_pfh_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
-PCL_ADD_TEST(feature_cvfh_estimation test_cvfh_estimation
- FILES test_cvfh_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io pcl_filters
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/milk.pcd")
+if (build)
+ PCL_ADD_TEST(features_ptr test_features_ptr
+ FILES test_ptr.cpp
+ LINK_WITH pcl_gtest pcl_features)
+ PCL_ADD_TEST(feature_gradient_estimation test_gradient_estimation
+ FILES test_gradient_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features)
+ PCL_ADD_TEST(feature_rift_estimation test_rift_estimation
+ FILES test_rift_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features)
+
+ if (BUILD_io)
+ PCL_ADD_TEST(feature_base test_base_feature
+ FILES test_base_feature.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_cppf_estimation test_cppf_estimation
+ FILES test_cppf_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/colored_cloud.pcd")
+ PCL_ADD_TEST(feature_normal_estimation test_normal_estimation
+ FILES test_normal_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_pfh_estimation test_pfh_estimation
+ FILES test_pfh_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+
+ PCL_ADD_TEST(feature_cvfh_estimation test_cvfh_estimation
+ FILES test_cvfh_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io pcl_filters
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/milk.pcd")
+
+ PCL_ADD_TEST(feature_ppf_estimation test_ppf_estimation
+ FILES test_ppf_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_shot_estimation test_shot_estimation
+ FILES test_shot_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_boundary_estimation test_boundary_estimation
+ FILES test_boundary_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_curvatures_estimation test_curvatures_estimation
+ FILES test_curvatures_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_spin_estimation test_spin_estimation
+ FILES test_spin_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_rsd_estimation test_rsd_estimation
+ FILES test_rsd_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_grsd_estimation test_grsd_estimation
+ FILES test_grsd_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_invariants_estimation test_invariants_estimation
+ FILES test_invariants_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_board_estimation test_board_estimation
+ FILES test_board_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_shot_lrf_estimation test_shot_lrf_estimation
+ FILES test_shot_lrf_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(features_narf test_narf
+ FILES test_narf.cpp
+ LINK_WITH pcl_gtest pcl_features ${FLANN_LIBRARIES})
+ PCL_ADD_TEST(a_ii_normals_test test_ii_normals
+ FILES test_ii_normals.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd")
+ PCL_ADD_TEST(feature_moment_of_inertia_estimation test_moment_of_inertia_estimation
+ FILES test_moment_of_inertia_estimation.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/lamppost.pcd")
+ PCL_ADD_TEST(feature_rops_estimation test_rops_estimation
+ FILES test_rops_estimation.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/rops_cloud.pcd" "${PCL_SOURCE_DIR}/test/rops_indices.txt" "${PCL_SOURCE_DIR}/test/rops_triangles.txt")
+ if (BUILD_keypoints)
+ PCL_ADD_TEST(feature_brisk test_brisk
+ FILES test_brisk.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_filters pcl_keypoints pcl_common pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/brisk_image_gt.pcd" "${PCL_SOURCE_DIR}/test/brisk_keypoints_gt.pcd" "${PCL_SOURCE_DIR}/test/brisk_descriptors_gt.pcd")
+ endif (BUILD_keypoints)
+ endif (BUILD_io)
+endif (build)
-PCL_ADD_TEST(feature_ppf_estimation test_ppf_estimation
- FILES test_ppf_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_shot_estimation test_shot_estimation
- FILES test_shot_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_boundary_estimation test_boundary_estimation
- FILES test_boundary_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_curvatures_estimation test_curvatures_estimation
- FILES test_curvatures_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_spin_estimation test_spin_estimation
- FILES test_spin_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_rsd_estimation test_rsd_estimation
- FILES test_rsd_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_grsd_estimation test_grsd_estimation
- FILES test_grsd_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_invariants_estimation test_invariants_estimation
- FILES test_invariants_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_gradient_estimation test_gradient_estimation
- FILES test_gradient_estimation.cpp
- LINK_WITH pcl_gtest pcl_features)
-PCL_ADD_TEST(feature_rift_estimation test_rift_estimation
- FILES test_rift_estimation.cpp
- LINK_WITH pcl_gtest pcl_features)
-PCL_ADD_TEST(feature_board_estimation test_board_estimation
- FILES test_board_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_shot_lrf_estimation test_shot_lrf_estimation
- FILES test_shot_lrf_estimation.cpp
- LINK_WITH pcl_gtest pcl_features pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(feature_brisk test_brisk
- FILES test_brisk.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_filters pcl_keypoints pcl_common pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/brisk_image_gt.pcd" "${PCL_SOURCE_DIR}/test/brisk_keypoints_gt.pcd" "${PCL_SOURCE_DIR}/test/brisk_descriptors_gt.pcd")
-PCL_ADD_TEST(features_narf test_narf
- FILES test_narf.cpp
- LINK_WITH pcl_gtest pcl_features ${FLANN_LIBRARIES})
-PCL_ADD_TEST(a_ii_normals_test test_ii_normals
- FILES test_ii_normals.cpp
- LINK_WITH pcl_gtest pcl_io pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd")
-PCL_ADD_TEST(feature_moment_of_inertia_estimation test_moment_of_inertia_estimation
- FILES test_moment_of_inertia_estimation.cpp
- LINK_WITH pcl_gtest pcl_io pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/lamppost.pcd")
-PCL_ADD_TEST(feature_rops_estimation test_rops_estimation
- FILES test_rops_estimation.cpp
- LINK_WITH pcl_gtest pcl_io pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/rops_cloud.pcd" "${PCL_SOURCE_DIR}/test/rops_indices.txt" "${PCL_SOURCE_DIR}/test/rops_triangles.txt")
float nx = -0.2f * p.x;
float ny = -0.5f;
float nz = 1.0f;
- float magnitude = sqrtf (nx * nx + ny * ny + nz * nz);
+ float magnitude = std::sqrt (nx * nx + ny * ny + nz * nz);
nx /= magnitude;
ny /= magnitude;
nz /= magnitude;
PointCloud<GFPFHSignature16> descriptor;
gfpfh.compute (descriptor);
- const float ref_values[] = { 3216, 7760, 8740, 26584, 4645, 2995, 3029, 4349, 6192, 5440, 9514, 47563, 21814, 22073, 5734, 1253 };
+ const float ref_values[] = { 1877, 6375, 5361, 14393, 6674, 2471, 2248, 2753, 3117, 4585, 14388, 32407, 15122, 3061, 3202, 794 };
EXPECT_EQ (descriptor.points.size (), 1);
for (size_t i = 0; i < size_t (descriptor.points[0].descriptorSize ()); ++i)
PointXYZI p;
p.x = x;
p.y = y;
- p.z = sqrtf (400 - x * x - y * y);
+ p.z = std::sqrt (400 - x * x - y * y);
p.intensity = expf ((-powf (x - 3.0f, 2.0f) + powf (y + 2.0f, 2.0f)) / (2.0f * 25.0f)) + expf ((-powf (x + 5.0f, 2.0f) + powf (y - 5.0f, 2.0f))
/ (2.0f * 4.0f));
float nx = p.x;
float ny = p.y;
float nz = p.z;
- float magnitude = sqrtf (nx * nx + ny * ny + nz * nz);
+ float magnitude = std::sqrt (nx * nx + ny * ny + nz * nz);
nx /= magnitude;
ny /= magnitude;
nz /= magnitude;
EXPECT_NEAR (shots352->points[103].descriptor[11], 0.0024724449, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[19], 0.0031367359, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[20], 0.17439659, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[21], 0.070665278, 1e-4);
+ EXPECT_NEAR (shots352->points[103].descriptor[21], 0.06542316, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[42], 0.013304681, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[53], 0.0073520984, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[54], 0.013584172, 1e-4);
EXPECT_NEAR (shots1344->points[103].descriptor[10], 0.0020453099, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[11], 0.0021887729, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[21], 0.062557608, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[21], 0.0579300672, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[42], 0.011778189, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[53], 0.0065085669, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[54], 0.012025614, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[55], 0.0044803056, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[64], 0.064429596, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[65], 0.046486385, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[64], 0.064453065, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[65], 0.046504568, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[86], 0.011518310, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[357], 0.0020453099, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[360], 0.0027993850, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[386], 0.045115642, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[387], 0.059068538, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[386], 0.0451327376, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[387], 0.0544394031, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[389], 0.0047547864, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[453], 0.0051176427, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[481], 0.0053625242, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[482], 0.012025614, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[511], 0.0057367259, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[512], 0.048357654, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[512], 0.048375979, 1e-5);
// Test results when setIndices and/or setSearchSurface are used
boost::shared_ptr<vector<int> > test_indices (new vector<int> (0));
EXPECT_NEAR (shots352->points[103].descriptor[11], 0.0024724449, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[19], 0.0031367359, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[20], 0.17439659, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[21], 0.070665278, 1e-4);
+ EXPECT_NEAR (shots352->points[103].descriptor[21], 0.06542316, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[42], 0.013304681, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[53], 0.0073520984, 1e-4);
EXPECT_NEAR (shots352->points[103].descriptor[54], 0.013584172, 1e-4);
EXPECT_NEAR (shots1344->points[103].descriptor[10], 0.0020453099, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[11], 0.0021887729, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[21], 0.062557608, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[21], 0.057930067, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[42], 0.011778189, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[53], 0.0065085669, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[54], 0.012025614, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[55], 0.0044803056, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[64], 0.064429596, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[65], 0.046486385, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[64], 0.0644530654, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[65], 0.0465045683, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[86], 0.011518310, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[357], 0.0020453099, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[360], 0.0027993850, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[386], 0.045115642, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[387], 0.059068538, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[386], 0.0451327376, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[387], 0.0544394031, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[389], 0.0047547864, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[453], 0.0051176427, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[481], 0.0053625242, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[482], 0.012025614, 1e-5);
EXPECT_NEAR (shots1344->points[103].descriptor[511], 0.0057367259, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[512], 0.048357654, 1e-5);
+ EXPECT_NEAR (shots1344->points[103].descriptor[512], 0.048375979, 1e-5);
// Test results when setIndices and/or setSearchSurface are used
boost::shared_ptr<vector<int> > test_indices (new vector<int> (0));
PointXYZI p;
p.x = x;
p.y = y;
- p.z = sqrtf (400.0f - x * x - y * y);
+ p.z = std::sqrt (400.0f - x * x - y * y);
p.intensity = expf (-(powf (x - 3.0f, 2.0f) + powf (y + 2.0f, 2.0f)) / (2.0f * 25.0f)) + expf (-(powf (x + 5.0f, 2.0f) + powf (y - 5.0f, 2.0f))
/ (2.0f * 4.0f));
-#PCL_ADD_TEST(common_convolution test_convolution FILES test_convolution.cpp LINK_WITH pcl_filters pcl_common)
-
-PCL_ADD_TEST(filters_filters test_filters
- FILES test_filters.cpp
- LINK_WITH pcl_gtest pcl_filters pcl_io pcl_sample_consensus pcl_kdtree pcl_features pcl_segmentation
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
-
-PCL_ADD_TEST(filters_sampling test_filters_sampling
- FILES test_sampling.cpp
- LINK_WITH pcl_gtest pcl_filters pcl_io pcl_kdtree pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd" "${PCL_SOURCE_DIR}/test/cturtle.pcd")
-
-PCL_ADD_TEST(filters_bilateral test_filters_bilateral
- FILES test_bilateral.cpp
- LINK_WITH pcl_gtest pcl_filters pcl_io pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
-
-PCL_ADD_TEST(filters_grid_minimum test_filters_grid_minimum
- FILES test_grid_minimum.cpp
- LINK_WITH pcl_gtest pcl_common pcl_filters)
-
-PCL_ADD_TEST(filters_model_outlier_removal test_model_outlier_removal
- FILES test_model_outlier_removal.cpp
- LINK_WITH pcl_gtest pcl_filters pcl_io pcl_sample_consensus pcl_kdtree pcl_features
- ARGUMENTS ${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd)
-
-PCL_ADD_TEST(filters_morphological test_morphological
- FILES test_morphological.cpp
- LINK_WITH pcl_gtest pcl_common pcl_filters)
-
-PCL_ADD_TEST(filters_local_maximum test_filters_local_maximum
- FILES test_local_maximum.cpp
- LINK_WITH pcl_gtest pcl_common pcl_filters)
-
+set(SUBSYS_NAME tests_filters)
+set(SUBSYS_DESC "Point cloud library filters module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS filters)
+set(OPT_DEPS io features segmentation)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ #PCL_ADD_TEST(common_convolution test_convolution FILES test_convolution.cpp LINK_WITH pcl_filters pcl_common)
+
+ PCL_ADD_TEST(filters_grid_minimum test_filters_grid_minimum
+ FILES test_grid_minimum.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_filters)
+
+ PCL_ADD_TEST(filters_morphological test_morphological
+ FILES test_morphological.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_filters)
+
+ PCL_ADD_TEST(filters_local_maximum test_filters_local_maximum
+ FILES test_local_maximum.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_filters pcl_search pcl_octree)
+
+ if (BUILD_io)
+
+ PCL_ADD_TEST(filters_bilateral test_filters_bilateral
+ FILES test_bilateral.cpp
+ LINK_WITH pcl_gtest pcl_filters pcl_io pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+
+ if (BUILD_features)
+ PCL_ADD_TEST(filters_sampling test_filters_sampling
+ FILES test_sampling.cpp
+ LINK_WITH pcl_gtest pcl_filters pcl_io pcl_kdtree pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd" "${PCL_SOURCE_DIR}/test/cturtle.pcd")
+
+ PCL_ADD_TEST(filters_model_outlier_removal test_model_outlier_removal
+ FILES test_model_outlier_removal.cpp
+ LINK_WITH pcl_gtest pcl_filters pcl_io pcl_sample_consensus pcl_kdtree pcl_features
+ ARGUMENTS ${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd)
+
+ if (BUILD_segmentation)
+ PCL_ADD_TEST(filters_filters test_filters
+ FILES test_filters.cpp
+ LINK_WITH pcl_gtest pcl_filters pcl_io pcl_sample_consensus pcl_kdtree pcl_features pcl_segmentation
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+ endif (BUILD_segmentation)
+ endif (BUILD_features)
+ endif (BUILD_io)
+endif (build)
-PCL_ADD_TEST(geometry_iterator test_iterator
- FILES test_iterator.cpp
- LINK_WITH pcl_gtest pcl_common)
-
-PCL_ADD_TEST(geometry_mesh_circulators test_mesh_circulators
- FILES test_mesh_circulators.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_mesh_conversion test_mesh_conversion
- FILES test_mesh_conversion.cpp
- LINK_WITH pcl_gtest pcl_common)
-
-PCL_ADD_TEST(geometry_mesh_data test_mesh_data
- FILES test_mesh_data.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_mesh_get_boundary test_mesh_get_boundary
- FILES test_mesh_get_boundary.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_mesh_indices test_mesh_indices
- FILES test_mesh_indices.cpp
- LINK_WITH pcl_gtest)
-
-add_definitions(-DPCL_TEST_GEOMETRY_BINARY_DIR="${CMAKE_CURRENT_BINARY_DIR}")
-PCL_ADD_TEST(geometry_mesh_io test_mesh_io
- FILES test_mesh_io.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_mesh test_mesh
- FILES test_mesh.cpp test_mesh_common_functions.h
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_polygon_mesh test_polygon_mesh
- FILES test_polygon_mesh.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_quad_mesh test_quad_mesh
- FILES test_quad_mesh.cpp
- LINK_WITH pcl_gtest)
-
-PCL_ADD_TEST(geometry_triangle_mesh test_triangle_mesh
- FILES test_triangle_mesh.cpp
- LINK_WITH pcl_gtest)
+set(SUBSYS_NAME tests_geometry)
+set(SUBSYS_DESC "Point cloud library geometry module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS geometry)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ PCL_ADD_TEST(geometry_iterator test_iterator
+ FILES test_iterator.cpp
+ LINK_WITH pcl_gtest pcl_common)
+
+ PCL_ADD_TEST(geometry_mesh_circulators test_mesh_circulators
+ FILES test_mesh_circulators.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_mesh_conversion test_mesh_conversion
+ FILES test_mesh_conversion.cpp
+ LINK_WITH pcl_gtest pcl_common)
+
+ PCL_ADD_TEST(geometry_mesh_data test_mesh_data
+ FILES test_mesh_data.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_mesh_get_boundary test_mesh_get_boundary
+ FILES test_mesh_get_boundary.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_mesh_indices test_mesh_indices
+ FILES test_mesh_indices.cpp
+ LINK_WITH pcl_gtest)
+
+ add_definitions(-DPCL_TEST_GEOMETRY_BINARY_DIR="${CMAKE_CURRENT_BINARY_DIR}")
+ PCL_ADD_TEST(geometry_mesh_io test_mesh_io
+ FILES test_mesh_io.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_mesh test_mesh
+ FILES test_mesh.cpp test_mesh_common_functions.h
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_polygon_mesh test_polygon_mesh
+ FILES test_polygon_mesh.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_quad_mesh test_quad_mesh
+ FILES test_quad_mesh.cpp
+ LINK_WITH pcl_gtest)
+
+ PCL_ADD_TEST(geometry_triangle_mesh test_triangle_mesh
+ FILES test_triangle_mesh.cpp
+ LINK_WITH pcl_gtest)
+endif (build)
else
EXPECT_EQ (abs(dx) + abs(dy), idx);
- float length = sqrtf (float (dx * dx + dy * dy));
+ float length = std::sqrt (float (dx * dx + dy * dy));
float dir_x = float (dx) / length;
float dir_y = float (dy) / length;
-PCL_ADD_TEST(io_io test_io
- FILES test_io.cpp
- LINK_WITH pcl_gtest pcl_io)
+set(SUBSYS_NAME tests_io)
+set(SUBSYS_DESC "Point cloud library io module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS io)
+set(OPT_DEPS visualization)
-PCL_ADD_TEST(io_iterators test_iterators
- FILES test_iterators.cpp
- LINK_WITH pcl_gtest pcl_io)
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
-PCL_ADD_TEST(compression_range_coder test_range_coder
- FILES test_range_coder.cpp
- LINK_WITH pcl_gtest pcl_io)
+if (build)
+ PCL_ADD_TEST(io_io test_io
+ FILES test_io.cpp
+ LINK_WITH pcl_gtest pcl_io)
-PCL_ADD_TEST (io_grabbers test_grabbers
- FILES test_grabbers.cpp
- LINK_WITH pcl_gtest pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/grabber_sequences")
-# Uses VTK readers to verify
-if (VTK_FOUND AND NOT ANDROID)
- PCL_ADD_TEST (io_ply_mesh_io test_ply_mesh_io
- FILES test_ply_mesh_io.cpp
- LINK_WITH pcl_gtest pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/tum_rabbit.vtk")
-endif ()
+ PCL_ADD_TEST(io_iterators test_iterators
+ FILES test_iterators.cpp
+ LINK_WITH pcl_gtest pcl_io)
+
+ PCL_ADD_TEST(compression_range_coder test_range_coder
+ FILES test_range_coder.cpp
+ LINK_WITH pcl_gtest pcl_io)
-PCL_ADD_TEST(point_cloud_image_extractors test_point_cloud_image_extractors
- FILES test_point_cloud_image_extractors.cpp
- LINK_WITH pcl_gtest pcl_io)
+ PCL_ADD_TEST (io_grabbers test_grabbers
+ FILES test_grabbers.cpp
+ LINK_WITH pcl_gtest pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/grabber_sequences")
+ # Uses VTK readers to verify
+ if (VTK_FOUND AND NOT ANDROID)
+ include_directories(${VTK_INCLUDE_DIRS})
+ PCL_ADD_TEST (io_ply_mesh_io test_ply_mesh_io
+ FILES test_ply_mesh_io.cpp
+ LINK_WITH pcl_gtest pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/tum_rabbit.vtk")
+ endif ()
-PCL_ADD_TEST(buffers test_buffers
- FILES test_buffers.cpp
- LINK_WITH pcl_gtest)
+ PCL_ADD_TEST(point_cloud_image_extractors test_point_cloud_image_extractors
+ FILES test_point_cloud_image_extractors.cpp
+ LINK_WITH pcl_gtest pcl_io)
+ PCL_ADD_TEST(buffers test_buffers
+ FILES test_buffers.cpp
+ LINK_WITH pcl_gtest pcl_common)
+endif (build)
memcpy (d.data (), dptr, buffer.size () * sizeof (T));
buffer.push (d);
for (size_t j = 0; j < buffer.size (); ++j)
- if (isnan (eptr[j]))
- EXPECT_TRUE (isnan (buffer[j]));
+ if (pcl_isnan (eptr[j]))
+ EXPECT_TRUE (pcl_isnan (buffer[j]));
else
EXPECT_EQ (eptr[j], buffer[j]);
dptr += buffer.size ();
#include <pcl/point_types.h>
#include <pcl/common/io.h>
#include <pcl/console/print.h>
+#include <pcl/io/auto_io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/ascii_io.h>
TEST (PCL, PCDReaderWriterASCIIColorPrecision)
{
PointCloud<PointXYZRGB> cloud;
+ cloud.points.reserve (256 / 4 * 256 / 4 * 256 / 4 * 256 / 16);
for (size_t r_i = 0; r_i < 256; r_i += 5)
for (size_t g_i = 0; g_i < 256; g_i += 5)
for (size_t b_i = 0; b_i < 256; b_i += 5)
- {
- PointXYZRGB p;
- p.r = static_cast<unsigned char> (r_i);
- p.g = static_cast<unsigned char> (g_i);
- p.b = static_cast<unsigned char> (b_i);
- p.x = p.y = p.z = 0.f;
-
- cloud.push_back (p);
- }
+ for (size_t a_i = 0; a_i < 256; a_i += 10)
+ {
+ PointXYZRGB p;
+ p.r = static_cast<unsigned char> (r_i);
+ p.g = static_cast<unsigned char> (g_i);
+ p.b = static_cast<unsigned char> (b_i);
+ p.a = static_cast<unsigned char> (a_i);
+ p.x = p.y = p.z = 0.f;
+
+ cloud.push_back (p);
+ }
cloud.height = 1;
cloud.width = uint32_t (cloud.size ());
cloud.is_dense = true;
afile.close();
ASCIIReader reader;
- reader.setInputFields( pcl::PointXYZI() );
+ reader.setInputFields<pcl::PointXYZI> ();
EXPECT_GE(reader.read("test_pcd.txt", rcloud), 0);
EXPECT_EQ(cloud.points.size(), rcloud.points.size() );
#endif
}
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename T> class AutoIOTest : public testing::Test { };
+typedef ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_XYZ_POINT_TYPES PCL_NORMAL_POINT_TYPES)> PCLXyzNormalPointTypes;
+TYPED_TEST_CASE (AutoIOTest, PCLXyzNormalPointTypes);
+TYPED_TEST (AutoIOTest, AutoLoadCloudFiles)
+{
+ PointCloud<TypeParam> cloud;
+ PointCloud<TypeParam> cloud_pcd;
+ PointCloud<TypeParam> cloud_ply;
+ PointCloud<TypeParam> cloud_ifs;
+
+ cloud.width = 10;
+ cloud.height = 5;
+ cloud.resize (cloud.width * cloud.height);
+ cloud.is_dense = true;
+
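+ // pcl/io/auto_io.h picks the reader/writer from the file extension, so the
+ // same save()/load() calls exercise the PCD, PLY and IFS code paths.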
+ save ("test_autoio.pcd", cloud);
+ save ("test_autoio.ply", cloud);
+ save ("test_autoio.ifs", cloud);
+
+ load ("test_autoio.pcd", cloud_pcd);
+ EXPECT_EQ (cloud_pcd.width * cloud_pcd.height, cloud.width * cloud.height);
+ EXPECT_EQ (cloud_pcd.is_dense, cloud.is_dense);
+
+ load ("test_autoio.ply", cloud_ply);
+ EXPECT_EQ (cloud_ply.width * cloud_ply.height, cloud.width * cloud.height);
+ EXPECT_EQ (cloud_ply.is_dense, cloud.is_dense);
+
+ load ("test_autoio.ifs", cloud_ifs);
+ EXPECT_EQ (cloud_ifs.width * cloud_ifs.height, cloud.width * cloud.height);
+ EXPECT_EQ (cloud_ifs.is_dense, cloud.is_dense);
+
+ remove ("test_autoio.pcd");
+ remove ("test_autoio.ply");
+ remove ("test_autoio.ifs");
+}
+
/* ---[ */
int
main (int argc, char** argv)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2014-, Centrum Wiskunde Informatica.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <gtest/gtest.h>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/octree/octree.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/compression/octree_pointcloud_compression.h>
+#include <pcl/compression/compression_profiles.h>
+
+#include <string>
+#include <exception>
+
+using namespace std;
+
+int total_runs = 0;
+
+char* pcd_file;
+
+#define MAX_POINTS 10000.0
+#define MAX_XYZ 1024.0
+#define MAX_COLOR 255
+#define NUMBER_OF_TEST_RUNS 2
+
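+// The tests below iterate over every predefined compression profile, encode randomly generated
+// clouds (or, in OctreeDeCompressionFile, a PCD file passed on the command line) and decode them
+// again. Only basic sanity of the decoded cloud (non-zero width/height) is asserted, since the
+// lossy profiles quantize point coordinates and an exact round trip is not expected.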
+TEST (PCL, OctreeDeCompressionRandomPointXYZRGBA)
+{
+ srand(static_cast<unsigned int> (time(NULL)));
+
+ // iterate over all pre-defined compression profiles
+ for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
+ // instantiate point cloud compression encoder/decoder
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA>* pointcloud_encoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA>((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA>* pointcloud_decoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGBA>();
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGBA>());
+ // iterate over runs
+ for (int test_idx = 0; test_idx < NUMBER_OF_TEST_RUNS; test_idx++, total_runs++)
+ {
+ try
+ {
+ int point_count = MAX_POINTS * rand() / RAND_MAX;
+ if (point_count < 1)
+ { // empty point cloud hangs decoder
+ total_runs--;
+ continue;
+ }
+ // create shared pointcloud instances
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>());
+ // assign input point clouds to octree
+ // create random point cloud
+ for (int point = 0; point < point_count; point++)
+ {
+ // generate a random point
+ pcl::PointXYZRGBA new_point;
+ new_point.x = static_cast<float> (MAX_XYZ * rand() / RAND_MAX);
+ new_point.y = static_cast<float> (MAX_XYZ * rand() / RAND_MAX);
+ new_point.z = static_cast<float> (MAX_XYZ * rand() / RAND_MAX);
+ new_point.r = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
+ new_point.g = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
+ new_point.b = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
+ new_point.a = static_cast<int> (MAX_COLOR * rand() / RAND_MAX);
+ // OctreePointCloudPointVector can store all points..
+ cloud->push_back(new_point);
+ }
+
+// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
+ std::stringstream compressed_data;
+ pointcloud_encoder->encodePointCloud(cloud, compressed_data);
+ pointcloud_decoder->decodePointCloud(compressed_data, cloud_out);
+ EXPECT_GT((int)cloud_out->width, 0) << "decoded PointCloud width <= 0";
+ EXPECT_GT((int)cloud_out->height, 0) << " decoded PointCloud height <= 0 ";
+ }
+ catch (std::exception& e)
+ {
+ std::cout << e.what() << std::endl;
+ }
+ } // runs
+ // free the per-profile encoder/decoder instances
+ delete pointcloud_encoder;
+ delete pointcloud_decoder;
+ } // compression profiles
+} // TEST
+
+TEST (PCL, OctreeDeCompressionRandomPointXYZ)
+{
+ srand(static_cast<unsigned int> (time(NULL)));
+
+ // iterate over all pre-defined compression profiles
+ for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile)
+ {
+ // instantiate point cloud compression encoder/decoder
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZ>* pointcloud_encoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZ>((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZ>* pointcloud_decoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZ>();
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZ>());
+ // loop over runs
+ for (int test_idx = 0; test_idx < NUMBER_OF_TEST_RUNS; test_idx++, total_runs++)
+ {
+ int point_count = MAX_POINTS * rand() / RAND_MAX;
+ // create shared pointcloud instances
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
+ // assign input point clouds to octree
+ // create random point cloud
+ for (int point = 0; point < point_count; point++)
+ {
+ // generate a random point
+ pcl::PointXYZ new_point(static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX),
+ static_cast<float> (MAX_XYZ * rand() / RAND_MAX));
+ cloud->push_back(new_point);
+ }
+// std::cout << "Run: " << total_runs << " compression profile:" << compression_profile << " point_count: " << point_count;
+ std::stringstream compressed_data;
+ try
+ { // decodePointCloud() throws exceptions on errors
+ pointcloud_encoder->encodePointCloud(cloud, compressed_data);
+ pointcloud_decoder->decodePointCloud(compressed_data, cloud_out);
+ EXPECT_GT((int)cloud_out->width, 0) << "decoded PointCloud width <= 0";
+ EXPECT_GT((int)cloud_out->height, 0) << " decoded PointCloud height <= 0 ";
+ }
+ catch (std::exception& e)
+ {
+ std::cout << e.what() << std::endl;
+ }
+ } // runs
+ // free the per-profile encoder/decoder instances
+ delete pointcloud_encoder;
+ delete pointcloud_decoder;
+ } // compression profiles
+} // TEST
+
+TEST(PCL, OctreeDeCompressionFile)
+{
+ pcl::PointCloud<pcl::PointXYZRGB>::Ptr input_cloud_ptr (new pcl::PointCloud<pcl::PointXYZRGB>);
+
+ // load point cloud from file, when present
+ if (pcd_file == NULL) return;
+ int rv = pcl::io::loadPCDFile(pcd_file, *input_cloud_ptr);
+ float voxel_sizes[] = { 0.1f, 0.01f };
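+ // octree resolutions for the input octrees built below; smaller voxels typically mean
+ // more leaf nodes and a larger compressed stream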
+
+ EXPECT_EQ(rv, 0) << " loadPCDFile " << pcd_file;
+ EXPECT_GT((int) input_cloud_ptr->width , 0) << "invalid point cloud width from " << pcd_file;
+ EXPECT_GT((int) input_cloud_ptr->height, 0) << "invalid point cloud height from " << pcd_file;
+
+ // iterate over compression profiles
+ for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
+ compression_profile != pcl::io::COMPRESSION_PROFILE_COUNT; ++compression_profile) {
+ // instantiate point cloud compression encoder/decoder
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>* PointCloudEncoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>((pcl::io::compression_Profiles_e) compression_profile, false);
+ pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>* PointCloudDecoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>();
+
+ // iterate over various voxel sizes
+ for (size_t i = 0; i < sizeof(voxel_sizes)/sizeof(voxel_sizes[0]); i++) {
+ pcl::octree::OctreePointCloud<pcl::PointXYZRGB> octree(voxel_sizes[i]);
+ pcl::PointCloud<pcl::PointXYZRGB>::Ptr octree_out(new pcl::PointCloud<pcl::PointXYZRGB>());
+ octree.setInputCloud((*input_cloud_ptr).makeShared());
+ octree.addPointsFromInputCloud();
+
+ std::stringstream compressedData;
+ PointCloudEncoder->encodePointCloud(octree.getInputCloud(), compressedData);
+ PointCloudDecoder->decodePointCloud(compressedData, octree_out);
+ EXPECT_GT((int)octree_out->width, 0) << "decoded PointCloud width <= 0";
+ EXPECT_GT((int)octree_out->height, 0) << " decoded PointCloud height <= 0 ";
+ total_runs++;
+ }
+ delete PointCloudDecoder;
+ delete PointCloudEncoder;
+ }
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ if (argc > 1) {
+ pcd_file = argv[1];
+ }
+ int result = RUN_ALL_TESTS ();
+ std::cerr << "Finished " << total_runs << " runs." << std::endl;
+ return (result);
+}
+/* ]--- */
-PCL_ADD_TEST (kdtree_kdtree test_kdtree
- FILES test_kdtree.cpp
- LINK_WITH pcl_gtest pcl_kdtree pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd" "${PCL_SOURCE_DIR}/test/kdtree/kdtree_unit_test_results.xml")
+set(SUBSYS_NAME tests_kdtree)
+set(SUBSYS_DESC "Point cloud library kdtree module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS kdtree)
+set(OPT_DEPS io) # io is not a mandatory dependency in kdtree
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ if (BUILD_io)
+ PCL_ADD_TEST (kdtree_kdtree test_kdtree
+ FILES test_kdtree.cpp
+ LINK_WITH pcl_gtest pcl_kdtree pcl_io pcl_common
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd" "${PCL_SOURCE_DIR}/test/kdtree/kdtree_unit_test_results.xml")
+ endif (BUILD_io)
+endif (build)
-PCL_ADD_TEST(keypoints_general test_keypoints
- FILES test_keypoints.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_filters pcl_keypoints
- ARGUMENTS "${PCL_SOURCE_DIR}/test/cturtle.pcd")
+set(SUBSYS_NAME tests_keypoints)
+set(SUBSYS_DESC "Point cloud library keypoints module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS keypoints)
+set(OPT_DEPS io) # io is an optional dependency for these tests
-PCL_ADD_TEST(keypoints_iss_3d test_iss_3d
- FILES test_iss_3d.cpp
- LINK_WITH pcl_gtest pcl_keypoints pcl_io
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ if (BUILD_io)
+ PCL_ADD_TEST(keypoints_general test_keypoints
+ FILES test_keypoints.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_filters pcl_keypoints
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/cturtle.pcd")
+
+ PCL_ADD_TEST(keypoints_iss_3d test_iss_3d
+ FILES test_iss_3d.cpp
+ LINK_WITH pcl_gtest pcl_keypoints pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ endif (BUILD_io)
+endif (build)
-PCL_ADD_TEST(a_octree_test test_octree
- FILES test_octree.cpp
- LINK_WITH pcl_gtest pcl_common)
+set(SUBSYS_NAME tests_octree)
+set(SUBSYS_DESC "Point cloud library octree module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS octree)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ PCL_ADD_TEST(a_octree_test test_octree
+ FILES test_octree.cpp
+ LINK_WITH pcl_gtest pcl_common)
+endif (build)
/*
* Software License Agreement (BSD License)
*
+ * Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010, Willow Garage, Inc.
+ * Copyright (c) 2017-, Open Perception, Inc.
+ *
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
// retrieve data from leaf node voxel
int* voxel_container = octreeA.createLeaf(voxels[i].x, voxels[i].y, voxels[i].z);
// check if retrieved data is identical to data[]
- ASSERT_EQ(*voxel_container, data[i]);
+ ASSERT_EQ (data[i], *voxel_container);
}
for (i = 128; i < 256; i++)
{
// check if leaf node exists in tree
- ASSERT_EQ( octreeA.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), true);
+ ASSERT_TRUE (octreeA.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
// remove leaf node
octreeA.removeLeaf (voxels[i].x, voxels[i].y, voxels[i].z);
// leaf node shouldn't exist in tree anymore
- ASSERT_EQ( octreeA.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeA.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// test serialization
for (i = 0; i < 128; i++)
{
// check if leafs exist in both octrees
- ASSERT_EQ( octreeA.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), true);
- ASSERT_EQ( octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), true);
+ ASSERT_TRUE (octreeA.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
+ ASSERT_TRUE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
for (i = 128; i < 256; i++)
{
// these leaf nodes were not copied and should not exist
- ASSERT_EQ( octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// testing deleteTree();
octreeB.deleteTree ();
// octreeB.getLeafCount() should be zero now;
- ASSERT_EQ (0u, octreeB.getLeafCount());
+ ASSERT_EQ (0u, octreeB.getLeafCount ());
// .. and previous leafs deleted..
for (i = 0; i < 128; i++)
{
- ASSERT_EQ(octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// test tree serialization
octreeA.serializeTree (treeBinaryA, leafVectorA);
// make sure, we retrieved all data objects
- ASSERT_EQ(leafVectorA.size(), octreeA.getLeafCount());
+ ASSERT_EQ (octreeA.getLeafCount (), leafVectorA.size ());
// check if leaf data is found in octree input data
bool bFound;
break;
}
- ASSERT_EQ(bFound, true);
+ ASSERT_TRUE (bFound);
}
// test tree serialization
break;
}
- ASSERT_EQ(bFound, true);
+ ASSERT_TRUE (bFound);
}
// test tree serialization with leaf data vectors
octreeB.deserializeTree (treeBinaryA, leafVectorA);
// test size and leaf count of reconstructed octree
- ASSERT_EQ(octreeA.getLeafCount(), octreeB.getLeafCount());
- ASSERT_EQ(128u, octreeB.getLeafCount());
+ ASSERT_EQ (octreeA.getLeafCount (), octreeB.getLeafCount ());
+ ASSERT_EQ (128u, octreeB.getLeafCount ());
octreeB.serializeTree (treeBinaryB, leafVectorB);
// compare octree data content of octree A and octree B
- ASSERT_EQ(leafVectorB.size(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorA.size(), leafVectorB.size());
+ ASSERT_EQ (leafVectorB.size (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorA.size (), leafVectorB.size ());
for (i = 0; i < leafVectorB.size (); i++)
{
- ASSERT_EQ( (*leafVectorA[i] == *leafVectorB[i]), true);
+ ASSERT_EQ (*leafVectorA[i], *leafVectorB[i]);
}
// test iterator
{
// depth should always be less than tree depth
unsigned int depth = a_it.getCurrentOctreeDepth ();
- ASSERT_LE(depth, octreeA.getTreeDepth());
+ ASSERT_LE (depth, octreeA.getTreeDepth ());
// store node, branch and leaf count
const OctreeNode* node = a_it.getCurrentOctreeNode ();
}
// compare node, branch and leaf count against actual tree values
- ASSERT_EQ(node_count, branch_count + leaf_count);
- ASSERT_EQ(leaf_count, octreeA.getLeafCount ());
-
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (octreeA.getLeafCount (), leaf_count);
}
TEST (PCL, Octree_Dynamic_Depth_Test)
{
++leaf_node_counter;
unsigned int depth = it2.getCurrentOctreeDepth ();
- ASSERT_EQ((depth==1)||(depth==6), true);
+ ASSERT_TRUE ((depth == 1) || (depth == 6));
}
// clean up
OctreeNode* node = it.getCurrentOctreeNode ();
- ASSERT_EQ(node->getNodeType(), LEAF_NODE);
+ ASSERT_EQ (LEAF_NODE, node->getNodeType());
OctreeContainerPointIndices& container = it.getLeafContainer();
if (it.getCurrentOctreeDepth () < octree.getTreeDepth ())
- ASSERT_LE(container.getSize(), leafAggSize);
+ ASSERT_LE (container.getSize (), leafAggSize);
// add points from leaf node to indexVector
container.getPointIndices (indexVector);
for (i=0; i<tmpVector.size(); ++i)
{
- ASSERT_GE(cloud->points[tmpVector[i]].x, min_pt(0));
- ASSERT_GE(cloud->points[tmpVector[i]].y, min_pt(1));
- ASSERT_GE(cloud->points[tmpVector[i]].z, min_pt(2));
- ASSERT_LE(cloud->points[tmpVector[i]].x, max_pt(0));
- ASSERT_LE(cloud->points[tmpVector[i]].y, max_pt(1));
- ASSERT_LE(cloud->points[tmpVector[i]].z, max_pt(2));
+ ASSERT_GE (cloud->points[tmpVector[i]].x, min_pt(0));
+ ASSERT_GE (cloud->points[tmpVector[i]].y, min_pt(1));
+ ASSERT_GE (cloud->points[tmpVector[i]].z, min_pt(2));
+ ASSERT_LE (cloud->points[tmpVector[i]].x, max_pt(0));
+ ASSERT_LE (cloud->points[tmpVector[i]].y, max_pt(1));
+ ASSERT_LE (cloud->points[tmpVector[i]].z, max_pt(2));
}
leaf_count++;
}
- ASSERT_EQ(pointcount, indexVector.size());
+
+ ASSERT_EQ (indexVector.size(), pointcount);
// make sure all indices are within indexVector
for (i = 0; i < indexVector.size (); ++i)
{
#if !defined(__APPLE__)
bool indexFound = (std::find(indexVector.begin(), indexVector.end(), i) != indexVector.end());
- ASSERT_EQ(indexFound, true);
+ ASSERT_TRUE (indexFound);
#endif
}
// compare node, branch and leaf count against actual tree values
- ASSERT_EQ(leaf_count, octree.getLeafCount ());
-
+ ASSERT_EQ (octree.getLeafCount (), leaf_count);
}
}
}
- ASSERT_EQ(static_cast<unsigned int> (256), octreeA.getLeafCount());
+ ASSERT_EQ (static_cast<unsigned int> (256), octreeA.getLeafCount ());
for (i = 0; i < 128; i++)
{
// retrieve and check data from leaf voxel
int* voxel_container = octreeA.findLeaf(voxels[i].x, voxels[i].y, voxels[i].z);
- ASSERT_EQ(*voxel_container, data[i]);
+ ASSERT_EQ (data[i], *voxel_container);
}
for (i = 128; i < 256; i++)
{
// check if leaf node exists in tree
- ASSERT_EQ( octreeA.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), true);
+ ASSERT_TRUE (octreeA.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
// remove leaf node
octreeA.removeLeaf (voxels[i].x, voxels[i].y, voxels[i].z);
// leaf node shouldn't exist in tree anymore
- ASSERT_EQ( octreeA.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeA.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
////////////
// check if leafs exist in octrees
for (i = 0; i < 128; i++)
{
- ASSERT_EQ( octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), true);
+ ASSERT_TRUE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// these leafs should not exist..
for (i = 128; i < 256; i++)
{
- ASSERT_EQ( octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// checking deleteTree();
octreeB.setTreeDepth (8);
// octreeB.getLeafCount() should be zero now;
- ASSERT_EQ(static_cast<unsigned int> (0), octreeB.getLeafCount());
+ ASSERT_EQ (static_cast<unsigned int> (0), octreeB.getLeafCount ());
for (i = 0; i < 128; i++)
{
- ASSERT_EQ( octreeB.existLeaf(voxels[i].x,voxels[i].y,voxels[i].z), false);
+ ASSERT_FALSE (octreeB.existLeaf (voxels[i].x, voxels[i].y, voxels[i].z));
}
// test tree serialization
octreeA.serializeTree (treeBinaryA, leafVectorA);
// make sure, we retrieved all data objects
- ASSERT_EQ(leafVectorA.size(), octreeA.getLeafCount());
+ ASSERT_EQ (octreeA.getLeafCount (), leafVectorA.size ());
// check if leaf data is found in octree input data
bool bFound;
break;
}
- ASSERT_EQ(bFound, true);
+ ASSERT_TRUE (bFound);
}
// test tree serialization
break;
}
- ASSERT_EQ(bFound, true);
+ ASSERT_TRUE (bFound);
}
// test tree serialization with leaf data vectors
octreeA.serializeTree (treeBinaryA, leafVectorA);
octreeB.deserializeTree (treeBinaryA, leafVectorA);
- ASSERT_EQ(octreeA.getLeafCount(), octreeB.getLeafCount());
- ASSERT_EQ(static_cast<unsigned int> (128), octreeB.getLeafCount());
+ ASSERT_EQ (octreeA.getLeafCount (), octreeB.getLeafCount ());
+ ASSERT_EQ (static_cast<unsigned int> (128), octreeB.getLeafCount ());
octreeB.serializeTree (treeBinaryB, leafVectorB);
// test size and leaf count of reconstructed octree
- ASSERT_EQ(leafVectorB.size(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorA.size(), leafVectorB.size());
+ ASSERT_EQ (leafVectorB.size (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorA.size (), leafVectorB.size ());
for (i = 0; i < leafVectorB.size (); i++)
{
- ASSERT_EQ( (*leafVectorA[i] == *leafVectorB[i]), true);
+ ASSERT_EQ (*leafVectorA[i], *leafVectorB[i]);
}
-
}
-
TEST (PCL, Octree2Buf_Base_Double_Buffering_Test)
{
octreeB.serializeTree (treeBinaryB, leafVectorB, true);
// check leaf count of rebuilt octree
- ASSERT_EQ(octreeA.getLeafCount(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorB.size(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorA.size(), leafVectorB.size());
+ ASSERT_EQ (octreeA.getLeafCount (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorB.size (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorA.size (), leafVectorB.size ());
// check if the octree structure is consistent.
for (i = 0; i < leafVectorB.size (); i++)
{
- ASSERT_EQ( (*leafVectorA[i] == *leafVectorB[i]), true);
+ ASSERT_EQ (*leafVectorA[i], *leafVectorB[i]);
}
}
octreeB.serializeTree (treeBinaryB, leafVectorB, true);
// check leaf count of rebuilt octree
- ASSERT_EQ(octreeA.getLeafCount(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorB.size(), octreeB.getLeafCount());
- ASSERT_EQ(leafVectorA.size(), leafVectorB.size());
- ASSERT_EQ(treeBinaryA.size(), octreeB.getBranchCount());
- ASSERT_EQ(treeBinaryA.size(), treeBinaryB.size());
+ ASSERT_EQ (octreeA.getLeafCount (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorB.size (), octreeB.getLeafCount ());
+ ASSERT_EQ (leafVectorA.size (), leafVectorB.size ());
+ ASSERT_EQ (treeBinaryA.size (), octreeB.getBranchCount ());
+ ASSERT_EQ (treeBinaryA.size (), treeBinaryB.size ());
// check if the octree structure is consistent.
for (i = 0; i < leafVectorB.size (); i++)
{
- ASSERT_EQ( (*leafVectorA[i] == *leafVectorB[i]), true);
+ ASSERT_EQ (*leafVectorA[i], *leafVectorB[i]);
}
// switch buffers
octreeA.switchBuffers ();
octreeB.switchBuffers ();
-
}
-
}
TEST (PCL, Octree_Pointcloud_Test)
{
-
size_t i;
int test_runs = 100;
int pointcount = 300;
for (point = 0; point < pointcount; point++)
{
// generate a random point
- PointXYZ newPoint (static_cast<float> (1024.0 * rand () / RAND_MAX),
- static_cast<float> (1024.0 * rand () / RAND_MAX),
+ PointXYZ newPoint (static_cast<float> (1024.0 * rand () / RAND_MAX),
+ static_cast<float> (1024.0 * rand () / RAND_MAX),
static_cast<float> (1024.0 * rand () / RAND_MAX));
if (!octreeA.isVoxelOccupiedAtPoint (newPoint))
cloudB->push_back (newPoint);
}
- ASSERT_EQ(octreeA.getLeafCount(), cloudA->points.size());
+ ASSERT_EQ (cloudA->points.size (), octreeA.getLeafCount ());
// checks for getVoxelDataAtPoint() and isVoxelOccupiedAtPoint() functionality
for (i = 0; i < cloudA->points.size (); i++)
{
- ASSERT_EQ(octreeA.isVoxelOccupiedAtPoint(cloudA->points[i]), true);
+ ASSERT_TRUE (octreeA.isVoxelOccupiedAtPoint (cloudA->points[i]));
octreeA.deleteVoxelAtPoint (cloudA->points[i]);
- ASSERT_EQ(octreeA.isVoxelOccupiedAtPoint(cloudA->points[i]), false);
+ ASSERT_FALSE (octreeA.isVoxelOccupiedAtPoint (cloudA->points[i]));
}
- ASSERT_EQ(octreeA.getLeafCount(), static_cast<unsigned int> (0));
+ ASSERT_EQ (static_cast<unsigned int> (0), octreeA.getLeafCount ());
// check if all points from leaf data can be found in input pointcloud data sets
octreeB.defineBoundingBox ();
{
// depth should always be less than tree depth
unsigned int depth = b_it.getCurrentOctreeDepth ();
- ASSERT_LE(depth, octreeB.getTreeDepth());
+ ASSERT_LE (depth, octreeB.getTreeDepth ());
Eigen::Vector3f voxel_min, voxel_max;
octreeB.getVoxelBounds (b_it, voxel_min, voxel_max);
- ASSERT_GE(voxel_min.x (), minx - 1e-4);
- ASSERT_GE(voxel_min.y (), miny - 1e-4);
- ASSERT_GE(voxel_min.z (), minz - 1e-4);
+ ASSERT_GE (voxel_min.x (), minx - 1e-4);
+ ASSERT_GE (voxel_min.y (), miny - 1e-4);
+ ASSERT_GE (voxel_min.z (), minz - 1e-4);
- ASSERT_LE(voxel_max.x (), maxx + 1e-4);
- ASSERT_LE(voxel_max.y (), maxy + 1e-4);
- ASSERT_LE(voxel_max.z (), maxz + 1e-4);
+ ASSERT_LE (voxel_max.x (), maxx + 1e-4);
+ ASSERT_LE (voxel_max.y (), maxy + 1e-4);
+ ASSERT_LE (voxel_max.z (), maxz + 1e-4);
// store node, branch and leaf count
const OctreeNode* node = b_it.getCurrentOctreeNode ();
}
// compare node, branch and leaf count against actual tree values
- ASSERT_EQ(node_count, branch_count + leaf_count);
- ASSERT_EQ(leaf_count, octreeB.getLeafCount ());
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (octreeB.getLeafCount (), leaf_count);
for (i = 0; i < cloudB->points.size (); i++)
{
-
std::vector<int> pointIdxVec;
octreeB.voxelSearch (cloudB->points[i], pointIdxVec);
++current;
}
- ASSERT_EQ(bIdxFound, true);
-
+ ASSERT_TRUE (bIdxFound);
}
-
}
-
}
TEST (PCL, Octree_Pointcloud_Density_Test)
{
-
// instantiate point cloud and fill it with point data
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
for (float z = 1.5f; z < 3.5f; z += 1.0f)
for (float y = 1.5f; y < 3.5f; y += 1.0f)
for (float x = 1.5f; x < 3.5f; x += 1.0f)
- ASSERT_EQ(octreeA.getVoxelDensityAtPoint (PointXYZ(x, y, z)), 1000u);
+ ASSERT_EQ (1000u, octreeA.getVoxelDensityAtPoint (PointXYZ(x, y, z)));
for (float z = 0.05f; z < 5.0f; z += 0.1f)
for (float y = 0.05f; y < 5.0f; y += 0.1f)
for (float x = 0.05f; x < 5.0f; x += 0.1f)
- ASSERT_EQ(octreeB.getVoxelDensityAtPoint (PointXYZ(x, y, z)), 1u);
+ ASSERT_EQ (1u, octreeB.getVoxelDensityAtPoint (PointXYZ (x, y, z)));
}
TEST (PCL, Octree_Pointcloud_Iterator_Test)
leafNodeCounter++;
}
- ASSERT_EQ(indexVector.size(), cloudIn->points.size ());
- ASSERT_EQ(leafNodeCounter, octreeA.getLeafCount());
+ ASSERT_EQ (cloudIn->points.size (), indexVector.size());
+ ASSERT_EQ (octreeA.getLeafCount (), leafNodeCounter);
OctreePointCloud<PointXYZ>::Iterator it2;
OctreePointCloud<PointXYZ>::Iterator it2_end = octreeA.end();
traversCounter++;
}
- ASSERT_EQ(octreeA.getLeafCount() + octreeA.getBranchCount(), traversCounter);
+ ASSERT_EQ (octreeA.getLeafCount () + octreeA.getBranchCount (), traversCounter);
// breadth-first iterator test
for (bfIt = octreeA.breadth_begin(); bfIt != bfIt_end; ++bfIt)
{
// tree depth of visited nodes must grow
- ASSERT_EQ( bfIt.getCurrentOctreeDepth()>=lastDepth, true);
+ ASSERT_TRUE (bfIt.getCurrentOctreeDepth () >= lastDepth);
lastDepth = bfIt.getCurrentOctreeDepth ();
if (bfIt.isBranchNode ())
{
branchNodeCount++;
// leaf nodes are traversed in the end
- ASSERT_EQ( leafNodeVisited, false);
+ ASSERT_FALSE (leafNodeVisited);
}
if (bfIt.isLeafNode ())
}
// check if every branch node and every leaf node has been visited
- ASSERT_EQ( leafNodeCount, octreeA.getLeafCount());
- ASSERT_EQ( branchNodeCount, octreeA.getBranchCount());
-
+ ASSERT_EQ (octreeA.getLeafCount (), leafNodeCount);
+ ASSERT_EQ (octreeA.getBranchCount (), branchNodeCount);
}
TEST(PCL, Octree_Pointcloud_Occupancy_Test)
// check occupancy of voxels
for (i = 0; i < 1000; i++)
{
- ASSERT_EQ(octree.isVoxelOccupiedAtPoint(cloudIn->points[i]), true);
+ ASSERT_TRUE (octree.isVoxelOccupiedAtPoint (cloudIn->points[i]));
octree.deleteVoxelAtPoint (cloudIn->points[i]);
- ASSERT_EQ(octree.isVoxelOccupiedAtPoint(cloudIn->points[i]), false);
+ ASSERT_FALSE (octree.isVoxelOccupiedAtPoint (cloudIn->points[i]));
}
-
}
-
}
TEST (PCL, Octree_Pointcloud_Change_Detector_Test)
for (i = 0; i < 1000; i++)
{
octree.addPointToCloud (
- PointXYZ (static_cast<float> (100.0 + 5.0 * rand () / RAND_MAX),
+ PointXYZ (static_cast<float> (100.0 + 5.0 * rand () / RAND_MAX),
static_cast<float> (100.0 + 10.0 * rand () / RAND_MAX),
static_cast<float> (100.0 + 10.0 * rand () / RAND_MAX)),
cloudIn);
octree.getPointIndicesFromNewVoxels (newPointIdxVector);
// should be 1000
- ASSERT_EQ( newPointIdxVector.size(), static_cast<std::size_t> (1000));
+ ASSERT_EQ (static_cast<std::size_t> (1000), newPointIdxVector.size ());
// all point indices found should have an index of >= 1000
for (i = 0; i < 1000; i++)
{
- ASSERT_EQ( ( newPointIdxVector [i] >= 1000 ), true);
+ ASSERT_TRUE (newPointIdxVector[i] >= 1000);
}
}
octree.getVoxelCentroids (voxelCentroids);
// we expect 10 voxel centroids
- ASSERT_EQ (voxelCentroids.size(), static_cast<std::size_t> (10));
+ ASSERT_EQ (static_cast<std::size_t> (10), voxelCentroids.size());
// check centroid calculation
for (i = 0; i < 10; i++)
{
- EXPECT_NEAR(voxelCentroids[i].x, static_cast<float> (i)+0.4, 1e-4);
- EXPECT_NEAR(voxelCentroids[i].y, static_cast<float> (i)+0.4, 1e-4);
- EXPECT_NEAR(voxelCentroids[i].z, static_cast<float> (i)+0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].x, static_cast<float> (i) + 0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].y, static_cast<float> (i) + 0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].z, static_cast<float> (i) + 0.4, 1e-4);
}
// ADDING AN ADDITIONAL POINT CLOUD TO OctreePointCloudVoxelCentroid
// check centroid calculation
for (i = 0; i < 10; i++)
{
- EXPECT_NEAR(voxelCentroids[i].x, static_cast<float> (i)+0.4, 1e-4);
- EXPECT_NEAR(voxelCentroids[i].y, static_cast<float> (i)+0.4, 1e-4);
- EXPECT_NEAR(voxelCentroids[i].z, static_cast<float> (i)+0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].x, static_cast<float> (i) + 0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].y, static_cast<float> (i) + 0.4, 1e-4);
+ EXPECT_NEAR (voxelCentroids[i].z, static_cast<float> (i) + 0.4, 1e-4);
}
}
{
// define a random search point
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
pointCandidates.pop ();
}
-
+
// octree nearest neighbor search
octree.deleteTree ();
octree.addPointsFromInputCloud ();
octree.nearestKSearch (searchPoint, static_cast<int> (K), k_indices, k_sqr_distances);
- ASSERT_EQ( k_indices.size(), k_indices_bruteforce.size());
+ ASSERT_EQ (k_indices_bruteforce.size (), k_indices.size());
// compare nearest neighbor results of octree with bruteforce search
i = 0;
while (k_indices_bruteforce.size ())
{
- ASSERT_EQ( k_indices.back(), k_indices_bruteforce.back());
- EXPECT_NEAR(k_sqr_distances.back(), k_sqr_distances_bruteforce.back(), 1e-4);
+ ASSERT_EQ (k_indices_bruteforce.back(), k_indices.back ());
+ EXPECT_NEAR (k_sqr_distances_bruteforce.back (), k_sqr_distances.back (), 1e-4);
k_indices_bruteforce.pop_back ();
k_indices.pop_back ();
k_sqr_distances_bruteforce.pop_back ();
k_sqr_distances.pop_back ();
-
}
-
}
-
}
TEST (PCL, Octree_Pointcloud_Box_Search)
idxInResults = true;
}
- ASSERT_EQ(idxInResults, inBox);
-
+ ASSERT_EQ (idxInResults, inBox);
}
-
}
}
{
// define a random search point
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
if (BFindex == ANNindex)
{
- EXPECT_NEAR(ANNdistance, BFdistance, 1e-4);
+ EXPECT_NEAR (ANNdistance, BFdistance, 1e-4);
bestMatchCount++;
}
}
// we should have found the absolute nearest neighbor at least once
- ASSERT_EQ( (bestMatchCount > 0), true);
-
+ ASSERT_TRUE (bestMatchCount > 0);
}
TEST (PCL, Octree_Pointcloud_Neighbours_Within_Radius_Search)
for (test_id = 0; test_id < test_runs; test_id++)
{
// define a random search point
- PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
+ PointXYZ searchPoint (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
// execute octree radius search
octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius);
- ASSERT_EQ( cloudNWRRadius.size(), cloudSearchBruteforce.size());
+ ASSERT_EQ (cloudSearchBruteforce.size (), cloudNWRRadius.size ());
// check if result from octree radius search can be also found in bruteforce search
std::vector<int>::const_iterator current = cloudNWRSearch.begin ();
+ (cloudIn->points[*current].y - searchPoint.y) * (cloudIn->points[*current].y - searchPoint.y)
+ (cloudIn->points[*current].z - searchPoint.z) * (cloudIn->points[*current].z - searchPoint.z));
- ASSERT_EQ( (pointDist<=searchRadius), true);
+ ASSERT_TRUE (pointDist <= searchRadius);
++current;
}
// check if result limitation works
octree.radiusSearch (searchPoint, searchRadius, cloudNWRSearch, cloudNWRRadius, 5);
- ASSERT_EQ( cloudNWRRadius.size() <= 5, true);
+ ASSERT_TRUE (cloudNWRRadius.size () <= 5);
}
TEST (PCL, Octree_Pointcloud_Ray_Traversal)
{
-
const unsigned int test_runs = 100;
unsigned int test_id;
cloudIn->height = 1;
cloudIn->points.resize (cloudIn->width * cloudIn->height);
- Eigen::Vector3f p (static_cast<float> (10.0 * rand () / RAND_MAX),
+ Eigen::Vector3f p (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
// origin
- Eigen::Vector3f o (static_cast<float> (12.0 * rand () / RAND_MAX),
+ Eigen::Vector3f o (static_cast<float> (12.0 * rand () / RAND_MAX),
static_cast<float> (12.0 * rand () / RAND_MAX),
static_cast<float> (12.0 * rand () / RAND_MAX));
octree_search.getIntersectedVoxelIndices (o, dir, indicesInRay);
// check if all voxels in the cloud are penetrated by the ray
- ASSERT_EQ( voxelsInRay.size (), cloudIn->points.size ());
+ ASSERT_EQ (cloudIn->points.size (), voxelsInRay.size ());
// check if all indices of penetrated voxels are in cloud
- ASSERT_EQ( indicesInRay.size (), cloudIn->points.size ());
+ ASSERT_EQ (cloudIn->points.size (), indicesInRay.size ());
octree_search.getIntersectedVoxelCenters (o, dir, voxelsInRay2, 1);
octree_search.getIntersectedVoxelIndices (o, dir, indicesInRay2, 1);
// check if only the point from a single voxel has been returned
- ASSERT_EQ( voxelsInRay2.size (), 1u );
- ASSERT_EQ( indicesInRay2.size (), 1u );
+ ASSERT_EQ (1u, voxelsInRay2.size ());
+ ASSERT_EQ (1u, indicesInRay2.size ());
// check if this point is the closest point to the origin
pcl::PointXYZ pt = cloudIn->points[ indicesInRay2[0] ];
Eigen::Vector3f d = Eigen::Vector3f (pt.x, pt.y, pt.z) - o;
float min_dist = d.norm ();
- for (unsigned int i = 0; i < cloudIn->width * cloudIn->height; i++) {
- pt = cloudIn->points[i];
- d = Eigen::Vector3f (pt.x, pt.y, pt.z) - o;
- ASSERT_GE( d.norm (), min_dist );
+ for (unsigned int i = 0; i < cloudIn->width * cloudIn->height; i++)
+ {
+ pt = cloudIn->points[i];
+ d = Eigen::Vector3f (pt.x, pt.y, pt.z) - o;
+ ASSERT_GE (d.norm (), min_dist);
}
-
}
-
}
TEST (PCL, Octree_Pointcloud_Adjacency)
{
-
const unsigned int test_runs = 100;
unsigned int test_id;
-
-
-
srand (static_cast<unsigned int> (time (NULL)));
for (test_id = 0; test_id < test_runs; test_id++)
{
// instantiate point cloud
PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
-
+
float resolution = static_cast<float> (0.01 * rand () / RAND_MAX) + 0.00001f;
//Define a random point
- PointXYZ point (static_cast<float> (0.5 * rand () / RAND_MAX),
+ PointXYZ point (static_cast<float> (0.5 * rand () / RAND_MAX),
static_cast<float> (0.5 * rand () / RAND_MAX),
static_cast<float> (0.5 * rand () / RAND_MAX));
//This is done to define the grid
{
for (int dz = -1; dz <= 1; ++dz)
{
- float factor = 1.0f;//*std::sqrt (dx*dx + dy*dy + dz*dz);
- PointXYZ neighbor (point.x+dx*resolution*factor, point.y+dy*resolution*factor, point.z+dz*resolution*factor);
+ float factor = 1.0f;//*std::sqrt (dx*dx + dy*dy + dz*dz);
+ PointXYZ neighbor (point.x+dx*resolution*factor, point.y+dy*resolution*factor, point.z+dz*resolution*factor);
cloudIn->push_back (neighbor);
}
}
}
-
+
//Add another point that isn't touching previous or neighbors
- PointXYZ point2 (static_cast<float> (point.x + 10*resolution),
+ PointXYZ point2 (static_cast<float> (point.x + 10*resolution),
static_cast<float> (point.y + 10*resolution),
static_cast<float> (point.z + 10*resolution));
cloudIn->push_back (point2);
// Add points which are not neighbors
for (int i = 0; i < 100; ++i)
{
- PointXYZ not_neighbor_point (static_cast<float> (10.0 * rand () / RAND_MAX),
+ PointXYZ not_neighbor_point (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
if ( (point2.getVector3fMap () - not_neighbor_point.getVector3fMap ()).norm () > 3*resolution )
cloudIn->push_back(not_neighbor_point);
}
}
-
-
+
+
OctreePointCloudAdjacency<PointXYZ> octree (resolution);
octree.setInputCloud (cloudIn);
octree.addPointsFromInputCloud ();
-
+
OctreePointCloudAdjacencyContainer<PointXYZ> *leaf_container;
-
+
leaf_container = octree.getLeafContainerAtPoint (point);
//Point should have 26 neighbors, plus itself
- ASSERT_EQ( leaf_container->size() == 27, true);
+ ASSERT_EQ (27u, leaf_container->size ());
leaf_container = octree.getLeafContainerAtPoint (point2);
-
- //Other point should only have itself for neighbor
- ASSERT_EQ( leaf_container->size() == 1, true);
+ //Other point should only have itself for neighbor
+ ASSERT_EQ (1u, leaf_container->size ());
}
+}
+
+TEST (PCL, Octree_Pointcloud_Bounds)
+{
+ const double SOME_RESOLUTION (10 + 1/3.0);
+ const int SOME_DEPTH (4);
+ const double DESIRED_MAX = ((1<<SOME_DEPTH) + 0.5)*SOME_RESOLUTION;
+ const double DESIRED_MIN = 0;
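+ // Note: a tree of depth SOME_DEPTH spans (1<<SOME_DEPTH) voxels per axis, so DESIRED_MAX lies
+ // half a voxel beyond that; the octree is expected to grow its bounding box to cover it rather
+ // than clip the requested bounds, which is what the assertions below verify.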
+
+ OctreePointCloud<PointXYZ> tree (SOME_RESOLUTION);
+ tree.defineBoundingBox (DESIRED_MIN, DESIRED_MIN, DESIRED_MIN, DESIRED_MAX, DESIRED_MAX, DESIRED_MAX);
+
+ double min_x, min_y, min_z, max_x, max_y, max_z;
+ tree.getBoundingBox (min_x, min_y, min_z, max_x, max_y, max_z);
+
+ ASSERT_GE (max_x, DESIRED_MAX);
+ ASSERT_GE (DESIRED_MIN, min_x);
+
+ const double LARGE_MIN = 1e7-45*SOME_RESOLUTION;
+ const double LARGE_MAX = 1e7-5*SOME_RESOLUTION;
+ tree.defineBoundingBox (LARGE_MIN, LARGE_MIN, LARGE_MIN, LARGE_MAX, LARGE_MAX, LARGE_MAX);
+ tree.getBoundingBox (min_x, min_y, min_z, max_x, max_y, max_z);
+ const unsigned int depth = tree.getTreeDepth ();
+ tree.defineBoundingBox (min_x, min_y, min_z, max_x, max_y, max_z);
+
+ ASSERT_EQ (depth, tree.getTreeDepth ());
+
+ double min_x2, min_y2, min_z2, max_x2, max_y2, max_z2;
+ tree.getBoundingBox (min_x2, min_y2, min_z2, max_x2, max_y2, max_z2);
+ ASSERT_DOUBLE_EQ (min_x2, min_x);
+ ASSERT_DOUBLE_EQ (max_x2, max_x);
}
/* ---[ */
int
-PCL_ADD_TEST (outofcore_test test_outofcore
- FILES test_outofcore.cpp
- LINK_WITH pcl_gtest pcl_common pcl_io pcl_filters pcl_outofcore
- #ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd"
- )
+set(SUBSYS_NAME tests_outofcore)
+set(SUBSYS_DESC "Point cloud library outofcore module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS outofcore)
+set(OPT_DEPS) # module does not depend on these
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ include_directories(${VTK_INCLUDE_DIRS})
+ PCL_ADD_TEST (outofcore_test test_outofcore
+ FILES test_outofcore.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_filters pcl_outofcore pcl_visualization
+ #ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd"
+ )
+endif (build)
--- /dev/null
+set(SUBSYS_NAME tests_people)
+set(SUBSYS_DESC "Point cloud library people module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS people)
+set(OPT_DEPS) # module does not depend on these
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ include_directories(${VTK_INCLUDE_DIRS})
+ PCL_ADD_TEST(a_people_detection_test test_people_detection
+ FILES test_people_groundBasedPeopleDetectionApp.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_kdtree pcl_search pcl_features pcl_sample_consensus pcl_filters pcl_segmentation pcl_people
+ ARGUMENTS "${PCL_SOURCE_DIR}/people/data/trainedLinearSVMForPeopleDetectionWithHOG.yaml" "${PCL_SOURCE_DIR}/test/five_people.pcd")
+endif (build)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2013-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * main_ground_based_people_detection_app.cpp
+ * Created on: Nov 30, 2012
+ * Author: Matteo Munaro
+ *
+ * Test file for people detection on a point cloud.
+ * As a first step, the ground plane is manually initialized; people detection is then performed with the
+ * GroundBasedPeopleDetectionApp class, which implements the people detection algorithm described in:
+ * M. Munaro, F. Basso and E. Menegatti,
+ * Tracking people within groups with RGB-D data,
+ * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012.
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/point_types.h>
+#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/people/ground_based_people_detection_app.h>
+
+typedef pcl::PointXYZRGB PointT;
+typedef pcl::PointCloud<PointT> PointCloudT;
+
+enum { COLS = 640, ROWS = 480 };
+PointCloudT::Ptr cloud;
+pcl::people::PersonClassifier<pcl::RGB> person_classifier;
+std::string svm_filename;
+float min_confidence;
+float min_width;
+float max_width;
+float min_height;
+float max_height;
+float voxel_size;
+Eigen::Matrix3f rgb_intrinsics_matrix;
+Eigen::VectorXf ground_coeffs;
+
+TEST (PCL, PersonClassifier)
+{
+ // Create classifier for people detection:
+ EXPECT_TRUE (person_classifier.loadSVMFromFile(svm_filename)); // load trained SVM
+}
+
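+// Note: the detection test below reuses the global person_classifier loaded in the test above;
+// by default Google Test runs the cases in declaration order, so a failure here usually means
+// the SVM file could not be loaded rather than a problem in the detector itself.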
+TEST (PCL, GroundBasedPeopleDetectionApp)
+{
+ // People detection app initialization:
+ pcl::people::GroundBasedPeopleDetectionApp<PointT> people_detector; // people detection object
+ people_detector.setVoxelSize(voxel_size); // set the voxel size
+ people_detector.setIntrinsics(rgb_intrinsics_matrix); // set RGB camera intrinsic parameters
+ people_detector.setClassifier(person_classifier); // set person classifier
+ people_detector.setPersonClusterLimits(min_height, max_height, min_width, max_width);
+
+ // Perform people detection on the new cloud:
+ std::vector<pcl::people::PersonCluster<PointT> > clusters; // vector containing persons clusters
+ people_detector.setInputCloud(cloud);
+ people_detector.setGround(ground_coeffs); // set floor coefficients
+ EXPECT_TRUE (people_detector.compute(clusters)); // perform people detection
+
+ unsigned int k = 0;
+ for(std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
+ {
+ if(it->getPersonConfidence() > min_confidence) // count only people with confidence above a threshold
+ k++;
+ }
+ EXPECT_EQ (k, 5); // verify number of people found (should be five)
+}
+
+int main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ cerr << "No svm filename provided. Please download `trainedLinearSVMForPeopleDetectionWithHOG.yaml` and pass its path to the test." << endl;
+ return (-1);
+ }
+
+ if (argc < 3)
+ {
+ cerr << "No test file given. Please download 'five_people.pcd` and pass its path to the test." << endl;
+ return (-1);
+ }
+
+ cloud = PointCloudT::Ptr (new PointCloudT);
+ if (pcl::io::loadPCDFile (argv[2], *cloud) < 0)
+ {
+ cerr << "Failed to read test file. Please download `five_people.pcd` and pass its path to the test." << endl;
+ return (-1);
+ }
+
+ // Algorithm parameters:
+ svm_filename = argv[1];
+ min_confidence = -1.5;
+ min_width = 0.1;
+ max_width = 8.0;
+ min_height = 1.3;
+ max_height = 2.3;
+ voxel_size = 0.06;
+
+ rgb_intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics
+ ground_coeffs.resize(4);
+ ground_coeffs << -0.0103586, 0.997011, 0.0765573, -1.26614; // set ground coefficients
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
--- /dev/null
+set(SUBSYS_NAME tests_recognition)
+set(SUBSYS_DESC "Point cloud library recognition module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS recognition)
+set(OPT_DEPS keypoints) # keypoints is an optional dependency for these tests
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ PCL_ADD_TEST(a_recognition_ism_test test_recognition_ism
+ FILES test_recognition_ism.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/ism_train.pcd" "${PCL_SOURCE_DIR}/test/ism_test.pcd")
+
+ if (BUILD_keypoints)
+ PCL_ADD_TEST(a_recognition_cg_test test_recognition_cg
+ FILES test_recognition_cg.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_kdtree pcl_features pcl_recognition pcl_keypoints
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/milk.pcd" "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+ endif (BUILD_keypoints)
+endif (build)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2012, Willow Garage, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: $
+ *
+ */
+#include <gtest/gtest.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/point_cloud.h>
+#include <pcl/common/transforms.h>
+#include <pcl/correspondence.h>
+#include <pcl/features/normal_3d_omp.h>
+#include <pcl/features/shot_omp.h>
+#include <pcl/features/board.h>
+#include <pcl/filters/uniform_sampling.h>
+#include <pcl/recognition/cg/hough_3d.h>
+#include <pcl/recognition/cg/geometric_consistency.h>
+#include <pcl/kdtree/kdtree_flann.h>
+#include <pcl/kdtree/impl/kdtree_flann.hpp>
+#include <pcl/common/eigen.h>
+
+using namespace std;
+using namespace pcl;
+using namespace pcl::io;
+
+typedef PointXYZ PointType;
+typedef Normal NormalType;
+typedef ReferenceFrame RFType;
+typedef SHOT352 DescriptorType;
+
+PointCloud<PointType>::Ptr model_ (new PointCloud<PointType> ());
+PointCloud<PointType>::Ptr model_downsampled_ (new PointCloud<PointType> ());
+PointCloud<PointType>::Ptr scene_ (new PointCloud<PointType> ());
+PointCloud<PointType>::Ptr scene_downsampled_ (new PointCloud<PointType> ());
+PointCloud<NormalType>::Ptr model_normals_ (new PointCloud<NormalType> ());
+PointCloud<NormalType>::Ptr scene_normals_ (new PointCloud<NormalType> ());
+PointCloud<DescriptorType>::Ptr model_descriptors_ (new PointCloud<DescriptorType> ());
+PointCloud<DescriptorType>::Ptr scene_descriptors_ (new PointCloud<DescriptorType> ());
+CorrespondencesPtr model_scene_corrs_ (new Correspondences ());
+
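+// Helper used by both grouping tests: applies the recovered pose to the model, looks up the
+// nearest scene point for every transformed model point with a kd-tree and returns the
+// root-mean-square of those distances (DBL_MAX when no neighbours are found at all).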
+double
+computeRmsE (const PointCloud<PointType>::ConstPtr &model, const PointCloud<PointType>::ConstPtr &scene, const Eigen::Matrix4f &rototranslation)
+{
+ PointCloud<PointType> transformed_model;
+ transformPointCloud (*model, transformed_model, rototranslation);
+
+ KdTreeFLANN<PointType> tree;
+ tree.setInputCloud (scene);
+
+ double sqr_norm_sum = 0;
+ int found_points = 0;
+
+ vector<int> neigh_indices (1);
+ vector<float> neigh_sqr_dists (1);
+ for (size_t i = 0; i < transformed_model.size (); ++i)
+ {
+
+ int found_neighs = tree.nearestKSearch (transformed_model.at (i), 1, neigh_indices, neigh_sqr_dists);
+ if(found_neighs == 1)
+ {
+ ++found_points;
+ sqr_norm_sum += static_cast<double> (neigh_sqr_dists[0]);
+ }
+ }
+
+ if (found_points > 0)
+ return sqrt (sqr_norm_sum / double (transformed_model.size ()));
+
+ return numeric_limits<double>::max ();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, Hough3DGrouping)
+{
+ PointCloud<RFType>::Ptr model_rf (new PointCloud<RFType> ());
+ PointCloud<RFType>::Ptr scene_rf (new PointCloud<RFType> ());
+
+ //RFs
+ BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
+ rf_est.setRadiusSearch (0.015);
+ rf_est.setInputCloud (model_downsampled_);
+ rf_est.setInputNormals (model_normals_);
+ rf_est.setSearchSurface (model_);
+ rf_est.compute (*model_rf);
+
+ rf_est.setInputCloud (scene_downsampled_);
+ rf_est.setInputNormals (scene_normals_);
+ rf_est.setSearchSurface (scene_);
+ rf_est.compute (*scene_rf);
+
+ vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
+
+ //Actual CG
+ Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
+ clusterer.setInputCloud (model_downsampled_);
+ clusterer.setInputRf (model_rf);
+ clusterer.setSceneCloud (scene_downsampled_);
+ clusterer.setSceneRf (scene_rf);
+ clusterer.setModelSceneCorrespondences (model_scene_corrs_);
+ clusterer.setHoughBinSize (0.03);
+ clusterer.setHoughThreshold (25);
+ EXPECT_TRUE (clusterer.recognize (rototranslations));
+
+ //Assertions
+ EXPECT_EQ (rototranslations.size (), 1);
+ EXPECT_LT (computeRmsE (model_, scene_, rototranslations[0]), 1E-2);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, GeometricConsistencyGrouping)
+{
+ vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
+
+ GeometricConsistencyGrouping<PointType, PointType> clusterer;
+ clusterer.setInputCloud (model_downsampled_);
+ clusterer.setSceneCloud (scene_downsampled_);
+ clusterer.setModelSceneCorrespondences (model_scene_corrs_);
+ clusterer.setGCSize (0.015);
+ clusterer.setGCThreshold (25);
+ EXPECT_TRUE (clusterer.recognize (rototranslations));
+
+ //Assertions
+ EXPECT_EQ (rototranslations.size (), 1);
+ EXPECT_LT (computeRmsE (model_, scene_, rototranslations[0]), 1E-4);
+}
+
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 3)
+ {
+ cerr << "No test file given. Please download `milk.pcd` and `milk_cartoon_all_small_clorox.pcd` and pass their paths to the test." << endl;
+ return (-1);
+ }
+
+ if (loadPCDFile (argv[1], *model_) < 0)
+ {
+ cerr << "Failed to read test file. Please download `milk.pcd` and pass its path to the test." << endl;
+ return (-1);
+ }
+
+ if (loadPCDFile (argv[2], *scene_) < 0)
+ {
+ cerr << "Failed to read test file. Please download `milk_cartoon_all_small_clorox.pcd` and pass its path to the test." << endl;
+ return (-1);
+ }
+
+ //Normals
+ NormalEstimationOMP<PointType, NormalType> norm_est;
+ norm_est.setKSearch (10);
+ norm_est.setInputCloud (model_);
+ norm_est.compute (*model_normals_);
+
+ norm_est.setInputCloud (scene_);
+ norm_est.compute (*scene_normals_);
+
+ //Downsampling
+ UniformSampling<PointType> uniform_sampling;
+ uniform_sampling.setInputCloud (model_);
+ uniform_sampling.setRadiusSearch (0.005);
+ uniform_sampling.filter (*model_downsampled_);
+
+ uniform_sampling.setInputCloud (scene_);
+ uniform_sampling.setRadiusSearch (0.02);
+ uniform_sampling.filter (*scene_downsampled_);
+
+ //Descriptor
+ SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
+ descr_est.setRadiusSearch (0.015);
+ descr_est.setInputCloud (model_downsampled_);
+ descr_est.setInputNormals (model_normals_);
+ descr_est.setSearchSurface (model_);
+ descr_est.compute (*model_descriptors_);
+
+ descr_est.setInputCloud (scene_downsampled_);
+ descr_est.setInputNormals (scene_normals_);
+ descr_est.setSearchSurface (scene_);
+ descr_est.compute (*scene_descriptors_);
+
+ //Correspondences with KdTree
+ KdTreeFLANN<DescriptorType> match_search;
+ match_search.setInputCloud (model_descriptors_);
+
+ for (size_t i = 0; i < scene_descriptors_->size (); ++i)
+ {
+ if ( pcl_isfinite( scene_descriptors_->at (i).descriptor[0] ) )
+ {
+ vector<int> neigh_indices (1);
+ vector<float> neigh_sqr_dists (1);
+ int found_neighs = match_search.nearestKSearch (scene_descriptors_->at (i), 1, neigh_indices, neigh_sqr_dists);
+ if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f)
+ {
+ Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
+ model_scene_corrs_->push_back (corr);
+ }
+ }
+ }
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2012, Willow Garage, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: $
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/kdtree/kdtree_flann.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/search/search.h>
+#include <pcl/features/normal_3d.h>
+#include <pcl/features/feature.h>
+#include <pcl/features/fpfh.h>
+#include <pcl/features/impl/fpfh.hpp>
+#include <pcl/recognition/implicit_shape_model.h>
+#include <pcl/recognition/impl/implicit_shape_model.hpp>
+
+pcl::PointCloud<pcl::PointXYZ>::Ptr training_cloud;
+pcl::PointCloud<pcl::PointXYZ>::Ptr testing_cloud;
+pcl::PointCloud<pcl::Normal>::Ptr training_normals;
+pcl::PointCloud<pcl::Normal>::Ptr testing_normals;
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (ISM, TrainRecognize)
+{
+ std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clouds;
+ std::vector<pcl::PointCloud<pcl::Normal>::Ptr > normals;
+ std::vector<unsigned int> classes;
+
+ clouds.push_back (training_cloud);
+ normals.push_back (training_normals);
+ classes.push_back (0);
+
+ pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> >::Ptr fpfh
+ (new pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> >);
+ fpfh->setRadiusSearch (30.0);
+ pcl::Feature< pcl::PointXYZ, pcl::Histogram<153> >::Ptr feature_estimator(fpfh);
+
+ pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal>::ISMModelPtr model = boost::shared_ptr<pcl::features::ISMModel> (new pcl::features::ISMModel);
+
+ pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal> ism;
+ ism.setFeatureEstimator(feature_estimator);
+ ism.setTrainingClouds (clouds);
+ ism.setTrainingClasses (classes);
+ ism.setTrainingNormals (normals);
+ ism.setSamplingSize (2.0f);
+ ism.trainISM (model);
+
+ int _class = 0;
+ double radius = model->sigmas_[_class] * 10.0;
+ double sigma = model->sigmas_[_class];
+
+ boost::shared_ptr<pcl::features::ISMVoteList<pcl::PointXYZ> > vote_list = ism.findObjects (model, testing_cloud, testing_normals, _class);
+ EXPECT_NE (vote_list->getNumberOfVotes (), 0);
+ std::vector<pcl::ISMPeak, Eigen::aligned_allocator<pcl::ISMPeak> > strongest_peaks;
+ vote_list->findStrongestPeaks (strongest_peaks, _class, radius, sigma);
+
+ EXPECT_NE (strongest_peaks.size (), 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (ISM, TrainWithWrongParameters)
+{
+ pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal> ism;
+
+ float prev_sampling_size = ism.getSamplingSize ();
+ EXPECT_NE (prev_sampling_size, 0.0);
+ ism.setSamplingSize (0.0f);
+ float curr_sampling_size = ism.getSamplingSize ();
+ EXPECT_EQ (curr_sampling_size, prev_sampling_size);
+
+ unsigned int prev_number_of_clusters = ism.getNumberOfClusters ();
+ EXPECT_NE (prev_number_of_clusters, 0);
+ ism.setNumberOfClusters (0);
+ unsigned int curr_number_of_clusters = ism.getNumberOfClusters ();
+ EXPECT_EQ (curr_number_of_clusters, prev_number_of_clusters);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 3)
+ {
+ std::cerr << "This test requires two point clouds (one for training and one for testing)." << std::endl;
+ std::cerr << "You can use these two clouds 'ism_train.pcd' and 'ism_test.pcd'." << std::endl;
+ return (-1);
+ }
+
+ training_cloud = (new pcl::PointCloud<pcl::PointXYZ>)->makeShared();
+ if (pcl::io::loadPCDFile (argv[1], *training_cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `ism_train.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+ testing_cloud = (new pcl::PointCloud<pcl::PointXYZ>)->makeShared();
+ if (pcl::io::loadPCDFile (argv[2], *testing_cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `ism_test.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ training_normals = (new pcl::PointCloud<pcl::Normal>)->makeShared();
+ testing_normals = (new pcl::PointCloud<pcl::Normal>)->makeShared();
+ pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimator;
+ normal_estimator.setRadiusSearch (25.0);
+ normal_estimator.setInputCloud(training_cloud);
+ normal_estimator.compute(*training_normals);
+ normal_estimator.setInputCloud(testing_cloud);
+ normal_estimator.compute(*testing_normals);
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
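The ISM test above retrains the model on every run. When experimenting with this API outside the test it can be convenient to persist a trained model; a minimal sketch, assuming the saveModelToFile/loadModelFromFile members of pcl::features::ISMModel are available in the targeted PCL version (the file name is arbitrary, and `model` is the ISMModelPtr produced by ism.trainISM above):

    // Persist a trained implicit shape model and read it back later.
    std::string model_file ("trained_ism_model.txt");  // arbitrary, hypothetical file name
    model->saveModelToFile (model_file);

    boost::shared_ptr<pcl::features::ISMModel> loaded_model (new pcl::features::ISMModel);
    loaded_model->loadModelFromFile (model_file);
    // loaded_model can now be passed to ism.findObjects () exactly like `model`.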
-PCL_ADD_TEST(a_registration_test test_registration
- FILES test_registration.cpp
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd" "${PCL_SOURCE_DIR}/test/milk_color.pcd")
-
-PCL_ADD_TEST(registration_api test_registration_api
- FILES test_registration_api.cpp test_registration_api_data.h
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd")
-
-PCL_ADD_TEST(registration_warp_api test_warps
+set(SUBSYS_NAME tests_registration)
+set(SUBSYS_DESC "Point cloud library registration module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS registration)
+set(OPT_DEPS io) # module does not depend on these
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ PCL_ADD_TEST(registration_warp_api test_warps
FILES test_warps.cpp
- LINK_WITH pcl_gtest pcl_io pcl_registration)
-
-PCL_ADD_TEST(correspondence_estimation test_correspondence_estimation
- FILES test_correspondence_estimation.cpp
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features)
-
-PCL_ADD_TEST(correspondence_rejectors test_correspondence_rejectors
- FILES test_correspondence_rejectors.cpp
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bunny.pcd")
-
-PCL_ADD_TEST(fpcs_ia test_fpcs_ia
- FILES test_fpcs_ia.cpp test_fpcs_ia_data.h
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd")
-
-PCL_ADD_TEST(kfpcs_ia test_kfpcs_ia
- FILES test_kfpcs_ia.cpp test_kfpcs_ia_data.h
- LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree
- ARGUMENTS "${PCL_SOURCE_DIR}/test/office1_keypoints.pcd" "${PCL_SOURCE_DIR}/test/office2_keypoints.pcd")
\ No newline at end of file
+ LINK_WITH pcl_gtest pcl_registration)
+
+ PCL_ADD_TEST(correspondence_estimation test_correspondence_estimation
+ FILES test_correspondence_estimation.cpp
+ LINK_WITH pcl_gtest pcl_registration pcl_features)
+
+ if (BUILD_io)
+ PCL_ADD_TEST(a_registration_test test_registration
+ FILES test_registration.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd" "${PCL_SOURCE_DIR}/test/milk_color.pcd")
+
+ PCL_ADD_TEST(registration_api test_registration_api
+ FILES test_registration_api.cpp test_registration_api_data.h
+ LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd")
+
+ PCL_ADD_TEST(correspondence_rejectors test_correspondence_rejectors
+ FILES test_correspondence_rejectors.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bunny.pcd")
+
+ PCL_ADD_TEST(fpcs_ia test_fpcs_ia
+ FILES test_fpcs_ia.cpp test_fpcs_ia_data.h
+ LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/bun4.pcd")
+
+ PCL_ADD_TEST(kfpcs_ia test_kfpcs_ia
+ FILES test_kfpcs_ia.cpp test_kfpcs_ia_data.h
+ LINK_WITH pcl_gtest pcl_io pcl_registration pcl_features pcl_search pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/office1_keypoints.pcd" "${PCL_SOURCE_DIR}/test/office2_keypoints.pcd")
+ endif (BUILD_io)
+endif (build)
Eigen::Affine3f delta_transform;
sampleRandomTransform (delta_transform, 0., 0.05);
// Sample random global transform for each pair, to make sure we aren't biased around the origin
- Eigen::Affine3f net_transform;
+ Eigen::Affine3f net_transform;
sampleRandomTransform (net_transform, 2*M_PI, 10.);
-
+
PointCloud<PointXYZ>::ConstPtr source (cloud_source.makeShared ());
PointCloud<PointXYZ>::Ptr source_trans (new PointCloud<PointXYZ>);
PointCloud<PointXYZ>::Ptr target_trans (new PointCloud<PointXYZ>);
-
+
pcl::transformPointCloud (*source, *source_trans, delta_transform.inverse () * net_transform);
pcl::transformPointCloud (*source, *target_trans, net_transform);
reg.setInputSource (source_trans);
reg.setInputTarget (target_trans);
-
+
// Register
reg.align (cloud_reg);
Eigen::Matrix4f trans_final = reg.getFinalTransformation ();
size_t ntransforms = 10;
for (size_t t = 0; t < ntransforms; t++)
{
-
+
// Sample a fixed offset between cloud pairs
Eigen::Affine3f delta_transform;
// No rotation, since at a random offset this could make it converge to a wrong (but still reasonable) result
EXPECT_EQ (transformation (3, 3), 1);
*/
EXPECT_LT (reg.getFitnessScore (), 0.001);
-
+
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
if (force_cache_reciprocal)
tree_recip->setInputCloud (temp_src);
reg.setSearchMethodSource (tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
if (force_cache_reciprocal)
tree_recip->setInputCloud (src);
reg.setSearchMethodSource (tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
EXPECT_LT (reg.getFitnessScore (), 0.005);
}
-
+
// We get different results on 32 vs 64-bit systems. To address this, we've removed the explicit output test
copyPointCloud (cloud_target, *tgt);
PointCloud<PointT> output;
-
GeneralizedIterativeClosestPoint<PointT, PointT> reg;
reg.setInputSource (src);
reg.setInputTarget (tgt);
// Register
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
- EXPECT_LT (reg.getFitnessScore (), 0.001);
+ EXPECT_LT (reg.getFitnessScore (), 0.0001);
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
if (force_cache_reciprocal)
tree_recip->setInputCloud (src);
reg.setSearchMethodSource (tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
EXPECT_LT (reg.getFitnessScore (), 0.001);
}
+
+ // Test guess matrix
+ Eigen::Isometry3f transform = Eigen::Isometry3f (Eigen::AngleAxisf (0.25 * M_PI, Eigen::Vector3f::UnitX ())
+ * Eigen::AngleAxisf (0.50 * M_PI, Eigen::Vector3f::UnitY ())
+ * Eigen::AngleAxisf (0.33 * M_PI, Eigen::Vector3f::UnitZ ()));
+ transform.translation () = Eigen::Vector3f (0.1, 0.2, 0.3);
+ PointCloud<PointT>::Ptr transformed_tgt (new PointCloud<PointT>);
+ pcl::transformPointCloud (*tgt, *transformed_tgt, transform.matrix ()); // transformed_tgt is now a copy of tgt with a transformation matrix applied
+
+ GeneralizedIterativeClosestPoint<PointT, PointT> reg_guess;
+ reg_guess.setInputSource (src);
+ reg_guess.setInputTarget (transformed_tgt);
+ reg_guess.setMaximumIterations (50);
+ reg_guess.setTransformationEpsilon (1e-8);
+ reg_guess.align (output, transform.matrix ());
+ EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_LT (reg_guess.getFitnessScore (), 0.0001);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
EXPECT_LT (reg.getFitnessScore (), 0.001);
-
+
// Check again, for all possible caching schemes
for (int iter = 0; iter < 4; iter++)
{
if (force_cache_reciprocal)
tree_recip->setInputCloud (src);
reg.setSearchMethodSource (tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (output);
EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
reg.align (cloud_reg);
EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
EXPECT_LT (reg.getFitnessScore (), 0.0005);
-
+
// Check again, for all possible caching schemes
typedef pcl::PointXYZ PointT;
for (int iter = 0; iter < 4; iter++)
if (force_cache_reciprocal)
tree_recip->setInputCloud (cloud_source_ptr);
reg.setSearchMethodSource(tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (cloud_reg);
EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
* 1) the number of iterations is increased 1000 --> 5000
* 2) the feature correspondence randomness (the number of kNNs) is decreased 10 --> 2
*/
-
+
// Transform the source cloud by a large amount
Eigen::Vector3f initial_offset (100, 0, 0);
float angle = static_cast<float> (M_PI) / 2.0f;
fpfh_est.setInputNormals (normals.makeShared ());
fpfh_est.compute (features_target);
- // Initialize Sample Consensus Prerejective with 5x the number of iterations and 1/5 feature kNNs as SAC-IA
+ // Initialize Sample Consensus Prerejective with 5x the number of iterations and 1/5 feature kNNs as SAC-IA
SampleConsensusPrerejective<PointXYZ, PointXYZ, FPFHSignature33> reg;
reg.setMaxCorrespondenceDistance (0.1);
reg.setMaximumIterations (5000);
reg.setSimilarityThreshold (0.6f);
reg.setCorrespondenceRandomness (2);
-
+
// Set source and target cloud/features
reg.setInputSource (cloud_source_ptr);
reg.setInputTarget (cloud_target_ptr);
// Register
reg.align (cloud_reg);
-
+
// Check output consistency and quality of alignment
EXPECT_EQ (static_cast<int> (cloud_reg.points.size ()), static_cast<int> (cloud_source.points.size ()));
float inlier_fraction = static_cast<float> (reg.getInliers ().size ()) / static_cast<float> (cloud_source.points.size ());
EXPECT_GT (inlier_fraction, 0.95f);
-
+
// Check again, for all possible caching schemes
typedef pcl::PointXYZ PointT;
for (int iter = 0; iter < 4; iter++)
if (force_cache_reciprocal)
tree_recip->setInputCloud (cloud_source_ptr);
reg.setSearchMethodSource(tree_recip, force_cache_reciprocal);
-
+
// Register
reg.align (cloud_reg);
ny = 0.6f * p.y - 0.2f;
nz = 1.0f;
- float magnitude = sqrtf (nx * nx + ny * ny + nz * nz);
+ float magnitude = std::sqrt (nx * nx + ny * ny + nz * nz);
nx /= magnitude;
ny /= magnitude;
nz /= magnitude;
ny = 0.6f * p.y - 0.2f;
nz = 1.0f;
- float magnitude = sqrtf (nx * nx + ny * ny + nz * nz);
+ float magnitude = std::sqrt (nx * nx + ny * ny + nz * nz);
nx /= magnitude;
ny /= magnitude;
nz /= magnitude;
-PCL_ADD_TEST(sample_consensus test_sample_consensus
- FILES test_sample_consensus.cpp
- LINK_WITH pcl_gtest pcl_sample_consensus)
+set(SUBSYS_NAME tests_sample_consensus)
+set(SUBSYS_DESC "Point cloud library sample_consensus module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS sample_consensus)
+set(OPT_DEPS io)
-PCL_ADD_TEST(sample_consensus_plane_models test_sample_consensus_plane_models
- FILES test_sample_consensus_plane_models.cpp
- LINK_WITH pcl_gtest pcl_io pcl_sample_consensus
- ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd")
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
-PCL_ADD_TEST(sample_consensus_quadric_models test_sample_consensus_quadric_models
- FILES test_sample_consensus_quadric_models.cpp
- LINK_WITH pcl_gtest pcl_sample_consensus)
+if (build)
+ PCL_ADD_TEST(sample_consensus test_sample_consensus
+ FILES test_sample_consensus.cpp
+ LINK_WITH pcl_gtest pcl_sample_consensus pcl_search pcl_common)
-PCL_ADD_TEST(sample_consensus_line_models test_sample_consensus_line_models
- FILES test_sample_consensus_line_models.cpp
- LINK_WITH pcl_gtest pcl_sample_consensus)
+ if (BUILD_io)
+ PCL_ADD_TEST(sample_consensus_plane_models test_sample_consensus_plane_models
+ FILES test_sample_consensus_plane_models.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_sample_consensus
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/sac_plane_test.pcd")
+ endif (BUILD_io)
+
+ PCL_ADD_TEST(sample_consensus_quadric_models test_sample_consensus_quadric_models
+ FILES test_sample_consensus_quadric_models.cpp
+ LINK_WITH pcl_gtest pcl_sample_consensus)
+
+ PCL_ADD_TEST(sample_consensus_line_models test_sample_consensus_line_models
+ FILES test_sample_consensus_line_models.cpp
+ LINK_WITH pcl_gtest pcl_sample_consensus)
+endif (build)
-PCL_ADD_TEST(kdtree_search test_kdtree_search
- FILES test_kdtree.cpp
- LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree)
+set(SUBSYS_NAME tests_search)
+set(SUBSYS_DESC "Point cloud library search module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS search)
+set(OPT_DEPS io)
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ PCL_ADD_TEST(kdtree_search test_kdtree_search
+ FILES test_kdtree.cpp
+ LINK_WITH pcl_gtest pcl_search pcl_kdtree)
PCL_ADD_TEST(flann_search test_flann_search
FILES test_flann_search.cpp
- LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree)
+ LINK_WITH pcl_gtest pcl_search pcl_kdtree)
PCL_ADD_TEST(organized_neighbor test_organized_search
FILES test_organized.cpp
- LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree)
+ LINK_WITH pcl_gtest pcl_search pcl_kdtree)
+
+ PCL_ADD_TEST(octree_search test_octree_search
+ FILES test_octree.cpp
+ LINK_WITH pcl_gtest pcl_search pcl_octree pcl_common)
-PCL_ADD_TEST(octree_search test_octree_search
- FILES test_octree.cpp
- LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree)
+ if (BUILD_io)
+ PCL_ADD_TEST(search test_search
+ FILES test_search.cpp
+ LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd")
+ endif (BUILD_io)
+endif (build)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2009, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: test_ii_normals.cpp 4084 2012-01-31 02:05:42Z rusu $
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/search/brute_force.h>
+#include <pcl/search/kdtree.h>
+#include <pcl/search/organized.h>
+#include <pcl/search/octree.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/common/time.h>
+#include <boost/random/variate_generator.hpp>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/uniform_real.hpp>
+
+using namespace pcl;
+using namespace std;
+
+/** \brief if set to a value other than 0 -> fine-grained output */
+#define DEBUG_OUT 1
+#define EXCESSIVE_TESTING 0
+
+#define TEST_unorganized_dense_cloud_COMPLETE_KNN 1
+#define TEST_unorganized_dense_cloud_VIEW_KNN 1
+#define TEST_unorganized_sparse_cloud_COMPLETE_KNN 1
+#define TEST_unorganized_sparse_cloud_VIEW_KNN 1
+#define TEST_unorganized_grid_cloud_COMPLETE_RADIUS 1
+#define TEST_unorganized_dense_cloud_COMPLETE_RADIUS 1
+#define TEST_unorganized_dense_cloud_VIEW_RADIUS 1
+#define TEST_unorganized_sparse_cloud_COMPLETE_RADIUS 1
+#define TEST_unorganized_sparse_cloud_VIEW_RADIUS 1
+#define TEST_ORGANIZED_SPARSE_COMPLETE_KNN 1
+#define TEST_ORGANIZED_SPARSE_VIEW_KNN 1
+#define TEST_ORGANIZED_SPARSE_COMPLETE_RADIUS 1
+#define TEST_ORGANIZED_SPARSE_VIEW_RADIUS 1
+
+#if EXCESSIVE_TESTING
+/** \brief number of points used for creating unordered point clouds */
+const unsigned int unorganized_point_count = 100000;
+
+/** \brief number of search operations on ordered point clouds*/
+const unsigned int query_count = 5000;
+#else
+/** \brief number of points used for creating unordered point clouds */
+const unsigned int unorganized_point_count = 1200;
+
+/** \brief number of search operations on ordered point clouds*/
+const unsigned int query_count = 100;
+#endif
+
+/** \brief organized point cloud*/
+PointCloud<PointXYZ>::Ptr organized_sparse_cloud (new PointCloud<PointXYZ>);
+
+/** \brief unorganized point cloud*/
+PointCloud<PointXYZ>::Ptr unorganized_dense_cloud (new PointCloud<PointXYZ>);
+
+/** \brief unorganized point cloud*/
+PointCloud<PointXYZ>::Ptr unorganized_sparse_cloud (new PointCloud<PointXYZ>);
+
+/** \brief unorganized point cloud*/
+PointCloud<PointXYZ>::Ptr unorganized_grid_cloud (new PointCloud<PointXYZ>);
+
+/** \brief uniformly distributed random number generator for unsigned ints in the range [0;10] */
+boost::variate_generator< boost::mt19937, boost::uniform_int<unsigned> > rand_uint(boost::mt19937 (), boost::uniform_int<unsigned> (0, 10));
+/** \brief uniformly distributed random number generator for floats in the range [0;1] */
+boost::variate_generator< boost::mt19937, boost::uniform_real<float> > rand_float(boost::mt19937 (), boost::uniform_real<float> (0, 1));
+
+/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
+std::vector<int> unorganized_input_indices;
+
+/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
+std::vector<int> organized_input_indices;
+
+/** \brief instance of brute force search method to be tested*/
+pcl::search::BruteForce<pcl::PointXYZ> brute_force;
+
+/** \brief instance of KDTree search method to be tested*/
+pcl::search::KdTree<pcl::PointXYZ> KDTree;
+
+/** \brief instance of Octree search method to be tested*/
+pcl::search::Octree<pcl::PointXYZ> octree_search (0.1);
+
+/** \brief instance of Organized search method to be tested*/
+pcl::search::OrganizedNeighbor<pcl::PointXYZ> organized;
+
+/** \brief list of search methods for unorganized search test*/
+vector<search::Search<PointXYZ>* > unorganized_search_methods;
+
+/** \brief list of search methods for organized search test*/
+vector<search::Search<PointXYZ>* > organized_search_methods;
+
+/** \brief lists of indices to be used as query points for various search methods and different cloud types*/
+vector<int> unorganized_dense_cloud_query_indices;
+vector<int> unorganized_sparse_cloud_query_indices;
+vector<int> organized_sparse_query_indices;
+
+/** \brief tests whether the result of a search contains unique point ids or not
+ * @param indices resulting indices from a search
+ * @param name name of the search method that returned these indices
+ * @return true if indices are unique, false otherwise
+ */
+bool testUniqueness (const vector<int>& indices, const string& name)
+{
+ bool uniqueness = true;
+ for (unsigned idx1 = 1; idx1 < indices.size () && uniqueness; ++idx1)
+ {
+ // check whether resulting indices are unique
+ for (unsigned idx2 = 0; idx2 < idx1; ++idx2)
+ {
+ if (indices [idx1] == indices [idx2])
+ {
+#if DEBUG_OUT
+ std::cout << name << " search: index is twice at positions: " << idx1 << " (" << indices [idx1] << ") , " << idx2 << " (" << indices [idx2] << ")" << std::endl;
+#endif
+ // can only be set to false anyway -> no sync required
+ uniqueness = false;
+ break;
+ }
+ }
+ }
+ return uniqueness;
+}
+
+
+/** \brief tests whether the ordering of results is ascending on distances
+ * \param distances resulting distances from a search
+ * \param name name of the search method that returned these distances
+ * \return true if distances are in weakly ascending order, false otherwise
+ */
+bool testOrder (const vector<float>& distances, const string& name)
+{
+ bool ordered = true;
+ for (unsigned idx1 = 1; idx1 < distances.size (); ++idx1)
+ {
+ if (distances [idx1-1] > distances [idx1])
+ {
+#if DEBUG_OUT
+ std::cout << name << " search: not properly sorted: " << idx1 - 1 << "(" << distances [idx1-1] << ") > "
+ << idx1 << "(" << distances [idx1] << ")"<< std::endl;
+#endif
+ ordered = false;
+ break;
+ }
+ }
+
+ return ordered;
+}
+
+/** \brief tests whether the results are from the view (subset of the cloud) given by input_indices and are not NaN
+ * @param indices_mask defines the subset of allowed points (view) in the result of the search
+ * @param nan_mask defines a lookup that indicates whether a point at a given position is finite or not
+ * @param indices result of a search to be tested
+ * @param name name of search method that returned the result
+ * @return true if result is valid, false otherwise
+ */
+template<typename PointT> bool
+testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const vector<bool>& indices_mask, const vector<bool>& nan_mask, const vector<int>& indices, const vector<int>& /*input_indices*/, const string& name)
+{
+ bool validness = true;
+ for (vector<int>::const_iterator iIt = indices.begin (); iIt != indices.end (); ++iIt)
+ {
+ if (!indices_mask [*iIt])
+ {
+#if DEBUG_OUT
+ cerr << name << ": result contains an invalid point: " << *iIt << " not in indices list.\n";
+
+// for (vector<int>::const_iterator iIt2 = input_indices.begin (); iIt2 != input_indices.end (); ++iIt2)
+// cout << *iIt2 << " ";
+// cout << endl;
+#endif
+ validness = false;
+ break;
+ }
+ else if (!nan_mask [*iIt])
+ {
+#if DEBUG_OUT
+ cerr << name << ": result contains an invalid point: " << *iIt << " = NaN (" << point_cloud->points [*iIt].x << " , "
+ << point_cloud->points [*iIt].y << " , "
+ << point_cloud->points [*iIt].z << ")\n";
+#endif
+ validness = false;
+ break;
+ }
+ }
+
+ return validness;
+}
+
+/** \brief compares two sets of search results
+ * \param indices1
+ * \param distances1
+ * \param name1
+ * \param indices2
+ * \param distances2
+ * \param name2
+ * \param eps threshold for comparing the distances
+ * \return true if both sets are the same, false otherwise
+ */
+bool compareResults (const std::vector<int>& indices1, const std::vector<float>& distances1, const std::string& name1,
+ const std::vector<int>& indices2, const std::vector<float>& distances2, const std::string& name2, float eps)
+{
+ bool equal = true;
+ if (indices1.size () != indices2.size ())
+ {
+#if DEBUG_OUT
+ cerr << "size of results between " << name1 << " search and " << name2 << " search do not match " <<indices1.size () << " vs. " << indices2.size () << endl;
+// for (unsigned idx = 0; idx < std::min (indices1.size (), indices2.size ()); ++idx)
+// {
+// cout << idx <<".\t" << indices1[idx] << "\t(" << distances1[idx] << "),\t" << indices2[idx] << "\t(" << distances2[idx] << ")\n";
+// }
+// for (unsigned idx = std::min (indices1.size (), indices2.size ()); idx < std::max (indices1.size (), indices2.size ()); ++idx)
+// {
+// if (idx >= indices1.size ())
+// cout << idx <<".\t \t ,\t" << indices2[idx] << "\t(" << distances2[idx] << ")\n";
+// else
+// cout << idx <<".\t" << indices1[idx] << "\t(" << distances1[idx] << ")\n";
+// }
+#endif
+ equal = false;
+ }
+ else
+ {
+ for (unsigned idx = 0; idx < indices1.size (); ++idx)
+ {
+ if (indices1[idx] != indices2[idx] && fabs (distances1[idx] - distances2[idx]) > eps)
+ {
+#if DEBUG_OUT
+ cerr << "results between " << name1 << " search and " << name2 << " search do not match: " << idx << " nearest neighbor: "
+ << indices1[idx] << " with distance: " << distances1[idx] << " vs. "
+ << indices2[idx] << " with distance: " << distances2[idx] << endl;
+#endif
+ equal = false;
+ break;
+ }
+ }
+ }
+ return equal;
+}
+
+/** \brief performs KNN searches and tests that the results are unique, valid and ordered. Additionally it tests whether all search methods return the same results
+ * \param point_cloud the input point cloud
+ * \param search_methods vector of all search methods to be tested
+ * \param query_indices indices of query points in the point cloud (not necessarily in input_indices)
+ * \param input_indices indices defining a subset of the point cloud.
+ */
+template<typename PointT> void
+testKNNSearch (typename PointCloud<PointT>::ConstPtr point_cloud, vector<search::Search<PointT>*> search_methods,
+ const vector<int>& query_indices, const vector<int>& input_indices = vector<int> () )
+{
+ vector< vector<int> >indices (search_methods.size ());
+ vector< vector<float> >distances (search_methods.size ());
+ vector<bool> passed (search_methods.size (), true);
+
+ vector<bool> indices_mask (point_cloud->size (), true);
+ vector<bool> nan_mask (point_cloud->size (), true);
+
+ if (input_indices.size () != 0)
+ {
+ indices_mask.assign (point_cloud->size (), false);
+ for (vector<int>::const_iterator iIt = input_indices.begin (); iIt != input_indices.end (); ++iIt)
+ indices_mask [*iIt] = true;
+ }
+
+ // also mark NaN points as invalid
+ #pragma omp parallel for
+ for (int pIdx = 0; pIdx < int (point_cloud->size ()); ++pIdx)
+ {
+ if (!isFinite (point_cloud->points [pIdx]))
+ nan_mask [pIdx] = false;
+ }
+
+ boost::shared_ptr<vector<int> > input_indices_;
+ if (input_indices.size ())
+ input_indices_.reset (new vector<int> (input_indices));
+
+ #pragma omp parallel for
+ for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
+ search_methods [sIdx]->setInputCloud (point_cloud, input_indices_);
+
+ // test knn values from 1, 8, 64, 512
+ for (unsigned knn = 1; knn <= 512; knn <<= 3)
+ {
+ // find nn for each point in the cloud
+ for (vector<int>::const_iterator qIt = query_indices.begin (); qIt != query_indices.end (); ++qIt)
+ {
+ #pragma omp parallel for
+ for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
+ {
+ search_methods [sIdx]->nearestKSearch (point_cloud->points[*qIt], knn, indices [sIdx], distances [sIdx]);
+ passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
+ passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
+ passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
+ }
+
+ // compare results to each other
+ #pragma omp parallel for
+ for (int sIdx = 1; sIdx < int (search_methods.size ()); ++sIdx)
+ {
+ passed [sIdx] = passed [sIdx] && compareResults (indices [0], distances [0], search_methods [0]->getName (),
+ indices [sIdx], distances [sIdx], search_methods [sIdx]->getName (), 1e-6f);
+ }
+ }
+ }
+ for (size_t sIdx = 0; sIdx < search_methods.size (); ++sIdx)
+ {
+ cout << search_methods [sIdx]->getName () << ": " << (passed[sIdx]?"passed":"failed") << endl;
+ EXPECT_TRUE (passed [sIdx]);
+ }
+}
+
+/** \brief performs radius searches and tests that the results are unique, valid and ordered. Additionally it tests whether all search methods return the same results
+ * \param point_cloud the input point cloud
+ * \param search_methods vector of all search methods to be tested
+ * \param query_indices indices of query points in the point cloud (not necessarily in input_indices)
+ * \param input_indices indices defining a subset of the point cloud.
+ */
+template<typename PointT> void
+testRadiusSearch (typename PointCloud<PointT>::ConstPtr point_cloud, vector<search::Search<PointT>*> search_methods,
+ const vector<int>& query_indices, const vector<int>& input_indices = vector<int> ())
+{
+ vector< vector<int> >indices (search_methods.size ());
+ vector< vector<float> >distances (search_methods.size ());
+ vector <bool> passed (search_methods.size (), true);
+ vector<bool> indices_mask (point_cloud->size (), true);
+ vector<bool> nan_mask (point_cloud->size (), true);
+
+ if (input_indices.size () != 0)
+ {
+ indices_mask.assign (point_cloud->size (), false);
+ for (vector<int>::const_iterator iIt = input_indices.begin (); iIt != input_indices.end (); ++iIt)
+ indices_mask [*iIt] = true;
+ }
+
+ // also mark NaN points as invalid
+ #pragma omp parallel for
+ for (int pIdx = 0; pIdx < int (point_cloud->size ()); ++pIdx)
+ {
+ if (!isFinite (point_cloud->points [pIdx]))
+ nan_mask [pIdx] = false;
+ }
+
+ boost::shared_ptr<vector<int> > input_indices_;
+ if (input_indices.size ())
+ input_indices_.reset (new vector<int> (input_indices));
+
+ #pragma omp parallel for
+ for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
+ search_methods [sIdx]->setInputCloud (point_cloud, input_indices_);
+
+ // test radii 0.01, 0.02, 0.04, 0.08
+ for (float radius = 0.01f; radius < 0.1f; radius *= 2.0f)
+ {
+ //cout << radius << endl;
+ // find nn for each point in the cloud
+ for (vector<int>::const_iterator qIt = query_indices.begin (); qIt != query_indices.end (); ++qIt)
+ {
+ #pragma omp parallel for
+ for (int sIdx = 0; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
+ {
+ search_methods [sIdx]->radiusSearch (point_cloud->points[*qIt], radius, indices [sIdx], distances [sIdx], 0);
+ passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
+ passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
+ passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
+ }
+
+ // compare results to each other
+ #pragma omp parallel for
+ for (int sIdx = 1; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
+ {
+ passed [sIdx] = passed [sIdx] && compareResults (indices [0], distances [0], search_methods [0]->getName (),
+ indices [sIdx], distances [sIdx], search_methods [sIdx]->getName (), 1e-6f);
+ }
+ }
+ }
+ for (unsigned sIdx = 0; sIdx < search_methods.size (); ++sIdx)
+ {
+ cout << search_methods [sIdx]->getName () << ": " << (passed[sIdx]?"passed":"failed") << endl;
+ EXPECT_TRUE (passed [sIdx]);
+ }
+}
+
+#if TEST_unorganized_dense_cloud_COMPLETE_KNN
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_dense_cloud_Complete_KNN)
+{
+ testKNNSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices);
+}
+#endif
+
+#if TEST_unorganized_dense_cloud_VIEW_KNN
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_dense_cloud_View_KNN)
+{
+ testKNNSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices, unorganized_input_indices);
+}
+#endif
+
+#if TEST_unorganized_sparse_cloud_COMPLETE_KNN
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_sparse_cloud_Complete_KNN)
+{
+ testKNNSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices);
+}
+#endif
+
+#if TEST_unorganized_sparse_cloud_VIEW_KNN
+TEST (PCL, unorganized_sparse_cloud_View_KNN)
+{
+ testKNNSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices, unorganized_input_indices);
+}
+#endif
+
+#if TEST_unorganized_dense_cloud_COMPLETE_RADIUS
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_dense_cloud_Complete_Radius)
+{
+ testRadiusSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices);
+}
+#endif
+
+#if TEST_unorganized_grid_cloud_COMPLETE_RADIUS
+// Test search on unorganized point clouds in a grid
+TEST (PCL, unorganized_grid_cloud_Complete_Radius)
+{
+ vector<int> query_indices;
+ query_indices.reserve (query_count);
+
+ unsigned skip = static_cast<unsigned> (unorganized_grid_cloud->size ()) / query_count;
+ for (unsigned idx = 0; idx < unorganized_grid_cloud->size () && query_indices.size () < query_count; ++idx)
+ if ((rand () % skip) == 0 && isFinite (unorganized_grid_cloud->points [idx]))
+ query_indices.push_back (idx);
+
+ testRadiusSearch (unorganized_grid_cloud, unorganized_search_methods, query_indices);
+}
+#endif
+
+#if TEST_unorganized_dense_cloud_VIEW_RADIUS
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_dense_cloud_View_Radius)
+{
+ testRadiusSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices, unorganized_input_indices);
+}
+#endif
+
+#if TEST_unorganized_sparse_cloud_COMPLETE_RADIUS
+// Test search on unorganized point clouds
+TEST (PCL, unorganized_sparse_cloud_Complete_Radius)
+{
+ testRadiusSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices);
+}
+#endif
+
+#if TEST_unorganized_sparse_cloud_VIEW_RADIUS
+TEST (PCL, unorganized_sparse_cloud_View_Radius)
+{
+ testRadiusSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices, unorganized_input_indices);
+}
+#endif
+
+#if TEST_ORGANIZED_SPARSE_COMPLETE_KNN
+TEST (PCL, Organized_Sparse_Complete_KNN)
+{
+ testKNNSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices);
+}
+#endif
+
+#if TEST_ORGANIZED_SPARSE_VIEW_KNN
+TEST (PCL, Organized_Sparse_View_KNN)
+{
+ testKNNSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices, organized_input_indices);
+}
+#endif
+
+#if TEST_ORGANIZED_SPARSE_COMPLETE_RADIUS
+TEST (PCL, Organized_Sparse_Complete_Radius)
+{
+ testRadiusSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices);
+}
+#endif
+
+#if TEST_ORGANIZED_SPARSE_VIEW_RADIUS
+TEST (PCL, Organized_Sparse_View_Radius)
+{
+ testRadiusSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices, organized_input_indices);
+}
+#endif
+
+/** \brief create subset of point in cloud to use as query points
+ * \param[out] query_indices resulting query indices - not guaranteed to have size of query_count but guaranteed not to exceed that value
+ * \param point_cloud input cloud, required to check for NaNs and to get the number of points
+ * \param[in] query_count maximum number of query points
+ */
+void createQueryIndices (std::vector<int>& query_indices, PointCloud<PointXYZ>::ConstPtr point_cloud, unsigned query_count)
+{
+ query_indices.clear ();
+ query_indices.reserve (query_count);
+
+ unsigned skip = static_cast<unsigned> (point_cloud->size ()) / query_count;
+ for (unsigned idx = 0; idx < point_cloud->size () && query_indices.size () < query_count; ++idx)
+ if ((rand () % skip) == 0 && isFinite (point_cloud->points [idx]))
+ query_indices.push_back (idx);
+}
+
+/** \brief create an approx. 10% view (subset) of a cloud.
+ * \param[out] indices resulting subset of indices
+ * \param max_index highest accepted index, usually given by cloud->size () - 1
+ */
+void createIndices (std::vector<int>& indices, unsigned max_index)
+{
+ // ~10% of the input cloud
+ for (unsigned idx = 0; idx <= max_index; ++idx)
+ if (rand_uint () == 0)
+ indices.push_back (idx);
+
+ boost::variate_generator< boost::mt19937, boost::uniform_int<> > rand_indices(boost::mt19937 (), boost::uniform_int<> (0, static_cast<int> (indices.size ()) - 1));
+ // shuffle indices -> not ascending index list
+ for (unsigned idx = 0; idx < max_index; ++idx)
+ {
+ unsigned idx1 = rand_indices ();
+ unsigned idx2 = rand_indices ();
+
+ std::swap (indices[idx1], indices[idx2]);
+ }
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ std::cout << "need path to table_scene_mug_stereo_textured.pcd file\n";
+ return (-1);
+ }
+
+ pcl::io::loadPCDFile (argv [1], *organized_sparse_cloud);
+
+ // create unorganized cloud
+ unorganized_dense_cloud->resize (unorganized_point_count);
+ unorganized_dense_cloud->height = 1;
+ unorganized_dense_cloud->width = unorganized_point_count;
+ unorganized_dense_cloud->is_dense = true;
+
+ unorganized_sparse_cloud->resize (unorganized_point_count);
+ unorganized_sparse_cloud->height = 1;
+ unorganized_sparse_cloud->width = unorganized_point_count;
+ unorganized_sparse_cloud->is_dense = false;
+
+ PointXYZ point;
+ for (unsigned pIdx = 0; pIdx < unorganized_point_count; ++pIdx)
+ {
+ point.x = rand_float ();
+ point.y = rand_float ();
+ point.z = rand_float ();
+
+ unorganized_dense_cloud->points [pIdx] = point;
+
+ if (rand_uint () == 0)
+ unorganized_sparse_cloud->points [pIdx].x = unorganized_sparse_cloud->points [pIdx].y = unorganized_sparse_cloud->points [pIdx].z = std::numeric_limits<float>::quiet_NaN ();
+ else
+ unorganized_sparse_cloud->points [pIdx] = point;
+ }
+
+ unorganized_grid_cloud->reserve (1000);
+ unorganized_grid_cloud->height = 1;
+ unorganized_grid_cloud->width = 1000;
+ unorganized_grid_cloud->is_dense = true;
+
+ // values between 0 and 1
+ for (unsigned xIdx = 0; xIdx < 10; ++xIdx)
+ {
+ for (unsigned yIdx = 0; yIdx < 10; ++yIdx)
+ {
+ for (unsigned zIdx = 0; zIdx < 10; ++zIdx)
+ {
+ point.x = 0.1f * static_cast<float>(xIdx);
+ point.y = 0.1f * static_cast<float>(yIdx);
+ point.z = 0.1f * static_cast<float>(zIdx);
+ unorganized_grid_cloud->push_back (point);
+ }
+ }
+ }
+
+ createIndices (organized_input_indices, static_cast<unsigned> (organized_sparse_cloud->size () - 1));
+ createIndices (unorganized_input_indices, unorganized_point_count - 1);
+
+ brute_force.setSortedResults (true);
+ KDTree.setSortedResults (true);
+ octree_search.setSortedResults (true);
+ organized.setSortedResults (true);
+
+ unorganized_search_methods.push_back (&brute_force);
+ unorganized_search_methods.push_back (&KDTree);
+ unorganized_search_methods.push_back (&octree_search);
+
+ organized_search_methods.push_back (&brute_force);
+ organized_search_methods.push_back (&KDTree);
+ organized_search_methods.push_back (&octree_search);
+ organized_search_methods.push_back (&organized);
+
+ createQueryIndices (unorganized_dense_cloud_query_indices, unorganized_dense_cloud, query_count);
+ createQueryIndices (unorganized_sparse_cloud_query_indices, unorganized_sparse_cloud, query_count);
+ createQueryIndices (organized_sparse_query_indices, organized_sparse_cloud, query_count);
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
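All of the helpers above drive the individual back ends through the common pcl::search::Search<PointT> interface (setInputCloud, nearestKSearch, radiusSearch, setSortedResults). For orientation, a minimal self-contained sketch of that interface on the KdTree back end; the cloud contents and query values are arbitrary:

    #include <vector>
    #include <pcl/point_types.h>
    #include <pcl/point_cloud.h>
    #include <pcl/search/kdtree.h>

    int main ()
    {
      // Small synthetic cloud: ten points along the x axis.
      pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
      for (float x = 0.0f; x < 1.0f; x += 0.1f)
        cloud->push_back (pcl::PointXYZ (x, 0.0f, 0.0f));

      pcl::search::KdTree<pcl::PointXYZ> tree;
      tree.setSortedResults (true);   // same option the tests enable on every back end
      tree.setInputCloud (cloud);

      std::vector<int> indices;
      std::vector<float> sqr_distances;
      pcl::PointXYZ query (0.33f, 0.0f, 0.0f);
      tree.nearestKSearch (query, 3, indices, sqr_distances);   // 3 nearest neighbours
      tree.radiusSearch (query, 0.25, indices, sqr_distances);  // all neighbours within 0.25
      return 0;
    }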
-# Random walker requires Eigen::Sparse module that is available since 3.1.0
-if(NOT ("${EIGEN_VERSION}" VERSION_LESS 3.1.0))
- PCL_ADD_TEST(random_walker test_random_walker
- FILES test_random_walker.cpp
- LINK_WITH pcl_gtest
- ARGUMENTS "${PCL_SOURCE_DIR}/test/segmentation/data")
-endif()
+set(SUBSYS_NAME tests_segmentation)
+set(SUBSYS_DESC "Point cloud library segmentation module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS segmentation)
+set(OPT_DEPS) # module does not depend on these
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
+ # Random walker requires Eigen::Sparse module that is available since 3.1.0
+ if(NOT ("${EIGEN_VERSION}" VERSION_LESS 3.1.0))
+ PCL_ADD_TEST(random_walker test_random_walker
+ FILES test_random_walker.cpp
+ LINK_WITH pcl_gtest
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/segmentation/data")
+ endif()
+
+ PCL_ADD_TEST(a_segmentation_test test_segmentation
+ FILES test_segmentation.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_segmentation pcl_features pcl_kdtree pcl_search pcl_common
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/car6.pcd" "${PCL_SOURCE_DIR}/test/colored_cloud.pcd")
+
+ PCL_ADD_TEST(test_non_linear test_non_linear
+ FILES test_non_linear.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_sample_consensus pcl_segmentation pcl_kdtree pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/noisy_slice_displaced.pcd")
+endif (build)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2011, Willow Garage, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: test_segmentation.cpp 3564 2011-12-16 06:11:13Z rusu $
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/ModelCoefficients.h>
+#include <pcl/PointIndices.h>
+#include <pcl/sample_consensus/model_types.h>
+#include <pcl/sample_consensus/method_types.h>
+#include <pcl/segmentation/sac_segmentation.h>
+
+using namespace pcl;
+using namespace pcl::io;
+
+PointCloud<PointXYZ>::Ptr cloud_;
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (SACSegmentation, Segmentation)
+{
+ ModelCoefficients::Ptr sphere_coefficients (new ModelCoefficients);
+ PointIndices::Ptr inliers (new PointIndices);
+
+ SACSegmentation<PointXYZ> seg;
+ seg.setOptimizeCoefficients (true);
+ seg.setModelType (SACMODEL_SPHERE);
+ seg.setMethodType (SAC_RANSAC);
+ seg.setMaxIterations (10000);
+ seg.setDistanceThreshold (0.01);
+ seg.setRadiusLimits (0.03, 0.07); // true radius is 0.05
+ seg.setInputCloud (cloud_);
+ seg.segment (*inliers, *sphere_coefficients);
+
+ EXPECT_NEAR (sphere_coefficients->values[0], 0.998776, 1e-2);
+ EXPECT_NEAR (sphere_coefficients->values[1], 0.752023, 1e-2);
+ EXPECT_NEAR (sphere_coefficients->values[2], 1.24558, 1e-2);
+ EXPECT_NEAR (sphere_coefficients->values[3], 0.0536238, 1e-2);
+
+ EXPECT_NEAR (static_cast<int> (inliers->indices.size ()), 3516, 10);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `noisy_slice_displaced.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ // Load a standard PCD file from disk
+ PointCloud<PointXYZ> cloud;
+ if (loadPCDFile (argv[1], cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `noisy_slice_displaced.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ cloud_ = cloud.makeShared ();
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
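The sphere segmentation above only checks the model coefficients and the inlier count. If the actual inlier points were needed (for instance for visual inspection), the usual follow-up is pcl::ExtractIndices; a short sketch reusing the cloud_ global and an inlier set like the `inliers` computed in the test above:

    #include <pcl/filters/extract_indices.h>

    // Copy the points the sphere model marked as inliers into their own cloud.
    pcl::PointCloud<pcl::PointXYZ>::Ptr sphere_points (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::ExtractIndices<pcl::PointXYZ> extract;
    extract.setInputCloud (cloud_);
    extract.setIndices (inliers);
    extract.setNegative (false);   // keep the inliers; true would keep everything else instead
    extract.filter (*sphere_points);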
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2011, Willow Garage, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/kdtree/kdtree_flann.h>
+#include <pcl/point_types.h>
+#include <pcl/point_cloud.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/search/search.h>
+#include <pcl/features/normal_3d.h>
+
+#include <pcl/segmentation/extract_polygonal_prism_data.h>
+#include <pcl/segmentation/segment_differences.h>
+#include <pcl/segmentation/region_growing.h>
+#include <pcl/segmentation/region_growing_rgb.h>
+#include <pcl/segmentation/min_cut_segmentation.h>
+
+using namespace pcl;
+using namespace pcl::io;
+
+PointCloud<PointXYZ>::Ptr cloud_;
+PointCloud<PointXYZ>::Ptr cloud_t_;
+KdTree<PointXYZ>::Ptr tree_;
+
+pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud;
+pcl::PointCloud<pcl::PointXYZ>::Ptr another_cloud_;
+pcl::PointCloud<pcl::Normal>::Ptr normals_;
+pcl::PointCloud<pcl::Normal>::Ptr another_normals_;
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingRGBTest, Segment)
+{
+ RegionGrowingRGB<pcl::PointXYZRGB> rg;
+
+ rg.setInputCloud (colored_cloud);
+ rg.setDistanceThreshold (10);
+ rg.setRegionColorThreshold (5);
+ rg.setPointColorThreshold (6);
+ rg.setMinClusterSize (20);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_NE (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, Segment)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputCloud (cloud_);
+ rg.setInputNormals (normals_);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_NE (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentWithoutCloud)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputNormals (normals_);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentWithoutNormals)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputCloud (cloud_);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentEmptyCloud)
+{
+ pcl::PointCloud<pcl::PointXYZ>::Ptr empty_cloud (new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::PointCloud<pcl::Normal>::Ptr empty_normals (new pcl::PointCloud<pcl::Normal>);
+
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputCloud (empty_cloud);
+ rg.setInputNormals (empty_normals);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentWithDifferentNormalAndCloudSize)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputCloud (another_cloud_);
+ rg.setInputNormals (normals_);
+
+ int first_cloud_size = static_cast<int> (cloud_->points.size ());
+ int second_cloud_size = static_cast<int> (another_cloud_->points.size ());
+ ASSERT_NE (first_cloud_size, second_cloud_size);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+
+ rg.setInputCloud (cloud_);
+ rg.setInputNormals (another_normals_);
+
+ rg.extract (clusters);
+ num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentWithWrongThresholdParameters)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+ rg.setInputCloud (cloud_);
+ rg.setInputNormals (normals_);
+
+ rg.setNumberOfNeighbours (0);
+
+ std::vector <pcl::PointIndices> clusters;
+ rg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+
+ rg.setNumberOfNeighbours (30);
+ rg.setResidualTestFlag (true);
+ rg.setResidualThreshold (-10.0);
+
+ rg.extract (clusters);
+ num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+
+ rg.setCurvatureTestFlag (true);
+ rg.setCurvatureThreshold (-10.0f);
+
+ rg.extract (clusters);
+ num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (RegionGrowingTest, SegmentFromPoint)
+{
+ pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
+
+ pcl::PointIndices cluster;
+ rg.getSegmentFromPoint (0, cluster);
+ EXPECT_EQ (0, cluster.indices.size ());
+
+ rg.setInputCloud (cloud_);
+ rg.setInputNormals (normals_);
+ rg.getSegmentFromPoint(0, cluster);
+ EXPECT_NE (0, cluster.indices.size());
+}
+
+#if (BOOST_VERSION >= 104400)
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (MinCutSegmentationTest, Segment)
+{
+ pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
+
+ pcl::PointXYZ object_center;
+ double radius = 0.0;
+ double sigma = 0.0;
+ double source_weight = 0.0;
+ unsigned int neighbor_number = 0;
+
+ object_center.x = -36.01f;
+ object_center.y = -64.73f;
+ object_center.z = -6.18f;
+ radius = 3.8003856;
+ sigma = 0.25;
+ source_weight = 0.8;
+ neighbor_number = 14;
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr foreground_points(new pcl::PointCloud<pcl::PointXYZ> ());
+ foreground_points->points.push_back (object_center);
+
+ mcSeg.setForegroundPoints (foreground_points);
+ mcSeg.setInputCloud (another_cloud_);
+ mcSeg.setRadius (radius);
+ mcSeg.setSigma (sigma);
+ mcSeg.setSourceWeight (source_weight);
+ mcSeg.setNumberOfNeighbours (neighbor_number);
+
+ std::vector <pcl::PointIndices> clusters;
+ mcSeg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (2, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (MinCutSegmentationTest, SegmentWithoutForegroundPoints)
+{
+ pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
+ mcSeg.setInputCloud (another_cloud_);
+ mcSeg.setRadius (3.8003856);
+
+ std::vector <pcl::PointIndices> clusters;
+ mcSeg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (MinCutSegmentationTest, SegmentWithoutCloud)
+{
+ pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
+
+ std::vector <pcl::PointIndices> clusters;
+ mcSeg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (MinCutSegmentationTest, SegmentEmptyCloud)
+{
+ pcl::PointCloud<pcl::PointXYZ>::Ptr empty_cloud (new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
+ mcSeg.setInputCloud (empty_cloud);
+
+ std::vector <pcl::PointIndices> clusters;
+ mcSeg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (0, num_of_segments);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (MinCutSegmentationTest, SegmentWithWrongParameters)
+{
+ pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
+ mcSeg.setInputCloud (another_cloud_);
+ pcl::PointXYZ object_center;
+ object_center.x = -36.01f;
+ object_center.y = -64.73f;
+ object_center.z = -6.18f;
+ pcl::PointCloud<pcl::PointXYZ>::Ptr foreground_points(new pcl::PointCloud<pcl::PointXYZ> ());
+ foreground_points->points.push_back (object_center);
+ mcSeg.setForegroundPoints (foreground_points);
+
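+ // Each setter below is fed an invalid (zero or negative) value; the corresponding getter
+ // is expected to keep reporting the previous, valid parameter.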
+ unsigned int prev_neighbor_number = mcSeg.getNumberOfNeighbours ();
+ EXPECT_LT (0, prev_neighbor_number);
+
+ mcSeg.setNumberOfNeighbours (0);
+ unsigned int curr_neighbor_number = mcSeg.getNumberOfNeighbours ();
+ EXPECT_EQ (prev_neighbor_number, curr_neighbor_number);
+
+ double prev_radius = mcSeg.getRadius ();
+ EXPECT_LT (0.0, prev_radius);
+
+ mcSeg.setRadius (0.0);
+ double curr_radius = mcSeg.getRadius ();
+ EXPECT_EQ (prev_radius, curr_radius);
+
+ mcSeg.setRadius (-10.0);
+ curr_radius = mcSeg.getRadius ();
+ EXPECT_EQ (prev_radius, curr_radius);
+
+ double prev_sigma = mcSeg.getSigma ();
+ EXPECT_LT (0.0, prev_sigma);
+
+ mcSeg.setSigma (0.0);
+ double curr_sigma = mcSeg.getSigma ();
+ EXPECT_EQ (prev_sigma, curr_sigma);
+
+ mcSeg.setSigma (-10.0);
+ curr_sigma = mcSeg.getSigma ();
+ EXPECT_EQ (prev_sigma, curr_sigma);
+
+ double prev_source_weight = mcSeg.getSourceWeight ();
+ EXPECT_LT (0.0, prev_source_weight);
+
+ mcSeg.setSourceWeight (0.0);
+ double curr_source_weight = mcSeg.getSourceWeight ();
+ EXPECT_EQ (prev_source_weight, curr_source_weight);
+
+ mcSeg.setSourceWeight (-10.0);
+ curr_source_weight = mcSeg.getSourceWeight ();
+ EXPECT_EQ (prev_source_weight, curr_source_weight);
+
+ mcSeg.setRadius (3.8003856);
+
+ std::vector <pcl::PointIndices> clusters;
+ mcSeg.extract (clusters);
+ int num_of_segments = static_cast<int> (clusters.size ());
+ EXPECT_EQ (2, num_of_segments);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (SegmentDifferences, Segmentation)
+{
+ SegmentDifferences<PointXYZ> sd;
+ sd.setInputCloud (cloud_);
+ sd.setDistanceThreshold (0.00005);
+
+ // Set the target as itself
+ sd.setTargetCloud (cloud_);
+
+ PointCloud<PointXYZ> output;
+ sd.segment (output);
+
+ EXPECT_EQ (static_cast<int> (output.points.size ()), 0);
+
+ // Set a different target
+ sd.setTargetCloud (cloud_t_);
+ sd.segment (output);
+ EXPECT_EQ (static_cast<int> (output.points.size ()), 126);
+ //savePCDFile ("./test/0-t.pcd", output);
+
+ // Reverse
+ sd.setInputCloud (cloud_t_);
+ sd.setTargetCloud (cloud_);
+ sd.segment (output);
+ EXPECT_EQ (static_cast<int> (output.points.size ()), 127);
+ //savePCDFile ("./test/t-0.pcd", output);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+TEST (ExtractPolygonalPrism, Segmentation)
+{
+ PointCloud<PointXYZ>::Ptr hull (new PointCloud<PointXYZ>);
+ hull->points.resize (5);
+
+ for (size_t i = 0; i < hull->points.size (); ++i)
+ {
+ hull->points[i].x = hull->points[i].y = static_cast<float> (i);
+ hull->points[i].z = 0.0f;
+ }
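+ // Note: all hull points lie on the line x == y in the z = 0 plane, i.e. a degenerate
+ // polygonal hull, so no cloud points are expected inside the extracted prism.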
+
+ ExtractPolygonalPrismData<PointXYZ> ex;
+ ex.setInputCloud (cloud_);
+ ex.setInputPlanarHull (hull);
+
+ PointIndices output;
+ ex.segment (output);
+
+ EXPECT_EQ (static_cast<int> (output.indices.size ()), 0);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 4)
+ {
+ std::cerr << "This test requires three point clouds. The first one must be 'bun0.pcd'." << std::endl;
+ std::cerr << "The second must be 'car6.pcd'. The last one must be 'colored_cloud.pcd'." << std::endl;
+ std::cerr << "Please download and pass them in the specified order(including the path to them)." << std::endl;
+ return (-1);
+ }
+
+ // Load a standard PCD file from disk
+ PointCloud<PointXYZ> cloud, cloud_t, another_cloud;
+ PointCloud<PointXYZRGB> colored_cloud_1;
+ if (loadPCDFile (argv[1], cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+ if (pcl::io::loadPCDFile (argv[2], another_cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `car6.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+ if (pcl::io::loadPCDFile (argv[3], colored_cloud_1) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `colored_cloud.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ colored_cloud = colored_cloud_1.makeShared();
+
+ // Translate the cloud along x
+ cloud_t = cloud;
+ for (size_t i = 0; i < cloud.points.size (); ++i)
+ cloud_t.points[i].x += 0.01f;
+
+ cloud_ = cloud.makeShared ();
+ cloud_t_ = cloud_t.makeShared ();
+
+ another_cloud_ = another_cloud.makeShared();
+ normals_.reset (new pcl::PointCloud<pcl::Normal>);
+ pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimator;
+ normal_estimator.setInputCloud (cloud_);
+ normal_estimator.setKSearch (30);
+ normal_estimator.compute (*normals_);
+
+ another_normals_.reset (new pcl::PointCloud<pcl::Normal>);
+ normal_estimator.setInputCloud (another_cloud_);
+ normal_estimator.setKSearch (30);
+ normal_estimator.compute (*another_normals_);
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
-PCL_ADD_TEST(surface_marching_cubes test_marching_cubes
- FILES test_marching_cubes.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(surface_moving_least_squares test_moving_least_squares
- FILES test_moving_least_squares.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(surface_gp3 test_gp3
- FILES test_gp3.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(surface_organized_fast_mesh test_organized_fast_mesh
- FILES test_organized_fast_mesh.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(surface_grid_projection test_grid_projection
- FILES test_grid_projection.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-PCL_ADD_TEST(surface_ear_clipping test_ear_clipping
- FILES test_ear_clipping.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-#PCL_ADD_TEST(surface_poisson test_poisson
-# FILES test_poisson.cpp
-# LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features
-# ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+set(SUBSYS_NAME tests_surface)
+set(SUBSYS_DESC "Point cloud library surface module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS surface)
+set(OPT_DEPS io features sample_consensus filters) # optional dependencies; tests that use them are only built when these modules are enabled
-if(QHULL_FOUND)
- PCL_ADD_TEST(surface_convex_hull test_convex_hull
- FILES test_convex_hull.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_filters pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
- PCL_ADD_TEST(surface_concave test_concave_hull
- FILES test_concave_hull.cpp
- LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_filters pcl_search
- ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
-endif(QHULL_FOUND)
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+if (build)
+ if (BUILD_io AND BUILD_features)
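+ # these tests load PCD data and estimate normals, so they are only built when the io and features modules are enabled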
+ PCL_ADD_TEST(surface_marching_cubes test_marching_cubes
+ FILES test_marching_cubes.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(surface_moving_least_squares test_moving_least_squares
+ FILES test_moving_least_squares.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(surface_gp3 test_gp3
+ FILES test_gp3.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(surface_organized_fast_mesh test_organized_fast_mesh
+ FILES test_organized_fast_mesh.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(surface_grid_projection test_grid_projection
+ FILES test_grid_projection.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(surface_ear_clipping test_ear_clipping
+ FILES test_ear_clipping.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ #PCL_ADD_TEST(surface_poisson test_poisson
+ # FILES test_poisson.cpp
+ # LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features
+ # ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+
+ if(QHULL_FOUND)
+ PCL_ADD_TEST(surface_convex_hull test_convex_hull
+ FILES test_convex_hull.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_filters pcl_search
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ if (BUILD_sample_consensus AND BUILD_filters)
+ PCL_ADD_TEST(surface_concave test_concave_hull
+ FILES test_concave_hull.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_filters pcl_search pcl_sample_consensus
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ endif (BUILD_sample_consensus AND BUILD_filters)
+ endif(QHULL_FOUND)
+ endif (BUILD_io AND BUILD_features)
+endif (build)
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: test_segmentation.cpp 3564 2011-12-16 06:11:13Z rusu $
- *
- */
-
-#include <gtest/gtest.h>
-#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/ModelCoefficients.h>
-#include <pcl/PointIndices.h>
-#include <pcl/sample_consensus/model_types.h>
-#include <pcl/sample_consensus/method_types.h>
-#include <pcl/segmentation/sac_segmentation.h>
-
-using namespace pcl;
-using namespace pcl::io;
-
-PointCloud<PointXYZ>::Ptr cloud_;
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SACSegmentation, Segmentation)
-{
- ModelCoefficients::Ptr sphere_coefficients (new ModelCoefficients);
- PointIndices::Ptr inliers (new PointIndices);
-
- SACSegmentation<PointXYZ> seg;
- seg.setOptimizeCoefficients (true);
- seg.setModelType (SACMODEL_SPHERE);
- seg.setMethodType (SAC_RANSAC);
- seg.setMaxIterations (10000);
- seg.setDistanceThreshold (0.01);
- seg.setRadiusLimits (0.03, 0.07); // true radius is 0.05
- seg.setInputCloud (cloud_);
- seg.segment (*inliers, *sphere_coefficients);
-
- EXPECT_NEAR (sphere_coefficients->values[0], 0.998776, 1e-2);
- EXPECT_NEAR (sphere_coefficients->values[1], 0.752023, 1e-2);
- EXPECT_NEAR (sphere_coefficients->values[2], 1.24558, 1e-2);
- EXPECT_NEAR (sphere_coefficients->values[3], 0.0536238, 1e-2);
-
- EXPECT_NEAR (static_cast<int> (inliers->indices.size ()), 3516, 10);
-}
-
-/* ---[ */
-int
- main (int argc, char** argv)
-{
- if (argc < 2)
- {
- std::cerr << "No test file given. Please download `noisy_slice_displaced.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- // Load a standard PCD file from disk
- PointCloud<PointXYZ> cloud;
- if (loadPCDFile (argv[1], cloud) < 0)
- {
- std::cerr << "Failed to read test file. Please download `noisy_slice_displaced.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- cloud_ = cloud.makeShared ();
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2013-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * main_ground_based_people_detection_app.cpp
- * Created on: Nov 30, 2012
- * Author: Matteo Munaro
- *
- * Test file for testing people detection on a point cloud.
- * As a first step, the ground is manually initialized; then people detection is performed with the GroundBasedPeopleDetectionApp class,
- * which implements the people detection algorithm described here:
- * M. Munaro, F. Basso and E. Menegatti,
- * Tracking people within groups with RGB-D data,
- * In Proceedings of the International Conference on Intelligent Robots and Systems (IROS) 2012, Vilamoura (Portugal), 2012.
- */
-
-#include <gtest/gtest.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/point_types.h>
-#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
-#include <pcl/people/ground_based_people_detection_app.h>
-
-typedef pcl::PointXYZRGB PointT;
-typedef pcl::PointCloud<PointT> PointCloudT;
-
-enum { COLS = 640, ROWS = 480 };
-PointCloudT::Ptr cloud;
-pcl::people::PersonClassifier<pcl::RGB> person_classifier;
-std::string svm_filename;
-float min_confidence;
-float min_width;
-float max_width;
-float min_height;
-float max_height;
-float voxel_size;
-Eigen::Matrix3f rgb_intrinsics_matrix;
-Eigen::VectorXf ground_coeffs;
-
-TEST (PCL, PersonClassifier)
-{
- // Create classifier for people detection:
- EXPECT_TRUE (person_classifier.loadSVMFromFile(svm_filename)); // load trained SVM
-}
-
-TEST (PCL, GroundBasedPeopleDetectionApp)
-{
- // People detection app initialization:
- pcl::people::GroundBasedPeopleDetectionApp<PointT> people_detector; // people detection object
- people_detector.setVoxelSize(voxel_size); // set the voxel size
- people_detector.setIntrinsics(rgb_intrinsics_matrix); // set RGB camera intrinsic parameters
- people_detector.setClassifier(person_classifier); // set person classifier
- people_detector.setPersonClusterLimits(min_height, max_height, min_width, max_width);
-
- // Perform people detection on the new cloud:
- std::vector<pcl::people::PersonCluster<PointT> > clusters; // vector containing persons clusters
- people_detector.setInputCloud(cloud);
- people_detector.setGround(ground_coeffs); // set floor coefficients
- EXPECT_TRUE (people_detector.compute(clusters)); // perform people detection
-
- unsigned int k = 0;
- for(std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
- {
- if(it->getPersonConfidence() > min_confidence) // draw only people with confidence above a threshold
- k++;
- }
- EXPECT_EQ (k, 5); // verify number of people found (should be five)
-}
-
-int main (int argc, char** argv)
-{
- if (argc < 2)
- {
- cerr << "No svm filename provided. Please download `trainedLinearSVMForPeopleDetectionWithHOG.yaml` and pass its path to the test." << endl;
- return (-1);
- }
-
- if (argc < 3)
- {
- cerr << "No test file given. Please download 'five_people.pcd` and pass its path to the test." << endl;
- return (-1);
- }
-
- cloud = PointCloudT::Ptr (new PointCloudT);
- if (pcl::io::loadPCDFile (argv[2], *cloud) < 0)
- {
- cerr << "Failed to read test file. Please download `five_people.pcd` and pass its path to the test." << endl;
- return (-1);
- }
-
- // Algorithm parameters:
- svm_filename = argv[1];
- min_confidence = -1.5;
- min_width = 0.1;
- max_width = 8.0;
- min_height = 1.3;
- max_height = 2.3;
- voxel_size = 0.06;
-
- rgb_intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics
- ground_coeffs.resize(4);
- ground_coeffs << -0.0103586, 0.997011, 0.0765573, -1.26614; // set ground coefficients
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: $
- *
- */
-#include <gtest/gtest.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/point_cloud.h>
-#include <pcl/common/transforms.h>
-#include <pcl/correspondence.h>
-#include <pcl/features/normal_3d_omp.h>
-#include <pcl/features/shot_omp.h>
-#include <pcl/features/board.h>
-#include <pcl/filters/uniform_sampling.h>
-#include <pcl/recognition/cg/hough_3d.h>
-#include <pcl/recognition/cg/geometric_consistency.h>
-#include <pcl/kdtree/kdtree_flann.h>
-#include <pcl/kdtree/impl/kdtree_flann.hpp>
-#include <pcl/common/eigen.h>
-
-using namespace std;
-using namespace pcl;
-using namespace pcl::io;
-
-typedef PointXYZ PointType;
-typedef Normal NormalType;
-typedef ReferenceFrame RFType;
-typedef SHOT352 DescriptorType;
-
-PointCloud<PointType>::Ptr model_ (new PointCloud<PointType> ());
-PointCloud<PointType>::Ptr model_downsampled_ (new PointCloud<PointType> ());
-PointCloud<PointType>::Ptr scene_ (new PointCloud<PointType> ());
-PointCloud<PointType>::Ptr scene_downsampled_ (new PointCloud<PointType> ());
-PointCloud<NormalType>::Ptr model_normals_ (new PointCloud<NormalType> ());
-PointCloud<NormalType>::Ptr scene_normals_ (new PointCloud<NormalType> ());
-PointCloud<DescriptorType>::Ptr model_descriptors_ (new PointCloud<DescriptorType> ());
-PointCloud<DescriptorType>::Ptr scene_descriptors_ (new PointCloud<DescriptorType> ());
-CorrespondencesPtr model_scene_corrs_ (new Correspondences ());
-
-double
-computeRmsE (const PointCloud<PointType>::ConstPtr &model, const PointCloud<PointType>::ConstPtr &scene, const Eigen::Matrix4f &rototranslation)
-{
- PointCloud<PointType> transformed_model;
- transformPointCloud (*model, transformed_model, rototranslation);
-
- KdTreeFLANN<PointType> tree;
- tree.setInputCloud (scene);
-
- double sqr_norm_sum = 0;
- int found_points = 0;
-
- vector<int> neigh_indices (1);
- vector<float> neigh_sqr_dists (1);
- for (size_t i = 0; i < transformed_model.size (); ++i)
- {
-
- int found_neighs = tree.nearestKSearch (transformed_model.at (i), 1, neigh_indices, neigh_sqr_dists);
- if(found_neighs == 1)
- {
- ++found_points;
- sqr_norm_sum += static_cast<double> (neigh_sqr_dists[0]);
- }
- }
-
- if (found_points > 0)
- return sqrt (sqr_norm_sum / double (transformed_model.size ()));
-
- return numeric_limits<double>::max ();
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, Hough3DGrouping)
-{
- PointCloud<RFType>::Ptr model_rf (new PointCloud<RFType> ());
- PointCloud<RFType>::Ptr scene_rf (new PointCloud<RFType> ());
-
- //RFs
- BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
- rf_est.setRadiusSearch (0.015);
- rf_est.setInputCloud (model_downsampled_);
- rf_est.setInputNormals (model_normals_);
- rf_est.setSearchSurface (model_);
- rf_est.compute (*model_rf);
-
- rf_est.setInputCloud (scene_downsampled_);
- rf_est.setInputNormals (scene_normals_);
- rf_est.setSearchSurface (scene_);
- rf_est.compute (*scene_rf);
-
- vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
-
- //Actual CG
- Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
- clusterer.setInputCloud (model_downsampled_);
- clusterer.setInputRf (model_rf);
- clusterer.setSceneCloud (scene_downsampled_);
- clusterer.setSceneRf (scene_rf);
- clusterer.setModelSceneCorrespondences (model_scene_corrs_);
- clusterer.setHoughBinSize (0.03);
- clusterer.setHoughThreshold (25);
- EXPECT_TRUE (clusterer.recognize (rototranslations));
-
- //Assertions
- EXPECT_EQ (rototranslations.size (), 1);
- EXPECT_LT (computeRmsE (model_, scene_, rototranslations[0]), 1E-2);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, GeometricConsistencyGrouping)
-{
- vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
-
- GeometricConsistencyGrouping<PointType, PointType> clusterer;
- clusterer.setInputCloud (model_downsampled_);
- clusterer.setSceneCloud (scene_downsampled_);
- clusterer.setModelSceneCorrespondences (model_scene_corrs_);
- clusterer.setGCSize (0.015);
- clusterer.setGCThreshold (25);
- EXPECT_TRUE (clusterer.recognize (rototranslations));
-
- //Assertions
- EXPECT_EQ (rototranslations.size (), 1);
- EXPECT_LT (computeRmsE (model_, scene_, rototranslations[0]), 1E-4);
-}
-
-
-/* ---[ */
-int
-main (int argc, char** argv)
-{
- if (argc < 3)
- {
- cerr << "No test file given. Please download `milk.pcd` and `milk_cartoon_all_small_clorox.pcd` and pass their paths to the test." << endl;
- return (-1);
- }
-
- if (loadPCDFile (argv[1], *model_) < 0)
- {
- cerr << "Failed to read test file. Please download `milk.pcd` and pass its path to the test." << endl;
- return (-1);
- }
-
- if (loadPCDFile (argv[2], *scene_) < 0)
- {
- cerr << "Failed to read test file. Please download `milk_cartoon_all_small_clorox.pcd` and pass its path to the test." << endl;
- return (-1);
- }
-
- //Normals
- NormalEstimationOMP<PointType, NormalType> norm_est;
- norm_est.setKSearch (10);
- norm_est.setInputCloud (model_);
- norm_est.compute (*model_normals_);
-
- norm_est.setInputCloud (scene_);
- norm_est.compute (*scene_normals_);
-
- //Downsampling
- UniformSampling<PointType> uniform_sampling;
- uniform_sampling.setInputCloud (model_);
- uniform_sampling.setRadiusSearch (0.005);
- uniform_sampling.filter (*model_downsampled_);
-
- uniform_sampling.setInputCloud (scene_);
- uniform_sampling.setRadiusSearch (0.02);
- uniform_sampling.filter (*scene_downsampled_);
-
- //Descriptor
- SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
- descr_est.setRadiusSearch (0.015);
- descr_est.setInputCloud (model_downsampled_);
- descr_est.setInputNormals (model_normals_);
- descr_est.setSearchSurface (model_);
- descr_est.compute (*model_descriptors_);
-
- descr_est.setInputCloud (scene_downsampled_);
- descr_est.setInputNormals (scene_normals_);
- descr_est.setSearchSurface (scene_);
- descr_est.compute (*scene_descriptors_);
-
- //Correspondences with KdTree
- KdTreeFLANN<DescriptorType> match_search;
- match_search.setInputCloud (model_descriptors_);
-
- for (size_t i = 0; i < scene_descriptors_->size (); ++i)
- {
- if ( pcl_isfinite( scene_descriptors_->at (i).descriptor[0] ) )
- {
- vector<int> neigh_indices (1);
- vector<float> neigh_sqr_dists (1);
- int found_neighs = match_search.nearestKSearch (scene_descriptors_->at (i), 1, neigh_indices, neigh_sqr_dists);
- if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f)
- {
- Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
- model_scene_corrs_->push_back (corr);
- }
- }
- }
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: $
- *
- */
-
-#include <gtest/gtest.h>
-#include <pcl/kdtree/kdtree_flann.h>
-#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/search/search.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/features/feature.h>
-#include <pcl/features/fpfh.h>
-#include <pcl/features/impl/fpfh.hpp>
-#include <pcl/recognition/implicit_shape_model.h>
-#include <pcl/recognition/impl/implicit_shape_model.hpp>
-
-pcl::PointCloud<pcl::PointXYZ>::Ptr training_cloud;
-pcl::PointCloud<pcl::PointXYZ>::Ptr testing_cloud;
-pcl::PointCloud<pcl::Normal>::Ptr training_normals;
-pcl::PointCloud<pcl::Normal>::Ptr testing_normals;
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (ISM, TrainRecognize)
-{
- std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clouds;
- std::vector<pcl::PointCloud<pcl::Normal>::Ptr > normals;
- std::vector<unsigned int> classes;
-
- clouds.push_back (training_cloud);
- normals.push_back (training_normals);
- classes.push_back (0);
-
- pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> >::Ptr fpfh
- (new pcl::FPFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> >);
- fpfh->setRadiusSearch (30.0);
- pcl::Feature< pcl::PointXYZ, pcl::Histogram<153> >::Ptr feature_estimator(fpfh);
-
- pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal>::ISMModelPtr model = boost::shared_ptr<pcl::features::ISMModel> (new pcl::features::ISMModel);
-
- pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal> ism;
- ism.setFeatureEstimator(feature_estimator);
- ism.setTrainingClouds (clouds);
- ism.setTrainingClasses (classes);
- ism.setTrainingNormals (normals);
- ism.setSamplingSize (2.0f);
- ism.trainISM (model);
-
- int _class = 0;
- double radius = model->sigmas_[_class] * 10.0;
- double sigma = model->sigmas_[_class];
-
- boost::shared_ptr<pcl::features::ISMVoteList<pcl::PointXYZ> > vote_list = ism.findObjects (model, testing_cloud, testing_normals, _class);
- EXPECT_NE (vote_list->getNumberOfVotes (), 0);
- std::vector<pcl::ISMPeak, Eigen::aligned_allocator<pcl::ISMPeak> > strongest_peaks;
- vote_list->findStrongestPeaks (strongest_peaks, _class, radius, sigma);
-
- EXPECT_NE (strongest_peaks.size (), 0);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (ISM, TrainWithWrongParameters)
-{
- pcl::ism::ImplicitShapeModelEstimation<153, pcl::PointXYZ, pcl::Normal> ism;
-
- float prev_sampling_size = ism.getSamplingSize ();
- EXPECT_NE (prev_sampling_size, 0.0);
- ism.setSamplingSize (0.0f);
- float curr_sampling_size = ism.getSamplingSize ();
- EXPECT_EQ (curr_sampling_size, prev_sampling_size);
-
- unsigned int prev_number_of_clusters = ism.getNumberOfClusters ();
- EXPECT_NE (prev_number_of_clusters, 0);
- ism.setNumberOfClusters (0);
- unsigned int curr_number_of_clusters = ism.getNumberOfClusters ();
- EXPECT_EQ (curr_number_of_clusters, prev_number_of_clusters);
-}
-
-/* ---[ */
-int
-main (int argc, char** argv)
-{
- if (argc < 3)
- {
- std::cerr << "This test requires two point clouds (one for training and one for testing)." << std::endl;
- std::cerr << "You can use these two clouds 'ism_train.pcd' and 'ism_test.pcd'." << std::endl;
- return (-1);
- }
-
- training_cloud = (new pcl::PointCloud<pcl::PointXYZ>)->makeShared();
- if (pcl::io::loadPCDFile (argv[1], *training_cloud) < 0)
- {
- std::cerr << "Failed to read test file. Please download `ism_train.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
- testing_cloud = (new pcl::PointCloud<pcl::PointXYZ>)->makeShared();
- if (pcl::io::loadPCDFile (argv[2], *testing_cloud) < 0)
- {
- std::cerr << "Failed to read test file. Please download `ism_test.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- training_normals = (new pcl::PointCloud<pcl::Normal>)->makeShared();
- testing_normals = (new pcl::PointCloud<pcl::Normal>)->makeShared();
- pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimator;
- normal_estimator.setRadiusSearch (25.0);
- normal_estimator.setInputCloud(training_cloud);
- normal_estimator.compute(*training_normals);
- normal_estimator.setInputCloud(testing_cloud);
- normal_estimator.compute(*testing_normals);
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2009, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: test_ii_normals.cpp 4084 2012-01-31 02:05:42Z rusu $
- *
- */
-
-#include <gtest/gtest.h>
-#include <pcl/search/brute_force.h>
-#include <pcl/search/kdtree.h>
-#include <pcl/search/organized.h>
-#include <pcl/search/octree.h>
-#include <pcl/io/pcd_io.h>
-#include "boost.h"
-
-#include <pcl/common/time.h>
-
-using namespace pcl;
-using namespace std;
-
-/** \brief if set to a value other than 0, fine-grained output is produced */
-#define DEBUG_OUT 1
-#define EXCESSIVE_TESTING 0
-
-#define TEST_unorganized_dense_cloud_COMPLETE_KNN 1
-#define TEST_unorganized_dense_cloud_VIEW_KNN 1
-#define TEST_unorganized_sparse_cloud_COMPLETE_KNN 1
-#define TEST_unorganized_sparse_cloud_VIEW_KNN 1
-#define TEST_unorganized_grid_cloud_COMPLETE_RADIUS 1
-#define TEST_unorganized_dense_cloud_COMPLETE_RADIUS 1
-#define TEST_unorganized_dense_cloud_VIEW_RADIUS 1
-#define TEST_unorganized_sparse_cloud_COMPLETE_RADIUS 1
-#define TEST_unorganized_sparse_cloud_VIEW_RADIUS 1
-#define TEST_ORGANIZED_SPARSE_COMPLETE_KNN 1
-#define TEST_ORGANIZED_SPARSE_VIEW_KNN 1
-#define TEST_ORGANIZED_SPARSE_COMPLETE_RADIUS 1
-#define TEST_ORGANIZED_SPARSE_VIEW_RADIUS 1
-
-#if EXCESSIVE_TESTING
-/** \brief number of points used for creating unordered point clouds */
-const unsigned int unorganized_point_count = 100000;
-
-/** \brief number of search operations on ordered point clouds*/
-const unsigned int query_count = 5000;
-#else
-/** \brief number of points used for creating unordered point clouds */
-const unsigned int unorganized_point_count = 1200;
-
-/** \brief number of search operations on ordered point clouds*/
-const unsigned int query_count = 100;
-#endif
-
-/** \brief organized point cloud*/
-PointCloud<PointXYZ>::Ptr organized_sparse_cloud (new PointCloud<PointXYZ>);
-
-/** \brief unorganized point cloud*/
-PointCloud<PointXYZ>::Ptr unorganized_dense_cloud (new PointCloud<PointXYZ>);
-
-/** \brief unorganized point cloud*/
-PointCloud<PointXYZ>::Ptr unorganized_sparse_cloud (new PointCloud<PointXYZ>);
-
-/** \brief unorganized point cloud*/
-PointCloud<PointXYZ>::Ptr unorganized_grid_cloud (new PointCloud<PointXYZ>);
-
-/** \brief uniform distributed random number generator for unsigned it in range [0;10]*/
-boost::variate_generator< boost::mt19937, boost::uniform_int<unsigned> > rand_uint(boost::mt19937 (), boost::uniform_int<unsigned> (0, 10));
-/** \brief uniform distributed random number generator for floats in the range [0;1] */
-boost::variate_generator< boost::mt19937, boost::uniform_real<float> > rand_float(boost::mt19937 (), boost::uniform_real<float> (0, 1));
-
-/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
-std::vector<int> unorganized_input_indices;
-
-/** \brief used by the *_VIEW_* tests to use only a subset of points from the point cloud*/
-std::vector<int> organized_input_indices;
-
-/** \brief instance of brute force search method to be tested*/
-pcl::search::BruteForce<pcl::PointXYZ> brute_force;
-
-/** \brief instance of KDTree search method to be tested*/
-pcl::search::KdTree<pcl::PointXYZ> KDTree;
-
-/** \brief instance of Octree search method to be tested*/
-pcl::search::Octree<pcl::PointXYZ> octree_search (0.1);
-
-/** \brief instance of Organized search method to be tested*/
-pcl::search::OrganizedNeighbor<pcl::PointXYZ> organized;
-
-/** \brief list of search methods for unorganized search test*/
-vector<search::Search<PointXYZ>* > unorganized_search_methods;
-
-/** \brief list of search methods for organized search test*/
-vector<search::Search<PointXYZ>* > organized_search_methods;
-
-/** \brief lists of indices to be used as query points for various search methods and different cloud types*/
-vector<int> unorganized_dense_cloud_query_indices;
-vector<int> unorganized_sparse_cloud_query_indices;
-vector<int> organized_sparse_query_indices;
-
-/** \brief test whether the result of a search contains unique point ids or not
- * @param indices resulting indices from a search
- * @param name name of the search method that returned these distances
- * @return true if indices are unique, false otherwise
- */
-bool testUniqueness (const vector<int>& indices, const string& name)
-{
- bool uniqueness = true;
- for (unsigned idx1 = 1; idx1 < indices.size () && uniqueness; ++idx1)
- {
- // check whether resulting indices are unique
- for (unsigned idx2 = 0; idx2 < idx1; ++idx2)
- {
- if (indices [idx1] == indices [idx2])
- {
-#if DEBUG_OUT
- std::cout << name << " search: index is twice at positions: " << idx1 << " (" << indices [idx1] << ") , " << idx2 << " (" << indices [idx2] << ")" << std::endl;
-#endif
- // can only be set to false anyway -> no sync required
- uniqueness = false;
- break;
- }
- }
- }
- return uniqueness;
-}
-
-
-/** \brief tests whether the ordering of results is ascending on distances
- * \param distances resulting distances from a search
- * \param name name of the search method that returned these distances
- * \return true if distances in weak ascending order, false otherwise
- */
-bool testOrder (const vector<float>& distances, const string& name)
-{
- bool ordered = true;
- for (unsigned idx1 = 1; idx1 < distances.size (); ++idx1)
- {
- if (distances [idx1-1] > distances [idx1])
- {
-#if DEBUG_OUT
- std::cout << name << " search: not properly sorted: " << idx1 - 1 << "(" << distances [idx1-1] << ") > "
- << idx1 << "(" << distances [idx1] << ")"<< std::endl;
-#endif
- ordered = false;
- break;
- }
- }
-
- return ordered;
-}
-
-/** \brief test whether the results are from the view (subset of the cloud) given by input_indices and also not NaN
- * @param indices_mask defines the subset of allowed points (view) in the result of the search
- * @param nan_mask defines a lookup that indicates whether a point at a given position is finite or not
- * @param indices result of a search to be tested
- * @param name name of search method that returned the result
- * @return true if result is valid, false otherwise
- */
-template<typename PointT> bool
-testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const vector<bool>& indices_mask, const vector<bool>& nan_mask, const vector<int>& indices, const vector<int>& /*input_indices*/, const string& name)
-{
- bool validness = true;
- for (vector<int>::const_iterator iIt = indices.begin (); iIt != indices.end (); ++iIt)
- {
- if (!indices_mask [*iIt])
- {
-#if DEBUG_OUT
- cerr << name << ": result contains an invalid point: " << *iIt << " not in indices list.\n";
-
-// for (vector<int>::const_iterator iIt2 = input_indices.begin (); iIt2 != input_indices.end (); ++iIt2)
-// cout << *iIt2 << " ";
-// cout << endl;
-#endif
- validness = false;
- break;
- }
- else if (!nan_mask [*iIt])
- {
-#if DEBUG_OUT
- cerr << name << ": result contains an invalid point: " << *iIt << " = NaN (" << point_cloud->points [*iIt].x << " , "
- << point_cloud->points [*iIt].y << " , "
- << point_cloud->points [*iIt].z << ")\n";
-#endif
- validness = false;
- break;
- }
- }
-
- return validness;
-}
-
-/** \brief compares two sets of search results
- * \param indices1
- * \param distances1
- * \param name1
- * \param indices2
- * \param distances2
- * \param name2
- * \param eps threshold for comparing the distances
- * \return true if both sets are the same, false otherwise
- */
-bool compareResults (const std::vector<int>& indices1, const std::vector<float>& distances1, const std::string& name1,
- const std::vector<int>& indices2, const std::vector<float>& distances2, const std::string& name2, float eps)
-{
- bool equal = true;
- if (indices1.size () != indices2.size ())
- {
-#if DEBUG_OUT
- cerr << "size of results between " << name1 << " search and " << name2 << " search do not match " <<indices1.size () << " vs. " << indices2.size () << endl;
-// for (unsigned idx = 0; idx < std::min (indices1.size (), indices2.size ()); ++idx)
-// {
-// cout << idx <<".\t" << indices1[idx] << "\t(" << distances1[idx] << "),\t" << indices2[idx] << "\t(" << distances2[idx] << ")\n";
-// }
-// for (unsigned idx = std::min (indices1.size (), indices2.size ()); idx < std::max (indices1.size (), indices2.size ()); ++idx)
-// {
-// if (idx >= indices1.size ())
-// cout << idx <<".\t \t ,\t" << indices2[idx] << "\t(" << distances2[idx] << ")\n";
-// else
-// cout << idx <<".\t" << indices1[idx] << "\t(" << distances1[idx] << ")\n";
-// }
-#endif
- equal = false;
- }
- else
- {
- for (unsigned idx = 0; idx < indices1.size (); ++idx)
- {
- if (indices1[idx] != indices2[idx] && fabs (distances1[idx] - distances2[idx]) > eps)
- {
-#if DEBUG_OUT
- cerr << "results between " << name1 << " search and " << name2 << " search do not match: " << idx << " nearest neighbor: "
- << indices1[idx] << " with distance: " << distances1[idx] << " vs. "
- << indices2[idx] << " with distance: " << distances2[idx] << endl;
-#endif
- equal = false;
- break;
- }
- }
- }
- return equal;
-}
-
-/** \brief does a KNN search and tests that the results are unique, valid and ordered. Additionally it tests whether all search methods return the same results
- * \param cloud the input point cloud
- * \param search_methods vector of all search methods to be tested
- * \param query_indices indices of query points in the point cloud (not necessarily in input_indices)
- * \param input_indices indices defining a subset of the point cloud.
- */
-template<typename PointT> void
-testKNNSearch (typename PointCloud<PointT>::ConstPtr point_cloud, vector<search::Search<PointT>*> search_methods,
- const vector<int>& query_indices, const vector<int>& input_indices = vector<int> () )
-{
- vector< vector<int> >indices (search_methods.size ());
- vector< vector<float> >distances (search_methods.size ());
- vector<bool> passed (search_methods.size (), true);
-
- vector<bool> indices_mask (point_cloud->size (), true);
- vector<bool> nan_mask (point_cloud->size (), true);
-
- if (input_indices.size () != 0)
- {
- indices_mask.assign (point_cloud->size (), false);
- for (vector<int>::const_iterator iIt = input_indices.begin (); iIt != input_indices.end (); ++iIt)
- indices_mask [*iIt] = true;
- }
-
- // also mask out NaN points
- #pragma omp parallel for
- for (int pIdx = 0; pIdx < int (point_cloud->size ()); ++pIdx)
- {
- if (!isFinite (point_cloud->points [pIdx]))
- nan_mask [pIdx] = false;
- }
-
- boost::shared_ptr<vector<int> > input_indices_;
- if (input_indices.size ())
- input_indices_.reset (new vector<int> (input_indices));
-
- #pragma omp parallel for
- for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
- search_methods [sIdx]->setInputCloud (point_cloud, input_indices_);
-
- // test knn values from 1, 8, 64, 512
- for (unsigned knn = 1; knn <= 512; knn <<= 3)
- {
- // find nn for each point in the cloud
- for (vector<int>::const_iterator qIt = query_indices.begin (); qIt != query_indices.end (); ++qIt)
- {
- #pragma omp parallel for
- for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
- {
- search_methods [sIdx]->nearestKSearch (point_cloud->points[*qIt], knn, indices [sIdx], distances [sIdx]);
- passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
- passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
- passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
- }
-
- // compare results to each other
- #pragma omp parallel for
- for (int sIdx = 1; sIdx < int (search_methods.size ()); ++sIdx)
- {
- passed [sIdx] = passed [sIdx] && compareResults (indices [0], distances [0], search_methods [0]->getName (),
- indices [sIdx], distances [sIdx], search_methods [sIdx]->getName (), 1e-6f);
- }
- }
- }
- for (size_t sIdx = 0; sIdx < search_methods.size (); ++sIdx)
- {
- cout << search_methods [sIdx]->getName () << ": " << (passed[sIdx]?"passed":"failed") << endl;
- EXPECT_TRUE (passed [sIdx]);
- }
-}
-
-/** \brief does a radius search and tests that the results are unique, valid and ordered. Additionally it tests whether all search methods return the same results
- * \param cloud the input point cloud
- * \param search_methods vector of all search methods to be tested
- * \param query_indices indices of query points in the point cloud (not necessarily in input_indices)
- * \param input_indices indices defining a subset of the point cloud.
- */
-template<typename PointT> void
-testRadiusSearch (typename PointCloud<PointT>::ConstPtr point_cloud, vector<search::Search<PointT>*> search_methods,
- const vector<int>& query_indices, const vector<int>& input_indices = vector<int> ())
-{
- vector< vector<int> >indices (search_methods.size ());
- vector< vector<float> >distances (search_methods.size ());
- vector <bool> passed (search_methods.size (), true);
- vector<bool> indices_mask (point_cloud->size (), true);
- vector<bool> nan_mask (point_cloud->size (), true);
-
- if (input_indices.size () != 0)
- {
- indices_mask.assign (point_cloud->size (), false);
- for (vector<int>::const_iterator iIt = input_indices.begin (); iIt != input_indices.end (); ++iIt)
- indices_mask [*iIt] = true;
- }
-
- // also mask out NaN points
- #pragma omp parallel for
- for (int pIdx = 0; pIdx < int (point_cloud->size ()); ++pIdx)
- {
- if (!isFinite (point_cloud->points [pIdx]))
- nan_mask [pIdx] = false;
- }
-
- boost::shared_ptr<vector<int> > input_indices_;
- if (input_indices.size ())
- input_indices_.reset (new vector<int> (input_indices));
-
- #pragma omp parallel for
- for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
- search_methods [sIdx]->setInputCloud (point_cloud, input_indices_);
-
- // test radii 0.01, 0.02, 0.04, 0.08
- for (float radius = 0.01f; radius < 0.1f; radius *= 2.0f)
- {
- //cout << radius << endl;
- // find nn for each point in the cloud
- for (vector<int>::const_iterator qIt = query_indices.begin (); qIt != query_indices.end (); ++qIt)
- {
- #pragma omp parallel for
- for (int sIdx = 0; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
- {
- search_methods [sIdx]->radiusSearch (point_cloud->points[*qIt], radius, indices [sIdx], distances [sIdx], 0);
- passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
- passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
- passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
- }
-
- // compare results to each other
- #pragma omp parallel for
- for (int sIdx = 1; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
- {
- passed [sIdx] = passed [sIdx] && compareResults (indices [0], distances [0], search_methods [0]->getName (),
- indices [sIdx], distances [sIdx], search_methods [sIdx]->getName (), 1e-6f);
- }
- }
- }
- for (unsigned sIdx = 0; sIdx < search_methods.size (); ++sIdx)
- {
- cout << search_methods [sIdx]->getName () << ": " << (passed[sIdx]?"passed":"failed") << endl;
- EXPECT_TRUE (passed [sIdx]);
- }
-}
-
-#if TEST_unorganized_dense_cloud_COMPLETE_KNN
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_dense_cloud_Complete_KNN)
-{
- testKNNSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices);
-}
-#endif
-
-#if TEST_unorganized_dense_cloud_VIEW_KNN
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_dense_cloud_View_KNN)
-{
- testKNNSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices, unorganized_input_indices);
-}
-#endif
-
-#if TEST_unorganized_sparse_cloud_COMPLETE_KNN
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_sparse_cloud_Complete_KNN)
-{
- testKNNSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices);
-}
-#endif
-
-#if TEST_unorganized_sparse_cloud_VIEW_KNN
-TEST (PCL, unorganized_sparse_cloud_View_KNN)
-{
- testKNNSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices, unorganized_input_indices);
-}
-#endif
-
-#if TEST_unorganized_dense_cloud_COMPLETE_RADIUS
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_dense_cloud_Complete_Radius)
-{
- testRadiusSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices);
-}
-#endif
-
-#if TEST_unorganized_grid_cloud_COMPLETE_RADIUS
-// Test search on unorganized point clouds in a grid
-TEST (PCL, unorganized_grid_cloud_Complete_Radius)
-{
- vector<int> query_indices;
- query_indices.reserve (query_count);
-
- unsigned skip = static_cast<unsigned> (unorganized_grid_cloud->size ()) / query_count;
- for (unsigned idx = 0; idx < unorganized_grid_cloud->size () && query_indices.size () < query_count; ++idx)
- if ((rand () % skip) == 0 && isFinite (unorganized_grid_cloud->points [idx]))
- query_indices.push_back (idx);
-
- testRadiusSearch (unorganized_grid_cloud, unorganized_search_methods, query_indices);
-}
-#endif
-
-#if TEST_unorganized_dense_cloud_VIEW_RADIUS
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_dense_cloud_View_Radius)
-{
- testRadiusSearch (unorganized_dense_cloud, unorganized_search_methods, unorganized_dense_cloud_query_indices, unorganized_input_indices);
-}
-#endif
-
-#if TEST_unorganized_sparse_cloud_COMPLETE_RADIUS
-// Test search on unorganized point clouds
-TEST (PCL, unorganized_sparse_cloud_Complete_Radius)
-{
- testRadiusSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices);
-}
-#endif
-
-#if TEST_unorganized_sparse_cloud_VIEW_RADIUS
-TEST (PCL, unorganized_sparse_cloud_View_Radius)
-{
- testRadiusSearch (unorganized_sparse_cloud, unorganized_search_methods, unorganized_sparse_cloud_query_indices, unorganized_input_indices);
-}
-#endif
-
-#if TEST_ORGANIZED_SPARSE_COMPLETE_KNN
-TEST (PCL, Organized_Sparse_Complete_KNN)
-{
- testKNNSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices);
-}
-#endif
-
-#if TEST_ORGANIZED_SPARSE_VIEW_KNN
-TEST (PCL, Organized_Sparse_View_KNN)
-{
- testKNNSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices, organized_input_indices);
-}
-#endif
-
-#if TEST_ORGANIZED_SPARSE_COMPLETE_RADIUS
-TEST (PCL, Organized_Sparse_Complete_Radius)
-{
- testRadiusSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices);
-}
-#endif
-
-#if TEST_ORGANIZED_SPARSE_VIEW_RADIUS
-TEST (PCL, Organized_Sparse_View_Radius)
-{
- testRadiusSearch (organized_sparse_cloud, organized_search_methods, organized_sparse_query_indices, organized_input_indices);
-}
-#endif
-
-/** \brief create subset of point in cloud to use as query points
- * \param[out] query_indices resulting query indices - not guaranteed to have size of query_count but guaranteed not to exceed that value
- * \param cloud input cloud required to check for nans and to get number of points
- * \param[in] query_count maximum number of query points
- */
-void createQueryIndices (std::vector<int>& query_indices, PointCloud<PointXYZ>::ConstPtr point_cloud, unsigned query_count)
-{
- query_indices.clear ();
- query_indices.reserve (query_count);
-
- unsigned skip = static_cast<unsigned> (point_cloud->size ()) / query_count;
- for (unsigned idx = 0; idx < point_cloud->size () && query_indices.size () < query_count; ++idx)
- if ((rand () % skip) == 0 && isFinite (point_cloud->points [idx]))
- query_indices.push_back (idx);
-}
-
-/** \brief create an approx. 10% view (subset) of a cloud.
- * \param indices
- * \param max_index highest accepted index, usually given by cloud->size () - 1
- */
-void createIndices (std::vector<int>& indices, unsigned max_index)
-{
- // ~10% of the input cloud
- for (unsigned idx = 0; idx <= max_index; ++idx)
- if (rand_uint () == 0)
- indices.push_back (idx);
-
- boost::variate_generator< boost::mt19937, boost::uniform_int<> > rand_indices(boost::mt19937 (), boost::uniform_int<> (0, static_cast<int> (indices.size ()) - 1));
- // shuffle indices -> not ascending index list
- for (unsigned idx = 0; idx < max_index; ++idx)
- {
- unsigned idx1 = rand_indices ();
- unsigned idx2 = rand_indices ();
-
- std::swap (indices[idx1], indices[idx2]);
- }
-}
-
-/* ---[ */
-int
-main (int argc, char** argv)
-{
- if (argc < 2)
- {
- std::cout << "need path to table_scene_mug_stereo_textured.pcd file\n";
- return (-1);
- }
-
- pcl::io::loadPCDFile (argv [1], *organized_sparse_cloud);
-
- // create unorganized cloud
- unorganized_dense_cloud->resize (unorganized_point_count);
- unorganized_dense_cloud->height = 1;
- unorganized_dense_cloud->width = unorganized_point_count;
- unorganized_dense_cloud->is_dense = true;
-
- unorganized_sparse_cloud->resize (unorganized_point_count);
- unorganized_sparse_cloud->height = 1;
- unorganized_sparse_cloud->width = unorganized_point_count;
- unorganized_sparse_cloud->is_dense = false;
-
- PointXYZ point;
- for (unsigned pIdx = 0; pIdx < unorganized_point_count; ++pIdx)
- {
- point.x = rand_float ();
- point.y = rand_float ();
- point.z = rand_float ();
-
- unorganized_dense_cloud->points [pIdx] = point;
-
- if (rand_uint () == 0)
- unorganized_sparse_cloud->points [pIdx].x = unorganized_sparse_cloud->points [pIdx].y = unorganized_sparse_cloud->points [pIdx].z = std::numeric_limits<float>::quiet_NaN ();
- else
- unorganized_sparse_cloud->points [pIdx] = point;
- }
-
- unorganized_grid_cloud->reserve (1000);
- unorganized_grid_cloud->height = 1;
- unorganized_grid_cloud->width = 1000;
- unorganized_grid_cloud->is_dense = true;
-
- // values between 0 and 1
- for (unsigned xIdx = 0; xIdx < 10; ++xIdx)
- {
- for (unsigned yIdx = 0; yIdx < 10; ++yIdx)
- {
- for (unsigned zIdx = 0; zIdx < 10; ++zIdx)
- {
- point.x = 0.1f * static_cast<float>(xIdx);
- point.y = 0.1f * static_cast<float>(yIdx);
- point.z = 0.1f * static_cast<float>(zIdx);
- unorganized_grid_cloud->push_back (point);
- }
- }
- }
-
- createIndices (organized_input_indices, static_cast<unsigned> (organized_sparse_cloud->size () - 1));
- createIndices (unorganized_input_indices, unorganized_point_count - 1);
-
- brute_force.setSortedResults (true);
- KDTree.setSortedResults (true);
- octree_search.setSortedResults (true);
- organized.setSortedResults (true);
-
- unorganized_search_methods.push_back (&brute_force);
- unorganized_search_methods.push_back (&KDTree);
- unorganized_search_methods.push_back (&octree_search);
-
- organized_search_methods.push_back (&brute_force);
- organized_search_methods.push_back (&KDTree);
- organized_search_methods.push_back (&octree_search);
- organized_search_methods.push_back (&organized);
-
- createQueryIndices (unorganized_dense_cloud_query_indices, unorganized_dense_cloud, query_count);
- createQueryIndices (unorganized_sparse_cloud_query_indices, unorganized_sparse_cloud, query_count);
- createQueryIndices (organized_sparse_query_indices, organized_sparse_cloud, query_count);
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#include <gtest/gtest.h>
-#include <pcl/kdtree/kdtree_flann.h>
-#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/search/search.h>
-#include <pcl/features/normal_3d.h>
-
-#include <pcl/segmentation/extract_polygonal_prism_data.h>
-#include <pcl/segmentation/segment_differences.h>
-#include <pcl/segmentation/region_growing.h>
-#include <pcl/segmentation/region_growing_rgb.h>
-#include <pcl/segmentation/min_cut_segmentation.h>
-
-using namespace pcl;
-using namespace pcl::io;
-
-PointCloud<PointXYZ>::Ptr cloud_;
-PointCloud<PointXYZ>::Ptr cloud_t_;
-KdTree<PointXYZ>::Ptr tree_;
-
-pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud;
-pcl::PointCloud<pcl::PointXYZ>::Ptr another_cloud_;
-pcl::PointCloud<pcl::Normal>::Ptr normals_;
-pcl::PointCloud<pcl::Normal>::Ptr another_normals_;
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingRGBTest, Segment)
-{
- RegionGrowingRGB<pcl::PointXYZRGB> rg;
-
- rg.setInputCloud (colored_cloud);
- rg.setDistanceThreshold (10);
- rg.setRegionColorThreshold (5);
- rg.setPointColorThreshold (6);
- rg.setMinClusterSize (20);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_NE (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, Segment)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputCloud (cloud_);
- rg.setInputNormals (normals_);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_NE (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentWithoutCloud)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputNormals (normals_);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentWithoutNormals)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputCloud (cloud_);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentEmptyCloud)
-{
- pcl::PointCloud<pcl::PointXYZ>::Ptr empty_cloud (new pcl::PointCloud<pcl::PointXYZ>);
- pcl::PointCloud<pcl::Normal>::Ptr empty_normals (new pcl::PointCloud<pcl::Normal>);
-
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputCloud (empty_cloud);
- rg.setInputNormals (empty_normals);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentWithDifferentNormalAndCloudSize)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputCloud (another_cloud_);
- rg.setInputNormals (normals_);
-
- int first_cloud_size = static_cast<int> (cloud_->points.size ());
- int second_cloud_size = static_cast<int> (another_cloud_->points.size ());
- ASSERT_NE (first_cloud_size, second_cloud_size);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-
- rg.setInputCloud (cloud_);
- rg.setInputNormals (another_normals_);
-
- rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentWithWrongThresholdParameters)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
- rg.setInputCloud (cloud_);
- rg.setInputNormals (normals_);
-
- rg.setNumberOfNeighbours (0);
-
- std::vector <pcl::PointIndices> clusters;
- rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-
- rg.setNumberOfNeighbours (30);
- rg.setResidualTestFlag (true);
- rg.setResidualThreshold (-10.0);
-
- rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-
- rg.setCurvatureTestFlag (true);
- rg.setCurvatureThreshold (-10.0f);
-
- rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (RegionGrowingTest, SegmentFromPoint)
-{
- pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
-
- pcl::PointIndices cluster;
- rg.getSegmentFromPoint (0, cluster);
- EXPECT_EQ (0, cluster.indices.size ());
-
- rg.setInputCloud (cloud_);
- rg.setInputNormals (normals_);
- rg.getSegmentFromPoint(0, cluster);
- EXPECT_NE (0, cluster.indices.size());
-}
-
-#if (BOOST_VERSION >= 104400)
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (MinCutSegmentationTest, Segment)
-{
- pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
-
- pcl::PointXYZ object_center;
- double radius = 0.0;
- double sigma = 0.0;
- double source_weight = 0.0;
- unsigned int neighbor_number = 0;
-
- object_center.x = -36.01f;
- object_center.y = -64.73f;
- object_center.z = -6.18f;
- radius = 3.8003856;
- sigma = 0.25;
- source_weight = 0.8;
- neighbor_number = 14;
-
- pcl::PointCloud<pcl::PointXYZ>::Ptr foreground_points(new pcl::PointCloud<pcl::PointXYZ> ());
- foreground_points->points.push_back (object_center);
-
- mcSeg.setForegroundPoints (foreground_points);
- mcSeg.setInputCloud (another_cloud_);
- mcSeg.setRadius (radius);
- mcSeg.setSigma (sigma);
- mcSeg.setSourceWeight (source_weight);
- mcSeg.setNumberOfNeighbours (neighbor_number);
-
- std::vector <pcl::PointIndices> clusters;
- mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (2, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (MinCutSegmentationTest, SegmentWithoutForegroundPoints)
-{
- pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
- mcSeg.setInputCloud (another_cloud_);
- mcSeg.setRadius (3.8003856);
-
- std::vector <pcl::PointIndices> clusters;
- mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (MinCutSegmentationTest, SegmentWithoutCloud)
-{
- pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
-
- std::vector <pcl::PointIndices> clusters;
- mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (MinCutSegmentationTest, SegmentEmptyCloud)
-{
- pcl::PointCloud<pcl::PointXYZ>::Ptr empty_cloud (new pcl::PointCloud<pcl::PointXYZ>);
- pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
- mcSeg.setInputCloud (empty_cloud);
-
- std::vector <pcl::PointIndices> clusters;
- mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (0, num_of_segments);
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (MinCutSegmentationTest, SegmentWithWrongParameters)
-{
- pcl::MinCutSegmentation<pcl::PointXYZ> mcSeg;
- mcSeg.setInputCloud (another_cloud_);
- pcl::PointXYZ object_center;
- object_center.x = -36.01f;
- object_center.y = -64.73f;
- object_center.z = -6.18f;
- pcl::PointCloud<pcl::PointXYZ>::Ptr foreground_points(new pcl::PointCloud<pcl::PointXYZ> ());
- foreground_points->points.push_back (object_center);
- mcSeg.setForegroundPoints (foreground_points);
-
- unsigned int prev_neighbor_number = mcSeg.getNumberOfNeighbours ();
- EXPECT_LT (0, prev_neighbor_number);
-
- mcSeg.setNumberOfNeighbours (0);
- unsigned int curr_neighbor_number = mcSeg.getNumberOfNeighbours ();
- EXPECT_EQ (prev_neighbor_number, curr_neighbor_number);
-
- double prev_radius = mcSeg.getRadius ();
- EXPECT_LT (0.0, prev_radius);
-
- mcSeg.setRadius (0.0);
- double curr_radius = mcSeg.getRadius ();
- EXPECT_EQ (prev_radius, curr_radius);
-
- mcSeg.setRadius (-10.0);
- curr_radius = mcSeg.getRadius ();
- EXPECT_EQ (prev_radius, curr_radius);
-
- double prev_sigma = mcSeg.getSigma ();
- EXPECT_LT (0.0, prev_sigma);
-
- mcSeg.setSigma (0.0);
- double curr_sigma = mcSeg.getSigma ();
- EXPECT_EQ (prev_sigma, curr_sigma);
-
- mcSeg.setSigma (-10.0);
- curr_sigma = mcSeg.getSigma ();
- EXPECT_EQ (prev_sigma, curr_sigma);
-
- double prev_source_weight = mcSeg.getSourceWeight ();
- EXPECT_LT (0.0, prev_source_weight);
-
- mcSeg.setSourceWeight (0.0);
- double curr_source_weight = mcSeg.getSourceWeight ();
- EXPECT_EQ (prev_source_weight, curr_source_weight);
-
- mcSeg.setSourceWeight (-10.0);
- curr_source_weight = mcSeg.getSourceWeight ();
- EXPECT_EQ (prev_source_weight, curr_source_weight);
-
- mcSeg.setRadius (3.8003856);
-
- std::vector <pcl::PointIndices> clusters;
- mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
- EXPECT_EQ (2, num_of_segments);
-}
-#endif
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (SegmentDifferences, Segmentation)
-{
- SegmentDifferences<PointXYZ> sd;
- sd.setInputCloud (cloud_);
- sd.setDistanceThreshold (0.00005);
-
- // Set the target as itself
- sd.setTargetCloud (cloud_);
-
- PointCloud<PointXYZ> output;
- sd.segment (output);
-
- EXPECT_EQ (static_cast<int> (output.points.size ()), 0);
-
- // Set a different target
- sd.setTargetCloud (cloud_t_);
- sd.segment (output);
- EXPECT_EQ (static_cast<int> (output.points.size ()), 126);
- //savePCDFile ("./test/0-t.pcd", output);
-
- // Reverse
- sd.setInputCloud (cloud_t_);
- sd.setTargetCloud (cloud_);
- sd.segment (output);
- EXPECT_EQ (static_cast<int> (output.points.size ()), 127);
- //savePCDFile ("./test/t-0.pcd", output);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (ExtractPolygonalPrism, Segmentation)
-{
- PointCloud<PointXYZ>::Ptr hull (new PointCloud<PointXYZ>);
- hull->points.resize (5);
-
- for (size_t i = 0; i < hull->points.size (); ++i)
- {
- hull->points[i].x = hull->points[i].y = static_cast<float> (i);
- hull->points[i].z = 0.0f;
- }
-
- ExtractPolygonalPrismData<PointXYZ> ex;
- ex.setInputCloud (cloud_);
- ex.setInputPlanarHull (hull);
-
- PointIndices output;
- ex.segment (output);
-
- EXPECT_EQ (static_cast<int> (output.indices.size ()), 0);
-}
-
-/* ---[ */
-int
-main (int argc, char** argv)
-{
- if (argc < 4)
- {
- std::cerr << "This test requires three point clouds. The first one must be 'bun0.pcd'." << std::endl;
- std::cerr << "The second must be 'car6.pcd'. The last one must be 'colored_cloud.pcd'." << std::endl;
- std::cerr << "Please download and pass them in the specified order(including the path to them)." << std::endl;
- return (-1);
- }
-
- // Load a standard PCD file from disk
- PointCloud<PointXYZ> cloud, cloud_t, another_cloud;
- PointCloud<PointXYZRGB> colored_cloud_1;
- if (loadPCDFile (argv[1], cloud) < 0)
- {
- std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
- if (pcl::io::loadPCDFile (argv[2], another_cloud) < 0)
- {
- std::cerr << "Failed to read test file. Please download `car6.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
- if (pcl::io::loadPCDFile (argv[3], colored_cloud_1) < 0)
- {
- std::cerr << "Failed to read test file. Please download `colored_cloud.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- colored_cloud = colored_cloud_1.makeShared();
-
- // Translate the cloud
- cloud_t = cloud;
- for (size_t i = 0; i < cloud.points.size (); ++i)
- cloud_t.points[i].x += 0.01f;
-
- cloud_ = cloud.makeShared ();
- cloud_t_ = cloud_t.makeShared ();
-
- another_cloud_ = another_cloud.makeShared();
- normals_ = (new pcl::PointCloud<pcl::Normal>)->makeShared();
- pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimator;
- normal_estimator.setInputCloud(cloud_);
- normal_estimator.setKSearch(30);
- normal_estimator.compute(*normals_);
-
- another_normals_ = (new pcl::PointCloud<pcl::Normal>)->makeShared();
- normal_estimator.setInputCloud(another_cloud_);
- normal_estimator.setKSearch(30);
- normal_estimator.compute(*another_normals_);
-
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2010, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- *
- */
-#include <gtest/gtest.h>
-
-#include <iostream> // For debug
-
-#include <pcl/PCLPointCloud2.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/features/feature.h>
-#include <pcl/common/eigen.h>
-#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
-#include <pcl/common/transforms.h>
-
-#include <pcl/pcl_tests.h>
-
-using namespace pcl;
-using namespace pcl::io;
-using namespace std;
-
-const float PI = 3.14159265f;
-const float rho = sqrtf (2.0f) / 2.0f; // cos(PI/4) == sin(PI/4)
-
-PointCloud<PointXYZ> cloud;
-pcl::PCLPointCloud2 cloud_blob;
-
-void
-init ()
-{
- PointXYZ p0 (0, 0, 0);
- PointXYZ p1 (1, 0, 0);
- PointXYZ p2 (0, 1, 0);
- PointXYZ p3 (0, 0, 1);
- cloud.points.push_back (p0);
- cloud.points.push_back (p1);
- cloud.points.push_back (p2);
- cloud.points.push_back (p3);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, DeMean)
-{
- PointCloud<PointXYZ> cloud, cloud_demean;
- fromPCLPointCloud2 (cloud_blob, cloud);
-
- Eigen::Vector4f centroid;
- compute3DCentroid (cloud, centroid);
- EXPECT_NEAR (centroid[0], -0.0290809, 1e-4);
- EXPECT_NEAR (centroid[1], 0.102653, 1e-4);
- EXPECT_NEAR (centroid[2], 0.027302, 1e-4);
- EXPECT_NEAR (centroid[3], 1, 1e-4);
-
- // Check standard demean
- demeanPointCloud (cloud, centroid, cloud_demean);
- EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud_demean.width, cloud.width);
- EXPECT_EQ (cloud_demean.height, cloud.height);
- EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
-
- EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
-
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
-
- vector<int> indices (cloud.points.size ());
- for (int i = 0; i < static_cast<int> (indices.size ()); ++i) { indices[i] = i; }
-
- // Check standard demean w/ indices
- demeanPointCloud (cloud, indices, centroid, cloud_demean);
- EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud_demean.width, cloud.width);
- EXPECT_EQ (cloud_demean.height, cloud.height);
- EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
-
- EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
-
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
-
- // Check eigen demean
- Eigen::MatrixXf mat_demean;
- demeanPointCloud (cloud, centroid, mat_demean);
- EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
- EXPECT_EQ (mat_demean.rows (), 4);
-
- EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
- EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
- EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
-
- EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
- EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
- EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
-
- // Check eigen demean + indices
- demeanPointCloud (cloud, indices, centroid, mat_demean);
- EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
- EXPECT_EQ (mat_demean.rows (), 4);
-
- EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
- EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
- EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
-
- EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
- EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
- EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, Transform)
-{
- Eigen::Vector3f offset (100, 0, 0);
- float angle = PI/4;
- Eigen::Quaternionf rotation (cos (angle / 2), 0, 0, sin (angle / 2));
-
- PointCloud<PointXYZ> cloud_out;
- const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points (cloud_out.points);
- transformPointCloud (cloud, cloud_out, offset, rotation);
-
- EXPECT_EQ (cloud.points.size (), cloud_out.points.size ());
- EXPECT_EQ (100, points[0].x);
- EXPECT_EQ (0, points[0].y);
- EXPECT_EQ (0, points[0].z);
- EXPECT_NEAR (100+rho, points[1].x, 1e-4);
- EXPECT_NEAR (rho, points[1].y, 1e-4);
- EXPECT_EQ (0, points[1].z);
- EXPECT_NEAR (100-rho, points[2].x, 1e-4);
- EXPECT_NEAR (rho, points[2].y, 1e-4);
- EXPECT_EQ (0, points[2].z);
- EXPECT_EQ (100, points[3].x);
- EXPECT_EQ (0, points[3].y);
- EXPECT_EQ (1, points[3].z);
-
- PointCloud<PointXYZ> cloud_out2;
- const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points2 (cloud_out2.points);
- Eigen::Translation3f translation (offset);
- Eigen::Affine3f transform;
- transform = translation * rotation;
- transformPointCloud (cloud, cloud_out2, transform);
-
- EXPECT_EQ (cloud.points.size (), cloud_out2.points.size ());
- EXPECT_EQ (100, points2[0].x);
- EXPECT_EQ (0, points2[0].y);
- EXPECT_EQ (0, points2[0].z);
- EXPECT_NEAR (100+rho, points2[1].x, 1e-4);
- EXPECT_NEAR (rho, points2[1].y, 1e-4);
- EXPECT_EQ (0, points2[1].z);
- EXPECT_NEAR (100-rho, points2[2].x, 1e-4);
- EXPECT_NEAR (rho, points2[2].y, 1e-4);
- EXPECT_EQ (0, points2[2].z);
- EXPECT_EQ (100, points2[3].x);
- EXPECT_EQ (0, points2[3].y);
- EXPECT_EQ (1, points2[3].z);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, TransformCopyFields)
-{
- Eigen::Affine3f transform;
- transform = Eigen::Translation3f (100, 0, 0);
-
- PointXYZRGBNormal empty_point;
- std::vector<int> indices (1);
-
- PointCloud<PointXYZRGBNormal> cloud (2, 1);
- cloud.points[0].rgba = 0xFF0000;
- cloud.points[1].rgba = 0x00FF00;
-
- // Preserve data in all fields
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, cloud_out, transform, true);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
- }
- // Preserve data in all fields (with indices)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, indices, cloud_out, transform, true);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- }
- // Do not preserve data in all fields
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, cloud_out, transform, false);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
- }
- // Do not preserve data in all fields (with indices)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, indices, cloud_out, transform, false);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- }
- // Preserve data in all fields (with normals version)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, cloud_out, transform, true);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
- }
- // Preserve data in all fields (with normals and indices version)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, indices, cloud_out, transform, true);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- }
- // Do not preserve data in all fields (with normals version)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, cloud_out, transform, false);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
- }
- // Do not preserve data in all fields (with normals and indices version)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, indices, cloud_out, transform, false);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- }
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, Matrix4Affine3Transform)
-{
- float rot_x = 2.8827f;
- float rot_y = -0.31190f;
- float rot_z = -0.93058f;
- Eigen::Affine3f affine;
- pcl::getTransformation (0, 0, 0, rot_x, rot_y, rot_z, affine);
-
- EXPECT_NEAR (affine (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (affine (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (affine (0, 2), -0.028107658f, 1e-4);
- EXPECT_NEAR (affine (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (affine (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (affine (1, 2), -0.39082864f, 1e-4);
- EXPECT_NEAR (affine (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (affine (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (affine (2, 2), -0.920034f, 1e-4);
-
- // Approximative!!! Uses SVD internally! See http://eigen.tuxfamily.org/dox/Transform_8h_source.html
- Eigen::Matrix3f rotation = affine.rotation ();
-
- EXPECT_NEAR (rotation (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (rotation (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (rotation (0, 2), -0.028107658f, 1e-4);
- EXPECT_NEAR (rotation (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (rotation (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (rotation (1, 2), -0.39082864f, 1e-4);
- EXPECT_NEAR (rotation (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (rotation (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (rotation (2, 2), -0.920034f, 1e-4);
-
- float trans_x, trans_y, trans_z;
- pcl::getTransformation (0.1f, 0.2f, 0.3f, rot_x, rot_y, rot_z, affine);
- pcl::getTranslationAndEulerAngles (affine, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z);
- EXPECT_FLOAT_EQ (trans_x, 0.1f);
- EXPECT_FLOAT_EQ (trans_y, 0.2f);
- EXPECT_FLOAT_EQ (trans_z, 0.3f);
- EXPECT_FLOAT_EQ (rot_x, 2.8827f);
- EXPECT_FLOAT_EQ (rot_y, -0.31190f);
- EXPECT_FLOAT_EQ (rot_z, -0.93058f);
-
- Eigen::Matrix4f transformation (Eigen::Matrix4f::Identity ());
- transformation.block<3, 3> (0, 0) = affine.rotation ();
- transformation.block<3, 1> (0, 3) = affine.translation ();
-
- PointXYZ p (1.f, 2.f, 3.f);
- Eigen::Vector3f v3 = p.getVector3fMap ();
- Eigen::Vector4f v4 = p.getVector4fMap ();
-
- Eigen::Vector3f v3t (affine * v3);
- Eigen::Vector4f v4t (transformation * v4);
-
- PointXYZ pt = pcl::transformPoint (p, affine);
-
- EXPECT_NEAR (pt.x, v3t.x (), 1e-4); EXPECT_NEAR (pt.x, v4t.x (), 1e-4);
- EXPECT_NEAR (pt.y, v3t.y (), 1e-4); EXPECT_NEAR (pt.y, v4t.y (), 1e-4);
- EXPECT_NEAR (pt.z, v3t.z (), 1e-4); EXPECT_NEAR (pt.z, v4t.z (), 1e-4);
-
- PointNormal pn;
- pn.getVector3fMap () = p.getVector3fMap ();
- pn.getNormalVector3fMap () = Eigen::Vector3f (0.60f, 0.48f, 0.64f);
- Eigen::Vector3f n3 = pn.getNormalVector3fMap ();
- Eigen::Vector4f n4 = pn.getNormalVector4fMap ();
-
- Eigen::Vector3f n3t (affine.rotation() * n3);
- Eigen::Vector4f n4t (transformation * n4);
-
- PointNormal pnt = pcl::transformPointWithNormal (pn, affine);
-
- EXPECT_NEAR (pnt.x, v3t.x (), 1e-4); EXPECT_NEAR (pnt.x, v4t.x (), 1e-4);
- EXPECT_NEAR (pnt.y, v3t.y (), 1e-4); EXPECT_NEAR (pnt.y, v4t.y (), 1e-4);
- EXPECT_NEAR (pnt.z, v3t.z (), 1e-4); EXPECT_NEAR (pnt.z, v4t.z (), 1e-4);
- EXPECT_NEAR (pnt.normal_x, n3t.x (), 1e-4); EXPECT_NEAR (pnt.normal_x, n4t.x (), 1e-4);
- EXPECT_NEAR (pnt.normal_y, n3t.y (), 1e-4); EXPECT_NEAR (pnt.normal_y, n4t.y (), 1e-4);
- EXPECT_NEAR (pnt.normal_z, n3t.z (), 1e-4); EXPECT_NEAR (pnt.normal_z, n4t.z (), 1e-4);
-
- PointCloud<PointXYZ> c, ct;
- c.push_back (p);
- pcl::transformPointCloud (c, ct, affine);
- EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
- EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
- EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
-
- pcl::transformPointCloud (c, ct, transformation);
- EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
- EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
- EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
-
- affine = transformation;
-
- std::vector<int> indices (1); indices[0] = 0;
-
- pcl::transformPointCloud (c, indices, ct, affine);
- EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
- EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
- EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
-
- pcl::transformPointCloud (c, indices, ct, transformation);
- EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
- EXPECT_NEAR (pt.y, ct[0].y, 1e-4);
- EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, commonTransform)
-{
- Eigen::Vector3f xaxis (1,0,0), yaxis (0,1,0), zaxis (0,0,1);
- Eigen::Affine3f trans = pcl::getTransFromUnitVectorsZY (zaxis, yaxis);
- Eigen::Vector3f xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
- //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
- EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
-
- trans = pcl::getTransFromUnitVectorsXY (xaxis, yaxis);
- xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
- //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
- EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
-}
-
-/* ---[ */
-int
- main (int argc, char** argv)
-{
- if (argc < 2)
- {
- std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
- if (loadPCDFile (argv[1], cloud_blob) < 0)
- {
- std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- testing::InitGoogleTest (&argc, argv);
- init();
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2009-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#include <gtest/gtest.h>
-
-#include <pcl/point_types.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/visualization/pcl_visualizer.h>
-
-using namespace pcl;
-using namespace pcl::io;
-using namespace pcl::visualization;
-using namespace std;
-
-PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
-PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
-search::KdTree<PointXYZ>::Ptr tree;
-search::KdTree<PointNormal>::Ptr tree2;
-
-// add by ktran to test update functions
-PointCloud<PointXYZ>::Ptr cloud1 (new PointCloud<PointXYZ>);
-PointCloud<PointNormal>::Ptr cloud_with_normals1 (new PointCloud<PointNormal>);
-search::KdTree<PointXYZ>::Ptr tree3;
-search::KdTree<PointNormal>::Ptr tree4;
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, PCLVisualizer_camera)
-{
- PCLVisualizer visualizer;
- visualizer.initCameraParameters ();
-
- // First test if the intrinsic+extrinsic to OpenGL conversion works back and forth
- Eigen::Matrix3f given_intrinsics (Eigen::Matrix3f::Identity ());
- given_intrinsics (0, 0) = 525.f;
- given_intrinsics (1, 1) = 525.f;
- given_intrinsics (0, 2) = 320.f;
- given_intrinsics (1, 2) = 240.f;
-
- float M_PIf = static_cast<float> (M_PI);
- Eigen::Matrix4f given_extrinsics (Eigen::Matrix4f::Identity ());
- given_extrinsics.block<3, 3> (0, 0) = Eigen::AngleAxisf (30.f * M_PIf / 180.f, Eigen::Vector3f (1.f, 0.f, 0.f)).matrix ();
- given_extrinsics.block<3, 1> (0, 3) = Eigen::Vector3f (10.f, 15.f, 20.f);
-
- visualizer.setCameraParameters (given_intrinsics, given_extrinsics);
- Eigen::Matrix4f viewer_pose = visualizer.getViewerPose ().matrix ();
-
- for (size_t i = 0; i < 4; ++i)
- for (size_t j = 0; j < 4; ++j)
- EXPECT_NEAR (given_extrinsics (i, j), viewer_pose (i, j), 1e-6);
-
-
- // Next, check if setting the OpenGL settings translate well back
- // Look towards the x-axis, which equates to a 90 degree rotation around the y-axis
- Eigen::Vector3f trans (10.f, 2.f, 20.f);
- visualizer.setCameraPosition (trans[0], trans[1], trans[2], trans[0] + 1., trans[1], trans[2], 0., 1., 0.);
- viewer_pose = visualizer.getViewerPose ().matrix ();
- Eigen::Matrix3f expected_rotation = Eigen::AngleAxisf (M_PIf / 2.0f, Eigen::Vector3f (0.f, 1.f, 0.f)).matrix ();
- for (size_t i = 0; i < 3; ++i)
- for (size_t j = 0; j < 3; ++j)
- EXPECT_NEAR (viewer_pose (i, j), expected_rotation (i, j), 1e-6);
- for (size_t i = 0; i < 3; ++i)
- EXPECT_NEAR (viewer_pose (i, 3), trans[i], 1e-6);
-
-
- // Now add the bunny point cloud and reset the camera based on the scene (i.e., VTK will compute a new camera pose
- // so that it includes the whole scene in the window)
- /// TODO stuck here, resetCamera () does not seem to work if there is no window present - can't do that on a Mac
-// visualizer.addPointCloud (cloud1);
-// visualizer.resetCamera ();
-// visualizer.spinOnce ();
-// viewer_pose = visualizer.getViewerPose ().matrix ();
-
-// cerr << "reset camera viewer pose:" << endl << viewer_pose << endl;
-}
-
-
-
-
-/* ---[ */
-int
-main (int argc, char** argv)
-{
- if (argc < 2)
- {
- std::cerr << "No test file given. Please download `bunny.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
- // Load file
- pcl::PCLPointCloud2 cloud_blob;
- loadPCDFile (argv[1], cloud_blob);
- fromPCLPointCloud2 (cloud_blob, *cloud);
-
- // Create search tree
- tree.reset (new search::KdTree<PointXYZ> (false));
- tree->setInputCloud (cloud);
-
- // Normal estimation
- NormalEstimation<PointXYZ, Normal> n;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- n.setInputCloud (cloud);
- //n.setIndices (indices[B);
- n.setSearchMethod (tree);
- n.setKSearch (20);
- n.compute (*normals);
-
- // Concatenate XYZ and normal information
- pcl::concatenateFields (*cloud, *normals, *cloud_with_normals);
-
- // Create search tree
- tree2.reset (new search::KdTree<PointNormal>);
- tree2->setInputCloud (cloud_with_normals);
-
- // Process for update cloud
- if (argc == 3)
- {
- pcl::PCLPointCloud2 cloud_blob1;
- loadPCDFile (argv[2], cloud_blob1);
- fromPCLPointCloud2 (cloud_blob1, *cloud1);
- // Create search tree
- tree3.reset (new search::KdTree<PointXYZ> (false));
- tree3->setInputCloud (cloud1);
-
- // Normal estimation
- NormalEstimation<PointXYZ, Normal> n1;
- PointCloud<Normal>::Ptr normals1 (new PointCloud<Normal> ());
- n1.setInputCloud (cloud1);
-
- n1.setSearchMethod (tree3);
- n1.setKSearch (20);
- n1.compute (*normals1);
-
- // Concatenate XYZ and normal information
- pcl::concatenateFields (*cloud1, *normals1, *cloud_with_normals1);
- // Create search tree
- tree4.reset (new search::KdTree<PointNormal>);
- tree4->setInputCloud (cloud_with_normals1);
- }
-
- // Testing
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
-/* ]--- */
--- /dev/null
+set(SUBSYS_NAME tests_visualization)
+set(SUBSYS_DESC "Point cloud library visualization module unit tests")
+PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS visualization)
+set(OPT_DEPS features) # module does not depend on these
+
+set(DEFAULT ON)
+set(build TRUE)
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if (build)
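+  # the test links against pcl_features and needs a display for VTK, so it is skipped on UNIX unless DISPLAY is set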
+ if(BUILD_features AND NOT UNIX OR (UNIX AND DEFINED ENV{DISPLAY}))
+ PCL_ADD_TEST(a_visualization_test test_visualization
+ FILES test_visualization.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_visualization pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bunny.pcd")
+ endif (BUILD_features AND NOT UNIX OR (UNIX AND DEFINED ENV{DISPLAY}))
+endif (build)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2009-2012, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <pcl/point_types.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/features/normal_3d.h>
+#include <pcl/visualization/pcl_visualizer.h>
+
+using namespace pcl;
+using namespace pcl::io;
+using namespace pcl::visualization;
+using namespace std;
+
+PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
+PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
+search::KdTree<PointXYZ>::Ptr tree;
+search::KdTree<PointNormal>::Ptr tree2;
+
+// added by ktran to test update functions
+PointCloud<PointXYZ>::Ptr cloud1 (new PointCloud<PointXYZ>);
+PointCloud<PointNormal>::Ptr cloud_with_normals1 (new PointCloud<PointNormal>);
+search::KdTree<PointXYZ>::Ptr tree3;
+search::KdTree<PointNormal>::Ptr tree4;
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, PCLVisualizer_camera)
+{
+ PCLVisualizer visualizer;
+ visualizer.initCameraParameters ();
+
+ // First test if the intrinsic+extrinsic to OpenGL conversion works back and forth
+ Eigen::Matrix3f given_intrinsics (Eigen::Matrix3f::Identity ());
+ given_intrinsics (0, 0) = 525.f;
+ given_intrinsics (1, 1) = 525.f;
+ given_intrinsics (0, 2) = 320.f;
+ given_intrinsics (1, 2) = 240.f;
+
+ float M_PIf = static_cast<float> (M_PI);
+ Eigen::Matrix4f given_extrinsics (Eigen::Matrix4f::Identity ());
+ given_extrinsics.block<3, 3> (0, 0) = Eigen::AngleAxisf (30.f * M_PIf / 180.f, Eigen::Vector3f (1.f, 0.f, 0.f)).matrix ();
+ given_extrinsics.block<3, 1> (0, 3) = Eigen::Vector3f (10.f, 15.f, 20.f);
+
+ visualizer.setCameraParameters (given_intrinsics, given_extrinsics);
+ Eigen::Matrix4f viewer_pose = visualizer.getViewerPose ().matrix ();
+
+ for (size_t i = 0; i < 4; ++i)
+ for (size_t j = 0; j < 4; ++j)
+ EXPECT_NEAR (given_extrinsics (i, j), viewer_pose (i, j), 1e-6);
+
+
+ // Next, check if setting the OpenGL-style camera parameters translates back correctly
+ // Look towards the x-axis, which equates to a 90 degree rotation around the y-axis
+ Eigen::Vector3f trans (10.f, 2.f, 20.f);
+ visualizer.setCameraPosition (trans[0], trans[1], trans[2], trans[0] + 1., trans[1], trans[2], 0., 1., 0.);
+ viewer_pose = visualizer.getViewerPose ().matrix ();
+ Eigen::Matrix3f expected_rotation = Eigen::AngleAxisf (M_PIf / 2.0f, Eigen::Vector3f (0.f, 1.f, 0.f)).matrix ();
+ for (size_t i = 0; i < 3; ++i)
+ for (size_t j = 0; j < 3; ++j)
+ EXPECT_NEAR (viewer_pose (i, j), expected_rotation (i, j), 1e-6);
+ for (size_t i = 0; i < 3; ++i)
+ EXPECT_NEAR (viewer_pose (i, 3), trans[i], 1e-6);
+
+
+ // Now add the bunny point cloud and reset the camera based on the scene (i.e., VTK will compute a new camera pose
+ // so that it includes the whole scene in the window)
+ /// TODO stuck here, resetCamera () does not seem to work if there is no window present - can't do that on a Mac
+// visualizer.addPointCloud (cloud1);
+// visualizer.resetCamera ();
+// visualizer.spinOnce ();
+// viewer_pose = visualizer.getViewerPose ().matrix ();
+
+// cerr << "reset camera viewer pose:" << endl << viewer_pose << endl;
+}
+
+
+
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `bunny.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ // Load file
+ pcl::PCLPointCloud2 cloud_blob;
+ loadPCDFile (argv[1], cloud_blob);
+ fromPCLPointCloud2 (cloud_blob, *cloud);
+
+ // Create search tree
+ tree.reset (new search::KdTree<PointXYZ> (false));
+ tree->setInputCloud (cloud);
+
+ // Normal estimation
+ NormalEstimation<PointXYZ, Normal> n;
+ PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
+ n.setInputCloud (cloud);
+ //n.setIndices (indices);
+ n.setSearchMethod (tree);
+ n.setKSearch (20);
+ n.compute (*normals);
+
+ // Concatenate XYZ and normal information
+ pcl::concatenateFields (*cloud, *normals, *cloud_with_normals);
+
+ // Create search tree
+ tree2.reset (new search::KdTree<PointNormal>);
+ tree2->setInputCloud (cloud_with_normals);
+
+ // Process for update cloud
+ if (argc == 3)
+ {
+ pcl::PCLPointCloud2 cloud_blob1;
+ loadPCDFile (argv[2], cloud_blob1);
+ fromPCLPointCloud2 (cloud_blob1, *cloud1);
+ // Create search tree
+ tree3.reset (new search::KdTree<PointXYZ> (false));
+ tree3->setInputCloud (cloud1);
+
+ // Normal estimation
+ NormalEstimation<PointXYZ, Normal> n1;
+ PointCloud<Normal>::Ptr normals1 (new PointCloud<Normal> ());
+ n1.setInputCloud (cloud1);
+
+ n1.setSearchMethod (tree3);
+ n1.setKSearch (20);
+ n1.compute (*normals1);
+
+ // Concatenate XYZ and normal information
+ pcl::concatenateFields (*cloud1, *normals1, *cloud_with_normals1);
+ // Create search tree
+ tree4.reset (new search::KdTree<PointNormal>);
+ tree4->setInputCloud (cloud_with_normals1);
+ }
+
+ // Testing
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
output_xyzi->points[point_i].intensity = dist;
}
- rmse = sqrtf (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
}
else if (correspondence_type == "nn")
{
output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
output_xyzi->points[point_i].intensity = dist;
}
- rmse = sqrtf (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
}
else if (correspondence_type == "nnplane")
output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
output_xyzi->points[point_i].intensity = dist * dist;
}
- rmse = sqrtf (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
}
else
{
" -leaf_size X = the XYZ leaf size for the VoxelGrid -- for data reduction (default: ");
print_value ("%f", default_leaf_size);
print_info (" m)\n");
+ print_info (
+ " -no_vis_result = flag to stop visualizing the generated pcd\n");
}
/* ---[ */
parse_argument (argc, argv, "-resolution", resolution);
float leaf_size = default_leaf_size;
parse_argument (argc, argv, "-leaf_size", leaf_size);
+ bool vis_result = ! find_switch (argc, argv, "-no_vis_result");
// Parse the command line arguments for .ply and PCD files
std::vector<int> pcd_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
}
bool INTER_VIS = false;
- bool VIS = true;
visualization::PCLVisualizer vis;
vis.addModelFromPolyData (polydata1, "mesh1", 0);
for (size_t i = 0; i < aligned_clouds.size (); i++)
*big_boy += *aligned_clouds[i];
- if (VIS)
+ if (vis_result)
{
visualization::PCLVisualizer vis2 ("visualize");
vis2.addPointCloud (big_boy);
grid_.setLeafSize (leaf_size, leaf_size, leaf_size);
grid_.filter (*big_boy);
- if (VIS)
+ if (vis_result)
{
visualization::PCLVisualizer vis3 ("visualize");
vis3.addPointCloud (big_boy);
{
float r1 = static_cast<float> (uniform_deviate (rand ()));
float r2 = static_cast<float> (uniform_deviate (rand ()));
- float r1sqr = sqrtf (r1);
+ float r1sqr = std::sqrt (r1);
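+  // sqrt(r1) yields barycentric weights (1 - sqrt(r1), sqrt(r1)*(1 - r2), sqrt(r1)*r2) that sample the triangle uniformly by area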
float OneMinR1Sqr = (1 - r1sqr);
float OneMinR2 = (1 - r2);
a1 *= OneMinR1Sqr;
}
inline void
-randPSurface (vtkPolyData * polydata, std::vector<double> * cumulativeAreas, double totalArea, Eigen::Vector4f& p)
+randPSurface (vtkPolyData * polydata, std::vector<double> * cumulativeAreas, double totalArea, Eigen::Vector4f& p, bool calcNormal, Eigen::Vector3f& n)
{
float r = static_cast<float> (uniform_deviate (rand ()) * totalArea);
polydata->GetPoint (ptIds[0], A);
polydata->GetPoint (ptIds[1], B);
polydata->GetPoint (ptIds[2], C);
- randomPointTriangle (float (A[0]), float (A[1]), float (A[2]),
- float (B[0]), float (B[1]), float (B[2]),
+ if (calcNormal)
+ {
+ // OBJ: Vertices are stored in a counter-clockwise order by default
+ Eigen::Vector3f v1 = Eigen::Vector3f (A[0], A[1], A[2]) - Eigen::Vector3f (C[0], C[1], C[2]);
+ Eigen::Vector3f v2 = Eigen::Vector3f (B[0], B[1], B[2]) - Eigen::Vector3f (C[0], C[1], C[2]);
+ n = v1.cross (v2);
+ n.normalize ();
+ }
+ randomPointTriangle (float (A[0]), float (A[1]), float (A[2]),
+ float (B[0]), float (B[1]), float (B[2]),
float (C[0]), float (C[1]), float (C[2]), p);
}
void
-uniform_sampling (vtkSmartPointer<vtkPolyData> polydata, size_t n_samples, pcl::PointCloud<pcl::PointXYZ> & cloud_out)
+uniform_sampling (vtkSmartPointer<vtkPolyData> polydata, size_t n_samples, bool calc_normal, pcl::PointCloud<pcl::PointNormal> & cloud_out)
{
polydata->BuildCells ();
vtkSmartPointer<vtkCellArray> cells = polydata->GetPolys ();
for (i = 0; i < n_samples; i++)
{
Eigen::Vector4f p;
- randPSurface (polydata, &cumulativeAreas, totalArea, p);
+ Eigen::Vector3f n;
+ randPSurface (polydata, &cumulativeAreas, totalArea, p, calc_normal, n);
cloud_out.points[i].x = p[0];
cloud_out.points[i].y = p[1];
cloud_out.points[i].z = p[2];
+ if (calc_normal)
+ {
+ cloud_out.points[i].normal_x = n[0];
+ cloud_out.points[i].normal_y = n[1];
+ cloud_out.points[i].normal_z = n[2];
+ }
}
}
using namespace pcl::io;
using namespace pcl::console;
-int default_number_samples = 100000;
-float default_leaf_size = 0.01f;
+const int default_number_samples = 100000;
+const float default_leaf_size = 0.01f;
void
printHelp (int, char **argv)
" -leaf_size X = the XYZ leaf size for the VoxelGrid -- for data reduction (default: ");
print_value ("%f", default_leaf_size);
print_info (" m)\n");
+ print_info (" -write_normals = flag to write normals to the output pcd\n");
+ print_info (
+ " -no_vis_result = flag to stop visualizing the generated pcd\n");
}
/* ---[ */
parse_argument (argc, argv, "-n_samples", SAMPLE_POINTS_);
float leaf_size = default_leaf_size;
parse_argument (argc, argv, "-leaf_size", leaf_size);
+ bool vis_result = ! find_switch (argc, argv, "-no_vis_result");
+ const bool write_normals = find_switch (argc, argv, "-write_normals");
// Parse the command line arguments for .ply and PCD files
std::vector<int> pcd_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
return (-1);
}
- vtkSmartPointer<vtkPolyData> polydata1 = vtkSmartPointer<vtkPolyData>::New ();;
+ vtkSmartPointer<vtkPolyData> polydata1 = vtkSmartPointer<vtkPolyData>::New ();
if (ply_file_indices.size () == 1)
{
pcl::PolygonMesh mesh;
vtkSmartPointer<vtkPolyDataMapper> triangleMapper = vtkSmartPointer<vtkPolyDataMapper>::New ();
triangleMapper->SetInputConnection (triangleFilter->GetOutputPort ());
- triangleMapper->Update();
- polydata1 = triangleMapper->GetInput();
+ triangleMapper->Update ();
+ polydata1 = triangleMapper->GetInput ();
bool INTER_VIS = false;
- bool VIS = true;
if (INTER_VIS)
{
visualization::PCLVisualizer vis;
vis.addModelFromPolyData (polydata1, "mesh1", 0);
vis.setRepresentationToSurfaceForAllActors ();
- vis.spin();
+ vis.spin ();
}
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointXYZ>);
- uniform_sampling (polydata1, SAMPLE_POINTS_, *cloud_1);
+ pcl::PointCloud<pcl::PointNormal>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointNormal>);
+ uniform_sampling (polydata1, SAMPLE_POINTS_, write_normals, *cloud_1);
if (INTER_VIS)
{
visualization::PCLVisualizer vis_sampled;
- vis_sampled.addPointCloud (cloud_1);
+ vis_sampled.addPointCloud<pcl::PointNormal> (cloud_1);
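+    // when normals were computed, overlay them as well (one normal per point, drawn 0.02 m long)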
+ if (write_normals)
+ vis_sampled.addPointCloudNormals<pcl::PointNormal> (cloud_1, 1, 0.02f, "cloud_normals");
vis_sampled.spin ();
}
// Voxelgrid
- VoxelGrid<PointXYZ> grid_;
+ VoxelGrid<PointNormal> grid_;
grid_.setInputCloud (cloud_1);
grid_.setLeafSize (leaf_size, leaf_size, leaf_size);
- pcl::PointCloud<pcl::PointXYZ>::Ptr res(new pcl::PointCloud<pcl::PointXYZ>);
- grid_.filter (*res);
+ pcl::PointCloud<pcl::PointNormal>::Ptr voxel_cloud (new pcl::PointCloud<pcl::PointNormal>);
+ grid_.filter (*voxel_cloud);
- if (VIS)
+ if (vis_result)
{
visualization::PCLVisualizer vis3 ("VOXELIZED SAMPLES CLOUD");
- vis3.addPointCloud (res);
+ vis3.addPointCloud<pcl::PointNormal> (voxel_cloud);
+ if (write_normals)
+ vis3.addPointCloudNormals<pcl::PointNormal> (voxel_cloud, 1, 0.02f, "cloud_normals");
vis3.spin ();
}
- savePCDFileASCII (argv[pcd_file_indices[0]], *res);
+ if (!write_normals)
+ {
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz (new pcl::PointCloud<pcl::PointXYZ>);
+ // Strip uninitialized normals from cloud:
+ pcl::copyPointCloud (*voxel_cloud, *cloud_xyz);
+ savePCDFileASCII (argv[pcd_file_indices[0]], *cloud_xyz);
+ }
+ else
+ {
+ savePCDFileASCII (argv[pcd_file_indices[0]], *voxel_cloud);
+ }
}
#include <pcl/visualization/point_cloud_handlers.h>
#include <pcl/visualization/common/common.h>
-#include <pcl/octree/octree.h>
-#include <pcl/octree/octree_impl.h>
+#include <pcl/octree/octree_pointcloud_voxelcentroid.h>
#include <pcl/filters/filter.h>
#include "boost.h"
#include <pcl/tracking/tracker.h>
#include <pcl/tracking/coherence.h>
#include <pcl/filters/passthrough.h>
-#include <pcl/octree/octree.h>
+#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <Eigen/Dense>
namespace pcl
{
-
namespace tracking
{
/** \brief @b ParticleFilterTracker tracks the PointCloud which is given by
* // Note: The height of the cone is set using the magnitude of the axis_direction vector.
*
* pcl::ModelCoefficients cone_coeff;
- * plane_coeff.values.resize (7); // We need 7 values
- * plane_coeff.values[0] = cone_apex.x ();
- * plane_coeff.values[1] = cone_apex.y ();
- * plane_coeff.values[2] = cone_apex.z ();
- * plane_coeff.values[3] = axis_direction.x ();
- * plane_coeff.values[4] = axis_direction.y ();
- * plane_coeff.values[5] = axis_direction.z ();
- * plane_coeff.values[6] = angle (); // degrees
+ * cone_coeff.values.resize (7); // We need 7 values
+ * cone_coeff.values[0] = cone_apex.x ();
+ * cone_coeff.values[1] = cone_apex.y ();
+ * cone_coeff.values[2] = cone_apex.z ();
+ * cone_coeff.values[3] = axis_direction.x ();
+ * cone_coeff.values[4] = axis_direction.y ();
+ * cone_coeff.values[5] = axis_direction.z ();
+ * cone_coeff.values[6] = angle (); // degrees
*
* vtkSmartPointer<vtkDataSet> data = pcl::visualization::createCone (cone_coeff);
* \endcode
PCL_EXPORTS vtkSmartPointer<vtkDataSet>
createCone (const pcl::ModelCoefficients &coefficients);
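The documentation snippet above leaves `cone_apex` and `angle ()` abstract; a concrete, hedged fill-in (all values are illustrative, not taken from this patch) could look like:

```cpp
#include <pcl/ModelCoefficients.h>
#include <pcl/visualization/common/shapes.h>

pcl::ModelCoefficients cone_coeff;
cone_coeff.values.resize (7);      // apex (3), axis direction (3), opening angle (1)
cone_coeff.values[0] = 0.0f;       // apex x
cone_coeff.values[1] = 0.0f;       // apex y
cone_coeff.values[2] = 0.0f;       // apex z
cone_coeff.values[3] = 0.0f;       // axis direction x
cone_coeff.values[4] = 0.0f;       // axis direction y
cone_coeff.values[5] = 0.5f;       // axis direction z; its norm sets the cone height
cone_coeff.values[6] = 30.0f;      // opening angle in degrees

vtkSmartPointer<vtkDataSet> cone = pcl::visualization::createCone (cone_coeff);
```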
- /** \brief Creaet a cube shape from a set of model coefficients.
+ /** \brief Create a cube shape from a set of model coefficients.
* \param[in] coefficients the cube coefficients (Tx, Ty, Tz, Qx, Qy, Qz, Qw, width, height, depth)
* \ingroup visualization
*/
PCL_EXPORTS vtkSmartPointer<vtkDataSet>
createCube (const pcl::ModelCoefficients &coefficients);
- /** \brief Creaet a cube shape from a set of model coefficients.
+ /** \brief Create a cube shape from a set of model coefficients.
*
* \param[in] translation a translation to apply to the cube from 0,0,0
* \param[in] rotation a quaternion-based rotation to apply to the cube
#include <pcl/visualization/common/shapes.h>
+// Support for VTK 7.1 upwards
+#ifdef vtkGenericDataArray_h
+#define SetTupleValue SetTypedTuple
+#define InsertNextTupleValue InsertNextTypedTuple
+#define GetTupleValue GetTypedTuple
+#endif
+
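The aliases above let call sites written for VTK < 7.1 keep compiling once the typed-tuple API replaces the old names; a minimal illustration (the array and colour values are hypothetical) of a call that the macros rewrite:

```cpp
#include <vtkSmartPointer.h>
#include <vtkUnsignedCharArray.h>

vtkSmartPointer<vtkUnsignedCharArray> colors = vtkSmartPointer<vtkUnsignedCharArray>::New ();
colors->SetNumberOfComponents (3);
unsigned char rgb[3] = { 255, 0, 0 };
// On VTK >= 7.1 this expands to InsertNextTypedTuple (rgb); on older VTK it is the real method.
colors->InsertNextTupleValue (rgb);
```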
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::visualization::PCLVisualizer::addPointCloud (
// Since each follower may follow a different camera, we need different followers
rens_->InitTraversal ();
vtkRenderer* renderer = NULL;
- int i = 1;
+ int i = 0;
while ((renderer = rens_->GetNextItem ()) != NULL)
{
// Should we add the actor to all renderers or just to i-nth renderer?
vtkSmartPointer<vtkLODActor> actor = vtkSmartPointer<vtkLODActor>::New ();
actor->SetMapper (mapper);
+ // Apply the cloud's viewpoint (sensor origin and orientation) to the actor
+ vtkSmartPointer<vtkMatrix4x4> transformation = vtkSmartPointer<vtkMatrix4x4>::New ();
+ convertToVtkMatrix (cloud->sensor_origin_, cloud->sensor_orientation_, transformation);
+ actor->SetUserMatrix (transformation);
+
// Add it to all renderers
addActorToRenderer (actor, viewport);
return (true);
}
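A rough sketch of what the user matrix applied above contains, assuming the usual PCL sensor-pose convention (`convertToVtkMatrix` is the visualizer's own helper; this is the Eigen-side equivalent, not the patch's code):

```cpp
#include <Eigen/Geometry>
#include <vtkMatrix4x4.h>
#include <vtkSmartPointer.h>

// Compose the 4x4 sensor pose from the cloud's acquisition origin and orientation.
Eigen::Matrix4f pose = Eigen::Matrix4f::Identity ();
pose.topLeftCorner<3, 3> () = cloud->sensor_orientation_.toRotationMatrix ();
pose.block<3, 1> (0, 3)     = cloud->sensor_origin_.head<3> ();

// Copy it into a VTK matrix so the actor is rendered at the sensor pose.
vtkSmartPointer<vtkMatrix4x4> transformation = vtkSmartPointer<vtkMatrix4x4>::New ();
for (int r = 0; r < 4; ++r)
  for (int c = 0; c < 4; ++c)
    transformation->SetElement (r, c, pose (r, c));
actor->SetUserMatrix (transformation);
```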
+#ifdef vtkGenericDataArray_h
+#undef SetTupleValue
+#undef InsertNextTupleValue
+#undef GetTupleValue
+#endif
+
#endif
* \param[in] ypos the Y position on screen where the text should be added
* \param[in] r the red color value
* \param[in] g the green color value
- * \param[in] b the blue color vlaue
+ * \param[in] b the blue color value
* \param[in] id the text object id (default: equal to the "text" parameter)
* \param[in] viewport the view port (default: all)
*/
* \param[in] fontsize the fontsize of the text
* \param[in] r the red color value
* \param[in] g the green color value
- * \param[in] b the blue color vlaue
+ * \param[in] b the blue color value
* \param[in] id the text object id (default: equal to the "text" parameter)
* \param[in] viewport the view port (default: all)
*/
* \param[in] ypos the new Y position on screen
* \param[in] r the red color value
* \param[in] g the green color value
- * \param[in] b the blue color vlaue
+ * \param[in] b the blue color value
* \param[in] id the text object id (default: equal to the "text" parameter)
*/
bool
* \param[in] fontsize the fontsize of the text
* \param[in] r the red color value
* \param[in] g the green color value
- * \param[in] b the blue color vlaue
+ * \param[in] b the blue color value
* \param[in] id the text object id (default: equal to the "text" parameter)
*/
bool
double r = 1.0, double g = 1.0, double b = 1.0,
const std::string &id = "", int viewport = 0);
- /** \brief Check if the cloud, shape, or coordinate with the given id was already added to this vizualizer.
+ /** \brief Check if the cloud, shape, or coordinate with the given id was already added to this visualizer.
* \param[in] id the id of the cloud, shape, or coordinate to check
* \return true if a cloud, shape, or coordinate with the specified id was found
*/
int
getGeometryHandlerIndex (const std::string &id);
- /** \brief Update/set the color index of a renderered PointCloud based on its ID
+ /** \brief Update/set the color index of a rendered PointCloud based on its ID
* \param[in] id the point cloud object id
* \param[in] index the color handler index to use
*/
addArrow (const P1 &pt1, const P2 &pt2, double r, double g, double b,
const std::string &id = "arrow", int viewport = 0);
- /** \brief Add a line arrow segment between two points, and (optianally) display the distance between them
+ /** \brief Add a line arrow segment between two points, and (optionally) display the distance between them
*
* Arrow head is attached on the **start** point (\c pt1) of the arrow.
*
renderView (int xres, int yres, pcl::PointCloud<pcl::PointXYZ>::Ptr & cloud);
/** \brief The purpose of this method is to render a CAD model added to the visualizer from different viewpoints
- * in order to simulate partial views of model. The viewpoint locations are the vertices of a tesselated sphere
- * build from an icosaheadron. The tesselation paremeter controls how many times the triangles of the original
+ * in order to simulate partial views of the model. The viewpoint locations are the vertices of a tessellated sphere
+ * built from an icosahedron. The tessellation parameter controls how many times the triangles of the original
* icosahedron are divided to approximate the sphere and thus the number of partial views generated for a model;
* with a tesselation_level of 0, 12 views are generated if use_vertices=true and 20 views if use_vertices=false
*
* \param[out] enthropies values between 0 and 1 representing the fraction of the model visible from the respective viewpoint.
* \param[in] tesselation_level represents the number of subdivisions applied to the triangles of original icosahedron.
* \param[in] view_angle field of view of the virtual camera. Default: 45
- * \param[in] radius_sphere the tesselated sphere radius. Default: 1
- * \param[in] use_vertices if true, use the vertices of tesselated icosahedron (12,42,...) or if false, use the faces of tesselated
+ * \param[in] radius_sphere the tessellated sphere radius. Default: 1
+ * \param[in] use_vertices if true, use the vertices of tessellated icosahedron (12,42,...) or if false, use the faces of tessellated
* icosahedron (20,80,...). Default: true
*/
void
namespace pcl
{
/** \brief @b RegistrationVisualizer represents the base class for rendering
- * the intermediate positions ocupied by the source point cloud during it's registration
+ * the intermediate positions occupied by the source point cloud during its registration
* to the target point cloud. A registration algorithm is taken as input and
- * it's covergence is rendered.
+ * its convergence is rendered.
* \author Gheorghe Lisca
* \ingroup visualization
*/
/** \brief Set the registration algorithm whose intermediate steps will be rendered.
* The method creates the local callback function pcl::RegistrationVisualizer::update_visualizer_ and
- * binds it to the local biffers update function pcl::RegistrationVisualizer::updateIntermediateCloud().
+ * binds it to the local buffers update function pcl::RegistrationVisualizer::updateIntermediateCloud().
* The local callback function pcl::RegistrationVisualizer::update_visualizer_ is then linked to
* the pcl::Registration::update_visualizer_ callback function.
* \param registration represents the registration method whose intermediate steps will be rendered.
bool
setRegistration (pcl::Registration<PointSource, PointTarget> ®istration)
{
- // Update the name of the registration method to be desplayed
+ // Update the name of the registration method to be displayed
registration_method_name_ = registration.getClassName();
- // Create the local callback function and bind it to the local function resposable for updating
+ // Create the local callback function and bind it to the local function responsible for updating
// the local buffers
update_visualizer_ = boost::bind (&RegistrationVisualizer<PointSource, PointTarget>::updateIntermediateCloud,
this, _1, _2, _3, _4);
/** \brief Updates visualizer local buffers cloud_intermediate, cloud_intermediate_indices, cloud_target_indices with
* the newest registration intermediate results.
* \param cloud_src represents the initial source point cloud
- * \param indices_src represents the incices of the intermediate source points used for the estimation of rigid transformation
+ * \param indices_src represents the indices of the intermediate source points used for the estimation of rigid transformation
* \param cloud_tgt represents the target point cloud
- * \param indices_tgt represents the incices of the target points used for the estimation of rigid transformation
+ * \param indices_tgt represents the indices of the target points used for the estimation of rigid transformation
*/
void
updateIntermediateCloud (const pcl::PointCloud<PointSource> &cloud_src, const std::vector<int> &indices_src,
const pcl::PointCloud<PointTarget> &cloud_tgt, const std::vector<int> &indices_tgt);
- /** \brief Set maximum number of corresponcence lines whch will be rendered. */
+ /** \brief Set maximum number of correspondence lines which will be rendered. */
inline void
setMaximumDisplayedCorrespondences (const int maximum_displayed_correspondences)
{
- // This method is usualy called form other thread than visualizer thread
+ // This method is usually called from a thread other than the visualizer thread,
// therefore the same visualizer_updating_mutex_ will be used
// Lock maximum_displayed_correspondences_
visualizer_updating_mutex_.unlock();
}
- /** \brief Return maximum number of corresponcence lines which are rendered. */
+ /** \brief Return maximum number of correspondence lines which are rendered. */
inline size_t
getMaximumDisplayedCorrespondences()
{
}
private:
- /** \brief Initialize and run the visualization loop. This function will be runned in the internal thread viewer_thread_ */
+ /** \brief Initialize and run the visualization loop. This function will run in the internal thread viewer_thread_ */
void
runDisplay ();
/** \brief The local buffer for target point cloud. */
pcl::PointCloud<PointTarget> cloud_target_;
- /** \brief The mutex used for the sincronization of updating and rendering of the local buffers. */
+ /** \brief The mutex used for the synchronization of updating and rendering of the local buffers. */
boost::mutex visualizer_updating_mutex_;
/** \brief The local buffer for intermediate point cloud obtained during registration process. */
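For context, a typical (hedged) way this class is wired up, assuming `source` and `target` are preloaded `PointXYZ` clouds; this is standard usage, not code taken from this patch:

```cpp
#include <pcl/registration/icp.h>
#include <pcl/visualization/registration_visualizer.h>

pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
icp.setInputSource (source);
icp.setInputTarget (target);

pcl::RegistrationVisualizer<pcl::PointXYZ, pcl::PointXYZ> vis;
vis.setRegistration (icp);                     // binds update_visualizer_ to updateIntermediateCloud()
vis.setMaximumDisplayedCorrespondences (100);  // guarded by visualizer_updating_mutex_
vis.startDisplay ();                           // spawns viewer_thread_, which runs runDisplay()

pcl::PointCloud<pcl::PointXYZ> aligned;
icp.align (aligned);                           // each iteration feeds the visualizer's local buffers
```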
bool ret;
fs.open (file.c_str ());
+ if (!fs.is_open ())
+ {
+ return (false);
+ }
while (!fs.eof ())
{
getline (fs, line);
// Get the width and height of the image - assume the calibrated centers are at the center of the image
Eigen::Vector2i window_size;
- window_size[0] = static_cast<int> (intrinsics (0, 2));
- window_size[1] = static_cast<int> (intrinsics (1, 2));
+ window_size[0] = 2 * static_cast<int> (intrinsics (0, 2));
+ window_size[1] = 2 * static_cast<int> (intrinsics (1, 2));
// Compute the vertical field of view based on the focal length and image height
double fovy = 2 * atan (window_size[1] / (2. * intrinsics (1, 1))) * 180.0 / M_PI;
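A small worked check of the corrected window-size and field-of-view computation above, assuming the principal point lies at the image centre (Kinect-like intrinsics; the values are illustrative):

```cpp
#include <cmath>
#include <Eigen/Core>

Eigen::Matrix3f intrinsics;
intrinsics << 525.0f,   0.0f, 320.0f,
                0.0f, 525.0f, 240.0f,
                0.0f,   0.0f,   1.0f;

Eigen::Vector2i window_size;
window_size[0] = 2 * static_cast<int> (intrinsics (0, 2));   // 2 * cx = 640
window_size[1] = 2 * static_cast<int> (intrinsics (1, 2));   // 2 * cy = 480

// fovy = 2 * atan (h / (2 * fy)) ~= 49.1 degrees for these values
double fovy = 2 * atan (window_size[1] / (2. * intrinsics (1, 1))) * 180.0 / M_PI;
```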
#include <boost/filesystem.hpp>
#include <pcl/console/parse.h>
+// Support for VTK 7.1 upwards
+#ifdef vtkGenericDataArray_h
+#define SetTupleValue SetTypedTuple
+#define InsertNextTupleValue InsertNextTypedTuple
+#define GetTupleValue GetTypedTuple
+#endif
+
#if defined(_WIN32)
// Remove macros defined in Windows.h
#undef near
actor->GetMapper ()->ScalarVisibilityOn ();
actor->GetMapper ()->SetScalarRange (range[0], range[1]);
vtkSmartPointer<vtkLookupTable> table;
- if (!pcl::visualization::getColormapLUT (static_cast<LookUpTableRepresentationProperties>(value), table))
+ if (!pcl::visualization::getColormapLUT (static_cast<LookUpTableRepresentationProperties>(static_cast<int>(value)), table))
break;
table->SetRange (range[0], range[1]);
actor->GetMapper ()->SetLookupTable (table);
actor->GetMapper ()->ScalarVisibilityOn ();
actor->GetMapper ()->SetScalarRange (range[0], range[1]);
vtkSmartPointer<vtkLookupTable> table = vtkSmartPointer<vtkLookupTable>::New ();
- getColormapLUT (static_cast<LookUpTableRepresentationProperties>(value), table);
+ getColormapLUT (static_cast<LookUpTableRepresentationProperties>(static_cast<int>(value)), table);
table->SetRange (range[0], range[1]);
actor->GetMapper ()->SetLookupTable (table);
style_->updateLookUpTableDisplay (false);
{
FPS_CALC ("drawing");
//the call to get() sets the cloud_ to null;
- viewer.showCloud (getLatestCloud ());
+ viewer.showCloud (getLatestCloud (), "cloud");
}
}
if (pcl::console::find_argument (argc, argv, "-xyz") != -1)
xyz = true;
- pcl::io::OpenNI2Grabber grabber (device_id, depth_mode, image_mode);
-
- if (xyz || !grabber.providesCallback<pcl::io::OpenNI2Grabber::sig_cb_openni_point_cloud_rgb> ())
+ try
{
- OpenNI2Viewer<pcl::PointXYZ> openni_viewer (grabber);
- openni_viewer.run ();
+ pcl::io::OpenNI2Grabber grabber (device_id, depth_mode, image_mode);
+
+ if (xyz || !grabber.providesCallback<pcl::io::OpenNI2Grabber::sig_cb_openni_point_cloud_rgb> ())
+ {
+ OpenNI2Viewer<pcl::PointXYZ> openni_viewer (grabber);
+ openni_viewer.run ();
+ }
+ else
+ {
+ OpenNI2Viewer<pcl::PointXYZRGBA> openni_viewer (grabber);
+ openni_viewer.run ();
+ }
}
- else
+ catch (pcl::IOException& e)
{
- OpenNI2Viewer<pcl::PointXYZRGBA> openni_viewer (grabber);
- openni_viewer.run ();
+ pcl::console::print_error ("Failed to create a grabber: %s\n", e.what ());
+ return (1);
}
return (0);
image_viewer_->addRGBImage (reinterpret_cast<unsigned char*> (&rgb_data[0]),
frame->image->getWidth (),
- frame->image->getHeight ());
+ frame->image->getHeight (),
+ "rgb_image");
}
if (frame->depth_image)
depth_image_viewer_->addRGBImage (data,
frame->depth_image->getWidth (),
- frame->depth_image->getHeight ());
+ frame->depth_image->getHeight (),
+ "rgb_image");
if (!depth_image_cld_init_)
{
depth_image_viewer_->setPosition (frame->depth_image->getWidth (), 0);
}
if (image->getEncoding() == openni_wrapper::Image::RGB)
- image_viewer_->addRGBImage (image->getMetaData ().Data (), image->getWidth (), image->getHeight ());
+ image_viewer_->addRGBImage (image->getMetaData ().Data (), image->getWidth (), image->getHeight (), "rgb_image");
else
- image_viewer_->addRGBImage (rgb_data_, image->getWidth (), image->getHeight ());
+ image_viewer_->addRGBImage (rgb_data_, image->getWidth (), image->getHeight (), "rgb_image");
image_viewer_->spinOnce ();
}
if (pcl::console::find_argument (argc, argv, "-xyz") != -1)
xyz = true;
- pcl::OpenNIGrabber grabber (device_id, depth_mode, image_mode);
-
- if (xyz || !grabber.providesCallback<pcl::OpenNIGrabber::sig_cb_openni_point_cloud_rgb> ())
+ try
{
- OpenNIViewer<pcl::PointXYZ> openni_viewer (grabber);
- openni_viewer.run ();
+ pcl::OpenNIGrabber grabber (device_id, depth_mode, image_mode);
+
+ if (xyz || !grabber.providesCallback<pcl::OpenNIGrabber::sig_cb_openni_point_cloud_rgb> ())
+ {
+ OpenNIViewer<pcl::PointXYZ> openni_viewer (grabber);
+ openni_viewer.run ();
+ }
+ else
+ {
+ OpenNIViewer<pcl::PointXYZRGBA> openni_viewer (grabber);
+ openni_viewer.run ();
+ }
}
- else
+ catch (pcl::IOException& e)
{
- OpenNIViewer<pcl::PointXYZRGBA> openni_viewer (grabber);
- openni_viewer.run ();
+ pcl::console::print_error ("Failed to create a grabber: %s\n", e.what ());
+ return (1);
}
return (0);
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/image_viewer.h>
#include <pcl/io/openni_camera/openni_driver.h>
+#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/visualization/boost.h>
#include <pcl/visualization/mouse_event.h>
if (pcl::console::find_argument(argc, argv, "-xyz") != -1)
xyz = true;
- pcl::OpenNIGrabber grabber (device_id, depth_mode, image_mode);
-
- if (xyz) // only if xyz flag is set, since grabber provides at least XYZ and XYZI pointclouds
- {
- SimpleOpenNIViewer<pcl::PointXYZ> v (grabber);
- v.run ();
- }
- else if (grabber.providesCallback<pcl::OpenNIGrabber::sig_cb_openni_point_cloud_rgb> ())
+ try
{
- SimpleOpenNIViewer<pcl::PointXYZRGBA> v (grabber);
- v.run ();
+ pcl::OpenNIGrabber grabber (device_id, depth_mode, image_mode);
+
+ if (xyz) // only if xyz flag is set, since grabber provides at least XYZ and XYZI pointclouds
+ {
+ SimpleOpenNIViewer<pcl::PointXYZ> v (grabber);
+ v.run ();
+ }
+ else if (grabber.providesCallback<pcl::OpenNIGrabber::sig_cb_openni_point_cloud_rgb> ())
+ {
+ SimpleOpenNIViewer<pcl::PointXYZRGBA> v (grabber);
+ v.run ();
+ }
+ else
+ {
+ SimpleOpenNIViewer<pcl::PointXYZI> v (grabber);
+ v.run ();
+ }
}
- else
+ catch (pcl::IOException& e)
{
- SimpleOpenNIViewer<pcl::PointXYZI> v (grabber);
- v.run ();
+ pcl::console::print_error ("Failed to create a grabber: %s\n", e.what ());
+ return (1);
}
return (0);