image: pointcloudlibrary/fmt
- container: env1604
image: pointcloudlibrary/env:16.04
+ - container: env1804
+ image: pointcloudlibrary/env:18.04
- container: env2004
image: pointcloudlibrary/env:20.04
- container: doc
displayName: Formatting
jobs:
- template: formatting.yaml
- - stage: build_ubuntu
- displayName: Build Ubuntu
+
+ - stage: build_gcc
+ displayName: Build GCC
dependsOn: formatting
jobs:
- - template: build-ubuntu.yaml
- - stage: build_osx
- displayName: Build macOS
+ - job: ubuntu
+ displayName: Ubuntu
+ pool:
+ vmImage: 'Ubuntu 16.04'
+ strategy:
+ matrix:
+ 16.04 GCC:
+ CONTAINER: env1604
+ CC: gcc
+ CXX: g++
+ BUILD_GPU: OFF
+ CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON'
+ 20.04 GCC:
+ CONTAINER: env2004
+ CC: gcc
+ CXX: g++
+ BUILD_GPU: OFF
+ CMAKE_ARGS: ''
+ container: $[ variables['CONTAINER'] ]
+ timeoutInMinutes: 0
+ variables:
+ BUILD_DIR: '$(Agent.BuildDirectory)/build'
+ CMAKE_CXX_FLAGS: '-Wall -Wextra'
+ steps:
+ - template: build/ubuntu.yaml
+
+ - stage: build_clang
+ displayName: Build Clang
dependsOn: formatting
jobs:
- - template: build-macos.yaml
- - stage: build_windows
- displayName: Build Windows
+ - job: osx
+ displayName: macOS
+ pool:
+ vmImage: '$(VMIMAGE)'
+ strategy:
+ matrix:
+ Catalina 10.15:
+ VMIMAGE: 'macOS-10.15'
+ OSX_VERSION: '10.15'
+ Mojave 10.14:
+ VMIMAGE: 'macOS-10.14'
+ OSX_VERSION: '10.14'
+ timeoutInMinutes: 0
+ variables:
+ BUILD_DIR: '$(Agent.WorkFolder)/build'
+ GOOGLE_TEST_DIR: '$(Agent.WorkFolder)/googletest'
+ CMAKE_CXX_FLAGS: '-Wall -Wextra -Wabi -Werror -Wno-error=deprecated-declarations'
+ steps:
+ - template: build/macos.yaml
+ - job: ubuntu
+ displayName: Ubuntu
+ # Placing the Ubuntu Clang job after macOS ensures that an extra parallel job doesn't need to be created.
+ # The total time per run stays the same: macOS is quicker, so it finishes earlier and the remaining time is used by this job.
+ # Therefore the number of parallel jobs and the total run time of the entire pipeline remain unchanged even after adding this job.
+ dependsOn: osx
+ condition: succeededOrFailed()
+ pool:
+ vmImage: 'Ubuntu 16.04'
+ strategy:
+ matrix:
+ 18.04 Clang:
+ CONTAINER: env1804
+ CC: clang
+ CXX: clang++
+ BUILD_GPU: ON
+ CMAKE_ARGS: ''
+ container: $[ variables['CONTAINER'] ]
+ timeoutInMinutes: 0
+ variables:
+ BUILD_DIR: '$(Agent.BuildDirectory)/build'
+ CMAKE_CXX_FLAGS: '-Wall -Wextra'
+ steps:
+ - template: build/ubuntu.yaml
+ - job: ubuntu_indices
+ displayName: Ubuntu Indices
+ # Test 64 bit & unsigned indices
+ dependsOn: osx
+ condition: succeededOrFailed()
+ pool:
+ vmImage: 'Ubuntu 16.04'
+ strategy:
+ matrix:
+ 18.04 Clang:
+ CONTAINER: env1804
+ CC: clang
+ CXX: clang++
+ INDEX_SIGNED: OFF
+ INDEX_SIZE: 64
+ CMAKE_ARGS: ''
+ container: $[ variables['CONTAINER'] ]
+ timeoutInMinutes: 0
+ variables:
+ BUILD_DIR: '$(Agent.BuildDirectory)/build'
+ CMAKE_CXX_FLAGS: '-Wall -Wextra'
+ steps:
+ - template: build/ubuntu_indices.yaml
+
+ - stage: build_msvc
+ displayName: Build MSVC
dependsOn: formatting
jobs:
- - template: build-windows.yaml
+ - job: vs2017
+ displayName: Windows VS2017 Build
+ pool:
+ vmImage: 'vs2017-win2016'
+ strategy:
+ matrix:
+ x86:
+ PLATFORM: 'x86'
+ ARCHITECTURE: 'x86'
+ GENERATOR: 'Visual Studio 15 2017'
+ x64:
+ PLATFORM: 'x64'
+ ARCHITECTURE: 'x86_amd64'
+ GENERATOR: 'Visual Studio 15 2017 Win64'
+ timeoutInMinutes: 0
+ variables:
+ BUILD_DIR: '$(Agent.WorkFolder)\build'
+ CONFIGURATION: 'Release'
+ VCPKG_ROOT: 'C:\vcpkg'
+ steps:
+ - template: build/windows.yaml
+
- stage: documentation
displayName: Documentation
dependsOn: []
jobs:
- template: documentation.yaml
+
- stage: tutorials
displayName: Tutorials
- dependsOn: build_ubuntu
+ dependsOn: build_gcc
jobs:
- template: tutorials.yaml
+++ /dev/null
-jobs:
- - job: osx
- displayName: macOS
- pool:
- vmImage: '$(VMIMAGE)'
- strategy:
- matrix:
- Catalina 10.15:
- VMIMAGE: 'macOS-10.15'
- OSX_VERSION: '10.15'
- Mojave 10.14:
- VMIMAGE: 'macOS-10.14'
- OSX_VERSION: '10.14'
- timeoutInMinutes: 0
- variables:
- BUILD_DIR: '$(Agent.WorkFolder)/build'
- GOOGLE_TEST_DIR: '$(Agent.WorkFolder)/googletest'
- CMAKE_CXX_FLAGS: '-Wall -Wextra -Werror -Wabi'
- steps:
- - checkout: self
- # find the commit hash on a quick non-forced update too
- fetchDepth: 10
- - script: |
- brew install pkg-config qt5 libpcap brewsci/science/openni libomp
- brew install vtk --with-qt --without-python@2
- brew install --only-dependencies pcl
- git clone https://github.com/abseil/googletest.git $GOOGLE_TEST_DIR # the official endpoint changed to abseil/googletest
- cd $GOOGLE_TEST_DIR && git checkout release-1.8.1
- displayName: 'Install Dependencies'
- - script: |
- mkdir $BUILD_DIR && cd $BUILD_DIR
- # Surface switched off due to OpenGL deprecation on Mac
- cmake $(Build.SourcesDirectory) \
- -DCMAKE_BUILD_TYPE="Release" \
- -DCMAKE_OSX_SYSROOT="/Library/Developer/CommandLineTools/SDKs/MacOSX$(OSX_VERSION).sdk" \
- -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DGTEST_SRC_DIR="$GOOGLE_TEST_DIR/googletest" \
- -DGTEST_INCLUDE_DIR="$GOOGLE_TEST_DIR/googletest/include" \
- -DQt5_DIR=/usr/local/opt/qt5/lib/cmake/Qt5 \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DBUILD_simulation=ON \
- -DBUILD_surface=OFF \
- -DBUILD_global_tests=ON \
- -DBUILD_examples=ON \
- -DBUILD_tools=ON \
- -DBUILD_apps=ON \
- -DBUILD_apps_3d_rec_framework=ON \
- -DBUILD_apps_cloud_composer=ON \
- -DBUILD_apps_in_hand_scanner=ON \
- -DBUILD_apps_modeler=ON \
- -DBUILD_apps_point_cloud_editor=ON
- displayName: 'CMake Configuration'
- - script: |
- cd $BUILD_DIR
- cmake --build . -- -j2
- displayName: 'Build Library'
- - script: cd $BUILD_DIR && cmake --build . -- tests
- displayName: 'Run Unit Tests'
- - task: PublishTestResults@2
- inputs:
- testResultsFormat: 'CTest'
- testResultsFiles: '**/Test*.xml'
- searchFolder: '$(Agent.WorkFolder)/build'
- condition: succeededOrFailed()
-
+++ /dev/null
-jobs:
- - job: ubuntu
- displayName: Ubuntu
- pool:
- vmImage: 'Ubuntu 16.04'
- strategy:
- matrix:
- # 16.04 Clang:
- # CONTAINER: env1604
- # CC: clang
- # CXX: clang++
- # CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON'
- # 20.04 Clang:
- # CONTAINER: env2004
- # CC: clang
- # CXX: clang++
- # CMAKE_ARGS: ''
- 16.04 GCC:
- CONTAINER: env1604
- CC: gcc
- CXX: g++
- CMAKE_ARGS: '-DPCL_WARNINGS_ARE_ERRORS=ON'
- 20.04 GCC:
- CONTAINER: env2004
- CC: gcc
- CXX: g++
- CMAKE_ARGS: ''
- container: $[ variables['CONTAINER'] ]
- timeoutInMinutes: 0
- variables:
- BUILD_DIR: '$(Agent.BuildDirectory)/build'
- CMAKE_CXX_FLAGS: '-Wall -Wextra'
- steps:
- - checkout: self
- # find the commit hash on a quick non-forced update too
- fetchDepth: 10
- - script: |
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake $(Build.SourcesDirectory) $(CMAKE_ARGS) \
- -DCMAKE_BUILD_TYPE="Release" \
- -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DBUILD_simulation=ON \
- -DBUILD_surface_on_nurbs=ON \
- -DBUILD_global_tests=ON \
- -DBUILD_examples=ON \
- -DBUILD_tools=ON \
- -DBUILD_apps=ON \
- -DBUILD_apps_3d_rec_framework=ON \
- -DBUILD_apps_cloud_composer=ON \
- -DBUILD_apps_in_hand_scanner=ON \
- -DBUILD_apps_modeler=ON \
- -DBUILD_apps_point_cloud_editor=ON
- # Temporary fix to ensure no tests are skipped
- cmake $(Build.SourcesDirectory)
- displayName: 'CMake Configuration'
- - script: |
- cd $BUILD_DIR
- cmake --build . -- -j2
- displayName: 'Build Library'
- - script: |
- cd $BUILD_DIR && cmake --build . -- tests
- displayName: 'Run Unit Tests'
- - task: PublishTestResults@2
- inputs:
- testResultsFormat: 'CTest'
- testResultsFiles: '**/Test*.xml'
- searchFolder: '$(Agent.BuildDirectory)/build'
- condition: succeededOrFailed()
+++ /dev/null
-jobs:
- - job: vs2017
- displayName: Windows VS2017 Build
- pool:
- vmImage: 'vs2017-win2016'
- strategy:
- matrix:
- x86:
- PLATFORM: 'x86'
- ARCHITECTURE: 'x86'
- GENERATOR: 'Visual Studio 15 2017'
- x64:
- PLATFORM: 'x64'
- ARCHITECTURE: 'x86_amd64'
- GENERATOR: 'Visual Studio 15 2017 Win64'
- timeoutInMinutes: 0
- variables:
- BUILD_DIR: '$(Agent.WorkFolder)\build'
- CONFIGURATION: 'Release'
- VCPKG_ROOT: 'C:\vcpkg'
- steps:
- - checkout: self
- # find the commit hash on a quick non-forced update too
- fetchDepth: 10
- - script: |
- echo ##vso[task.setvariable variable=BOOST_ROOT]%BOOST_ROOT_1_69_0%
- displayName: 'Set BOOST_ROOT Environment Variable'
- - script: |
- echo ##vso[task.prependpath]%BOOST_ROOT_1_69_0%\lib
- displayName: 'Include Boost Libraries In System PATH'
- - script: |
- set
- displayName: 'Print Environment Variables'
- - script: |
- vcpkg.exe install eigen3 flann gtest qhull --triplet %PLATFORM%-windows && vcpkg.exe list
- displayName: 'Install C++ Dependencies Via Vcpkg'
- - script: |
- rmdir %VCPKG_ROOT%\downloads /S /Q
- rmdir %VCPKG_ROOT%\packages /S /Q
- displayName: 'Free Up Space'
- - script: |
- mkdir %BUILD_DIR% && cd %BUILD_DIR%
- cmake $(Build.SourcesDirectory) ^
- -G"%GENERATOR%" ^
- -DCMAKE_BUILD_TYPE="Release" ^
- -DCMAKE_TOOLCHAIN_FILE=%VCPKG_ROOT%\scripts\buildsystems\vcpkg.cmake ^
- -DVCPKG_APPLOCAL_DEPS=ON ^
- -DPCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32=ON ^
- -DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON ^
- -DPCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32=ON ^
- -DBUILD_global_tests=ON ^
- -DBUILD_tools=OFF ^
- -DBUILD_surface_on_nurbs=ON
- displayName: 'CMake Configuration'
- - script: cd %BUILD_DIR% && cmake --build . --config %CONFIGURATION%
- displayName: 'Build Library'
- - script: cd %BUILD_DIR% && cmake --build . --target tests --config %CONFIGURATION%
- displayName: 'Run Unit Tests'
- - task: PublishTestResults@2
- inputs:
- testResultsFormat: 'CTest'
- testResultsFiles: '**/Test*.xml'
- searchFolder: '$(Agent.WorkFolder)\build'
- condition: succeededOrFailed()
-
--- /dev/null
+steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - script: |
+ brew install cmake pkg-config boost eigen flann glew libusb qhull vtk qt5 libpcap libomp brewsci/science/openni
+ git clone https://github.com/abseil/googletest.git $GOOGLE_TEST_DIR # the official endpoint changed to abseil/googletest
+ cd $GOOGLE_TEST_DIR && git checkout release-1.8.1
+ displayName: 'Install Dependencies'
+ - script: |
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake $(Build.SourcesDirectory) \
+ -DCMAKE_BUILD_TYPE="MinSizeRel" \
+ -DCMAKE_OSX_SYSROOT="/Library/Developer/CommandLineTools/SDKs/MacOSX$(OSX_VERSION).sdk" \
+ -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DGTEST_SRC_DIR="$GOOGLE_TEST_DIR/googletest" \
+ -DGTEST_INCLUDE_DIR="$GOOGLE_TEST_DIR/googletest/include" \
+ -DQt5_DIR=/usr/local/opt/qt5/lib/cmake/Qt5 \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DBUILD_simulation=ON \
+ -DBUILD_global_tests=ON \
+ -DBUILD_examples=ON \
+ -DBUILD_tools=ON \
+ -DBUILD_apps=ON \
+ -DBUILD_apps_3d_rec_framework=ON \
+ -DBUILD_apps_cloud_composer=ON \
+ -DBUILD_apps_in_hand_scanner=ON \
+ -DBUILD_apps_modeler=ON \
+ -DBUILD_apps_point_cloud_editor=ON
+ displayName: 'CMake Configuration'
+ - script: |
+ cd $BUILD_DIR
+ cmake --build . -- -j2
+ displayName: 'Build Library'
+ - script: cd $BUILD_DIR && cmake --build . -- tests
+ displayName: 'Run Unit Tests'
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: 'CTest'
+ testResultsFiles: '**/Test*.xml'
+ searchFolder: '$(Agent.WorkFolder)/build'
+ condition: succeededOrFailed()
+
--- /dev/null
+steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - script: |
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake $(Build.SourcesDirectory) $(CMAKE_ARGS) \
+ -DCMAKE_BUILD_TYPE="MinSizeRel" \
+ -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_DISABLE_GPU_TESTS=ON \
+ -DBUILD_simulation=ON \
+ -DBUILD_surface_on_nurbs=ON \
+ -DBUILD_global_tests=ON \
+ -DBUILD_examples=ON \
+ -DBUILD_tools=ON \
+ -DBUILD_apps=ON \
+ -DBUILD_apps_3d_rec_framework=ON \
+ -DBUILD_apps_cloud_composer=ON \
+ -DBUILD_apps_in_hand_scanner=ON \
+ -DBUILD_apps_modeler=ON \
+ -DBUILD_apps_point_cloud_editor=ON \
+ -DBUILD_CUDA=$BUILD_GPU \
+ -DBUILD_GPU=$BUILD_GPU \
+ -DBUILD_cuda_io=$BUILD_GPU \
+ -DBUILD_gpu_tracking=$BUILD_GPU \
+ -DBUILD_gpu_surface=$BUILD_GPU \
+ -DBUILD_gpu_people=$BUILD_GPU
+ # Temporary fix to ensure no tests are skipped
+ cmake $(Build.SourcesDirectory)
+ displayName: 'CMake Configuration'
+ - script: |
+ cd $BUILD_DIR
+ cmake --build . -- -j2
+ displayName: 'Build Library'
+ - script: |
+ cd $BUILD_DIR && cmake --build . -- tests
+ displayName: 'Run Unit Tests'
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: 'CTest'
+ testResultsFiles: '**/Test*.xml'
+ searchFolder: '$(Agent.BuildDirectory)/build'
+ condition: succeededOrFailed()
--- /dev/null
+steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - script: |
+ mkdir $BUILD_DIR && cd $BUILD_DIR
+ cmake $(Build.SourcesDirectory) $(CMAKE_ARGS) \
+ -DCMAKE_BUILD_TYPE="MinSizeRel" \
+ -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DPCL_INDEX_SIGNED=$INDEX_SIGNED \
+ -DPCL_INDEX_SIZE=$INDEX_SIZE \
+ -DBUILD_geometry=OFF \
+ -DBUILD_tools=OFF \
+ -DBUILD_kdtree=OFF \
+ -DBUILD_ml=OFF \
+ -DBUILD_octree=OFF \
+ -DBUILD_global_tests=ON
+ # Temporary fix to ensure no tests are skipped
+ cmake $(Build.SourcesDirectory)
+ displayName: 'CMake Configuration'
+ - script: |
+ cd $BUILD_DIR
+ cmake --build . -- -j2
+ displayName: 'Build Library'
+ - script: |
+ cd $BUILD_DIR && cmake --build . -- tests
+ displayName: 'Run Unit Tests'
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: 'CTest'
+ testResultsFiles: '**/Test*.xml'
+ searchFolder: '$(Agent.BuildDirectory)/build'
+ condition: succeededOrFailed()
--- /dev/null
+steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - script: |
+ echo ##vso[task.setvariable variable=BOOST_ROOT]%BOOST_ROOT_1_69_0%
+ displayName: 'Set BOOST_ROOT Environment Variable'
+ - script: |
+ echo ##vso[task.prependpath]%BOOST_ROOT_1_69_0%\lib
+ displayName: 'Include Boost Libraries In System PATH'
+ - script: |
+ set
+ displayName: 'Print Environment Variables'
+ - script: |
+ vcpkg.exe install eigen3 flann gtest qhull --triplet %PLATFORM%-windows && vcpkg.exe list
+ displayName: 'Install C++ Dependencies Via Vcpkg'
+ - script: |
+ rmdir %VCPKG_ROOT%\downloads /S /Q
+ rmdir %VCPKG_ROOT%\packages /S /Q
+ displayName: 'Free Up Space'
+ - script: |
+ mkdir %BUILD_DIR% && cd %BUILD_DIR%
+ cmake $(Build.SourcesDirectory) ^
+ -G"%GENERATOR%" ^
+ -DCMAKE_BUILD_TYPE="MinSizeRel" ^
+ -DCMAKE_TOOLCHAIN_FILE=%VCPKG_ROOT%\scripts\buildsystems\vcpkg.cmake ^
+ -DVCPKG_APPLOCAL_DEPS=ON ^
+ -DPCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32=ON ^
+ -DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON ^
+ -DPCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32=ON ^
+ -DBUILD_global_tests=ON ^
+ -DBUILD_tools=OFF ^
+ -DBUILD_surface_on_nurbs=ON
+ displayName: 'CMake Configuration'
+ - script: cd %BUILD_DIR% && cmake --build . --config %CONFIGURATION%
+ displayName: 'Build Library'
+ - script: cd %BUILD_DIR% && cmake --build . --target tests --config %CONFIGURATION%
+ displayName: 'Run Unit Tests'
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: 'CTest'
+ testResultsFiles: '**/Test*.xml'
+ searchFolder: '$(Agent.WorkFolder)\build'
+ condition: succeededOrFailed()
+
mkdir $BUILD_DIR && cd $BUILD_DIR
cmake $(Build.SourcesDirectory) \
-DDOXYGEN_USE_SHORT_NAMES=OFF \
- -DSPHINX_HTML_FILE_SUFFIX=php \
-DWITH_DOCS=ON \
-DWITH_TUTORIALS=ON
displayName: 'CMake Configuration'
Ubuntu 16.04:
CUDA_VERSION: 9.2
UBUNTU_DISTRO: 16.04
- USE_CUDA: true
+ USE_CUDA: false
VTK_VERSION: 6
tag: 16.04
Ubuntu 18.04:
--- /dev/null
+# Release PCL steps:
+# 1. Check tests:
+# * debian:latest
+# * perception_pcl
+# 2. Package for
+# * MacOS
+# * tar source release
+# * Windows Installers
+
+trigger: none
+
+resources:
+- repo: self
+
+variables:
+ dockerHub: "PointCloudLibrary@hub.docker.com"
+ dockerHubID: "pointcloudlibrary"
+ # VERSION and RC need to be provided via the UI when queueing the run.
+ # Variables set in the UI have the lowest priority and cannot override values set in this file,
+ # which is why RC and VERSION are left commented out here.
+ # RC: 0 # number of the release candidate (pre-release)
+ # VERSION: 1.99
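+ # Example (values illustrative; assumes the Azure DevOps CLI with the azure-devops extension):
+ #   az pipelines run --name <this-release-pipeline> --variables VERSION=1.99 RC=1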
+
+stages:
+- stage: Info
+ displayName: "Version"
+ jobs:
+ - job: additional_info
+ displayName: "More Info"
+ pool:
+ vmImage: 'ubuntu-latest'
+ steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - script: |
+ echo "Prev release: $(git tag | sort -rV | head -1 | cut -d- -f 2)"
+ echo "Current release: "$(VERSION)-rc$(RC)
+ if [ -z $VERSION ] || [ -z $RC ]; then
+ echo "Bad version and release candidate number"
+ exit 3
+ fi
+ displayName: "Test for and display version info"
+- stage: Debian
+ dependsOn: ["Info"]
+ jobs:
+ - job: BuildDebian
+ displayName: "Build Debian Latest"
+ timeoutInMinutes: 360
+ pool:
+ vmImage: 'ubuntu-latest'
+ variables:
+ tag: "release"
+ steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - task: Docker@2
+ displayName: "Build docker image"
+ inputs:
+ command: build
+ arguments: |
+ --no-cache
+ -t $(dockerHubID)/release:$(tag)
+ dockerfile: '$(Build.SourcesDirectory)/.dev/docker/release/Dockerfile'
+ tags: "$(tag)"
+- stage: ROS
+ dependsOn: ["Info"]
+ jobs:
+ - job: PerceptionPCL
+ displayName: "perception_pcl compile"
+ timeoutInMinutes: 360
+ pool:
+ vmImage: 'ubuntu-latest'
+ strategy:
+ matrix:
+ # Only ROS Melodic works with current releases of PCL
+ # Kinetic has stopped supporting any version later than PCL 1.7.2
+ ROS Melodic:
+ flavor: "melodic"
+ variables:
+ tag: "ros"
+ steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - task: Docker@2
+ displayName: "Build docker image"
+ inputs:
+ command: build
+ arguments: |
+ --no-cache
+ --build-arg flavor=$(flavor)
+ -t $(dockerHubID)/release:$(tag)
+ dockerfile: '$(Build.SourcesDirectory)/.dev/docker/perception_pcl_ros/Dockerfile'
+ buildContext: '$(Build.SourcesDirectory)/.dev/docker/perception_pcl_ros'
+ tags: "$(tag)"
+- stage: Prepare
+ dependsOn: ["Debian", "ROS"]
+ jobs:
+ - job: src_code
+ displayName: "Package Source Code"
+ timeoutInMinutes: 360
+ pool:
+ vmImage: 'ubuntu-latest'
+ variables:
+ PUBLISH_LOCATION: '$(Build.ArtifactStagingDirectory)'
+ SOURCE_LOCATION: '$(Build.SourcesDirectory)'
+ steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ path: 'pcl'
+ - task: Bash@3
+ displayName: "Remove git files"
+ inputs:
+ targetType: 'inline'
+ script: 'rm -fr ./.git'
+ workingDirectory: '$(SOURCE_LOCATION)'
+ failOnStderr: true
+ - task: ArchiveFiles@2
+ displayName: "Create release archive (.tar.gz)"
+ inputs:
+ rootFolderOrFile: '$(SOURCE_LOCATION)'
+ includeRootFolder: true
+ archiveType: 'tar'
+ archiveFile: '$(PUBLISH_LOCATION)/source.tar.gz'
+ replaceExistingArchive: true
+ verbose: true
+ - task: ArchiveFiles@2
+ displayName: "Create release archive (.zip)"
+ inputs:
+ rootFolderOrFile: '$(SOURCE_LOCATION)'
+ includeRootFolder: true
+ archiveType: 'zip'
+ archiveFile: '$(PUBLISH_LOCATION)/source.zip'
+ replaceExistingArchive: true
+ verbose: true
+ - script: |
+ for file in $(ls "$(PUBLISH_LOCATION)/"); do
+ sha256sum "$(PUBLISH_LOCATION)"/"${file}" >> $(PUBLISH_LOCATION)/sha256_checksums.txt
+ sha512sum "$(PUBLISH_LOCATION)"/"${file}" >> $(PUBLISH_LOCATION)/sha512_checksums.txt
+ done
+ echo "SHA 256 Checksums:"
+ cat $(PUBLISH_LOCATION)/sha256_checksums.txt
+ echo "SHA 512 Checksums:"
+ cat $(PUBLISH_LOCATION)/sha512_checksums.txt
+ displayName: "Generate checksum"
+ - task: PublishBuildArtifacts@1
+ displayName: "Publish archive"
+ inputs:
+ PathtoPublish: '$(PUBLISH_LOCATION)/'
+ ArtifactName: 'source'
+ publishLocation: 'Container'
+ - job: changelog
+ displayName: Generate Change Log
+ pool:
+ vmImage: 'ubuntu-latest'
+ variables:
+ CHANGELOG: '$(Build.ArtifactStagingDirectory)/changelog.md'
+ steps:
+ - checkout: none
+ - script: |
+ cat > $(CHANGELOG) <<EOF
+ ## Insert Summary Here
+
+ For an exhaustive list of newly added features, deprecations and other changes
+ in PCL $(VERSION), please see [CHANGES.md](https://github.com/PointCloudLibrary/pcl/blob/master/CHANGES.md). **Remember to edit the URL**
+ EOF
+ displayName: 'Generate Changelog'
+ - task: PublishBuildArtifacts@1
+ displayName: "Publish Changelog"
+ inputs:
+ PathtoPublish: '$(CHANGELOG)'
+ ArtifactName: 'changelog'
+ publishLocation: 'Container'
+- stage: Release
+ dependsOn: ["Prepare"]
+ jobs:
+ - job: release
+ displayName: "Release the kraken"
+ timeoutInMinutes: 360
+ variables:
+ DOWNLOAD_LOCATION: '$(Build.ArtifactStagingDirectory)'
+ pool:
+ vmImage: 'ubuntu-latest'
+ steps:
+ - checkout: self
+ # find the commit hash on a quick non-forced update too
+ fetchDepth: 10
+ - bash: |
+ if [ -z $RC ] || [ $RC -eq 0 ]; then isPreRelease=false; else isPreRelease=true; fi
+ echo "##vso[task.setvariable variable=isPreRelease]${isPreRelease}"
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ downloadType: 'all' # can be anything except single
+ downloadPath: '$(DOWNLOAD_LOCATION)'
+ - task: GitHubRelease@1
+ inputs:
+ action: 'create'
+ addChangeLog: false
+ # assets, one pattern per line
+ assets: |
+ $(DOWNLOAD_LOCATION)/source/*
+ isDraft: true
+ isPreRelease: $(isPreRelease)
+ gitHubConnection: 'Release to GitHub'
+ releaseNotesFilePath: '$(DOWNLOAD_LOCATION)/changelog/changelog.md'
+ repositoryName: $(Build.Repository.Name)
+ tagSource: 'userSpecifiedTag'
+ tag: "pcl-$(VERSION)-rc$(RC)"
+ tagPattern: 'pcl-*'
+ target: '$(Build.SourceVersion)'
+ title: 'PCL $(VERSION)'
&& apt-get install -y \
doxygen-latex \
dvipng \
+ graphviz \
git \
python3-pip \
pandoc \
&& rm -rf /var/lib/apt/lists/*
RUN pip3 install Jinja2 sphinx sphinxcontrib-doxylink sphinx_rtd_theme requests grip
+
+# Use eps2eps to solve https://github.com/doxygen/doxygen/issues/7484 before Doxygen 1.8.18
+RUN update-alternatives --install /usr/local/bin/ps2epsi ps2epsi /usr/bin/ps2epsi 1 \
+ && update-alternatives --install /usr/local/bin/ps2epsi ps2epsi /usr/bin/eps2eps 1000
# https://gitlab.com/nvidia/container-images/cuda/tree/master/dist
# To enable cuda, use "--build-arg USE_CUDA=true" during image build process
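+# Example invocation (image tag and build context are illustrative):
+#   docker build --build-arg USE_CUDA=true --build-arg CUDA_VERSION=9.2 --build-arg UBUNTU_DISTRO=16.04 -t pointcloudlibrary/env:16.04 .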
ARG USE_CUDA
-ARG CUDA_VERSION="9.0"
+ARG CUDA_VERSION="9.2"
ARG UBUNTU_DISTRO="16.04"
-# Known conflicts:
-# * 9.1 is an issue with 16.04 for Eigen
ARG BASE_CUDA_IMAGE=${USE_CUDA:+"nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_DISTRO}"}
ARG BASE_IMAGE=${BASE_CUDA_IMAGE:-"ubuntu:${UBUNTU_DISTRO}"}
libboost-filesystem-dev \
libboost-iostreams-dev \
libeigen3-dev \
+ libblas-dev \
libflann-dev \
libglew-dev \
libgtest-dev \
libvtk${VTK_VERSION}-dev \
libvtk${VTK_VERSION}-qt-dev \
qtbase5-dev \
+ software-properties-common \
&& rm -rf /var/lib/apt/lists/*
+# Eigen patch (https://eigen.tuxfamily.org/bz/show_bug.cgi?id=1462) to fix the issue mentioned
+# in https://github.com/PointCloudLibrary/pcl/issues/3729 is available in Eigen 3.3.7
+# Not needed from Ubuntu 20.04 onwards since 3.3.7 is the default version from apt
+RUN if [ `pkg-config --modversion eigen3 | cut -d. -f3` -lt 7 ]; then \
+ wget -qO- https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz | tar xz \
+ && apt install -y libblas-dev \
+ && cd eigen-3.3.7 \
+ && mkdir build \
+ && cd build \
+ && cmake .. \
+ && make install \
+ && cd ../.. \
+ && rm -rf eigen-3.3.7/ \
+ && rm -f eigen-3.3.7.tar.gz ; \
+ fi
+
+# To avoid CUDA build errors on CUDA 9.2+, GCC 7 is required
+RUN if [ `gcc -dumpversion | cut -d. -f1` -lt 7 ]; then add-apt-repository ppa:ubuntu-toolchain-r/test \
+ && apt update \
+ && apt install g++-7 -y \
+ && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/g++ g++ /usr/bin/g++-7 \
+ && update-alternatives --config gcc ; \
+ fi
+
RUN wget -qO- https://github.com/IntelRealSense/librealsense/archive/v2.23.0.tar.gz | tar xz \
&& cd librealsense-2.23.0 \
&& mkdir build \
--- /dev/null
+# flavor appears twice: once for the FROM line and once for the rest of the file, since
+# Dockerfile resets build arguments after a FROM appears
+ARG flavor="melodic"
+FROM ros:${flavor}-robot
+
+ARG flavor="melodic"
+ARG workspace="/root/catkin_ws"
+
+COPY ${flavor}_rosinstall.yaml ${workspace}/src/.rosinstall
+
+# Be careful:
+# * ROS uses Python2
+# * source the ROS setup file in every RUN snippet
+#
+# The dependencies of PCL can be reduced since
+# * we don't need to build visualization or docs
+RUN sed -i "s/^# deb-src/deb-src/" /etc/apt/sources.list \
+ && apt update \
+ && apt install -y \
+ libeigen3-dev \
+ libflann-dev \
+ libqhull-dev \
+ python-pip \
+ ros-${flavor}-tf2-eigen \
+ && pip install -U pip \
+ && pip install catkin_tools \
+ && cd ${workspace}/src \
+ && . "/opt/ros/${flavor}/setup.sh" \
+ && catkin_init_workspace \
+ && cd .. \
+ && wstool update -t src
+
+COPY package.xml ${workspace}/src/pcl/
+
+RUN cd ${workspace} \
+ && . "/opt/ros/${flavor}/setup.sh" \
+ && catkin config --install --link-devel \
+ && catkin build -j2 libpcl-all-dev --cmake-args -DWITH_OPENGL:BOOL=OFF \
+ && rm -fr build/libpcl-all-dev \
+ && catkin build --start-with pcl_msgs
--- /dev/null
+# flavor appears twice: once for the FROM line and once for the rest of the file, since
+# Dockerfile resets build arguments after a FROM appears
+ARG flavor="dashing"
+FROM ros:${flavor}-ros-base
+
+ARG flavor="dashing"
+ARG workspace="/root/catkin_ws"
+
+COPY ${flavor}_rosinstall.yaml ${workspace}/src/.rosinstall
+
+# Be careful:
+# * source the ROS setup file in every RUN snippet
+#
+# TODO: The dependencies of PCL can be reduced since
+# * we don't need to build visualization or docs
+RUN sed -i "s/^# deb-src/deb-src/" /etc/apt/sources.list \
+ && apt update \
+ && apt install -y \
+ libeigen3-dev \
+ libflann-dev \
+ ros-${flavor}-tf2-eigen \
+ && apt build-dep pcl -y \
+ && pip3 install -U pip \
+ && pip3 install wstool \
+ && cd ${workspace}/src \
+ && . "/opt/ros/${flavor}/setup.sh" \
+ && catkin_init_workspace \
+ && cd .. \
+ && wstool update -t src
+
+COPY package.xml ${workspace}/src/pcl/
+
+RUN cd ${workspace} \
+ && . "/opt/ros/${flavor}/setup.sh" \
+ && catkin config --install \
+ && catkin build -j2 libpcl-all-dev --cmake-args -DWITH_OPENGL:BOOL=OFF \
+ && catkin build
--- /dev/null
+- git:
+ local-name: 'kinetic/perception_pcl'
+ uri: 'https://github.com/ros-perception/perception_pcl'
+ version: 'kinetic-devel'
+- git:
+ local-name: 'kinetic/pcl_conversions'
+ uri: 'https://github.com/ros-perception/pcl_conversions'
+ version: 'indigo-devel'
+- git:
+ local-name: 'kinetic/pcl_msgs'
+ uri: 'https://github.com/ros-perception/pcl_msgs'
+ version: 'indigo-devel'
+- git:
+ local-name: 'pcl'
+ uri: 'https://github.com/PointCloudLibrary/pcl'
+ # Kinetic doesn't support PCL 1.8 onwards
+ version: '1.7.2'
--- /dev/null
+- git:
+ local-name: 'melodic/perception_pcl'
+ uri: 'https://github.com/ros-perception/perception_pcl'
+ version: 'melodic-devel'
+- git:
+ local-name: 'melodic/pcl_msgs'
+ uri: 'https://github.com/ros-perception/pcl_msgs'
+ version: 'indigo-devel'
+- git:
+ local-name: 'pcl'
+ uri: 'https://github.com/PointCloudLibrary/pcl'
+ version: 'master'
--- /dev/null
+<?xml version="1.0"?>
+<package format="2">
+ <name>libpcl-all-dev</name>
+ <version>0.1.0</version>
+ <description>
+ Standalone, large scale, open project for 2D/3D image and point cloud processing
+ </description>
+
+ <!-- One maintainer tag required, multiple allowed, one person per tag -->
+ <maintainer email="tyagi.kunal@live.com">kunal_tyagi</maintainer>
+
+ <license>BSD</license>
+
+ <url type="website">http://github.com/PointCloudLibrary/pcl</url>
+
+ <author email="tyagi.kunal@live.com">kunal_tyagi</author>
+
+ <buildtool_depend>catkin</buildtool_depend>
+ <build_depend>libeigen3-dev</build_depend>
+ <build_depend>libflann-dev</build_depend>
+ <!--libvtk6-dev -->
+ <test_depend>gtest</test_depend>
+ <doc_depend>doxygen</doc_depend>
+
+ <!-- The export tag contains other, unspecified, tags -->
+ <export>
+ <!-- Other tools can request additional information be placed here -->
+ </export>
+</package>
--- /dev/null
+FROM debian:testing
+
+ARG VTK_VERSION=7
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Add sources so we can just install build-dependencies of PCL
+RUN sed -i 's/^deb \(.*\)$/deb \1\ndeb-src \1/' /etc/apt/sources.list \
+ && apt update \
+ && apt install -y \
+ bash \
+ cmake \
+ dpkg-dev \
+ git \
+ g++ \
+ libboost-date-time-dev \
+ libboost-filesystem-dev \
+ libboost-iostreams-dev \
+ libeigen3-dev \
+ libflann-dev \
+ libglew-dev \
+ libgtest-dev \
+ libopenni-dev \
+ libopenni2-dev \
+ libproj-dev \
+ libqhull-dev \
+ libqt5opengl5-dev \
+ libusb-1.0-0-dev \
+ libvtk${VTK_VERSION}-dev \
+ libvtk${VTK_VERSION}-qt-dev \
+ qtbase5-dev \
+ wget \
+ && rm -rf /var/lib/apt/lists/*
+
+# CMake flags are from dpkg helper
+# PCL config is from debian repo:
+# https://salsa.debian.org/science-team/pcl/-/blob/master/debian/rules
+# MinSizeRel is used for the CI and should have no impact on releasability
+RUN cd \
+ && git clone --depth=1 https://github.com/PointCloudLibrary/pcl \
+ && mkdir pcl/build \
+ && cd pcl/build \
+ && export DEB_BUILD_MAINT_OPTIONS='hardening=+all' \
+ && export DEB_CFLAGS_MAINT_APPEND="-Wall -pedantic" \
+ && export DEB_LDFLAGS_MAINT_APPEND="-Wl,--as-needed" \
+ && cmake .. \
+ -DCMAKE_BUILD_TYPE=MinSizeRel \
+ -DCMAKE_CXX_FLAGS:STRING="`dpkg-buildflags --get CXXFLAGS`" \
+ -DCMAKE_EXE_LINKER_FLAGS:STRING="`dpkg-buildflags --get LDFLAGS`" \
+ -DCMAKE_SHARED_LINKER_FLAGS:STRING="`dpkg-buildflags --get LDFLAGS`" \
+ -DCMAKE_SKIP_RPATH=ON -DPCL_ENABLE_SSE=OFF \
+ -DBUILD_TESTS=OFF -DBUILD_apps=ON -DBUILD_common=ON \
+ -DBUILD_examples=ON -DBUILD_features=ON -DBUILD_filters=ON \
+ -DBUILD_geometry=ON -DBUILD_global_tests=OFF -DBUILD_io=ON \
+ -DBUILD_kdtree=ON -DBUILD_keypoints=ON -DBUILD_octree=ON \
+ -DBUILD_registration=ON -DBUILD_sample_consensus=ON \
+ -DBUILD_search=ON -DBUILD_segmentation=ON -DBUILD_surface=ON \
+ -DBUILD_tools=ON -DBUILD_tracking=ON -DBUILD_visualization=ON \
+ -DBUILD_apps_cloud_composer=OFF -DBUILD_apps_modeler=ON \
+ -DBUILD_apps_point_cloud_editor=ON -DBUILD_apps_in_hand_scanner=ON \
+ && make install -j2 \
+ && cd \
+ && rm -fr pcl
format() {
# don't use a directory with whitespace
- local whitelist="apps/modeler 2d ml octree simulation stereo"
+ local whitelist="apps/3d_rec_framework apps/modeler 2d ml octree simulation stereo tracking"
local PCL_DIR="${2}"
local formatter="${1}"
--- /dev/null
+#! /usr/bin/env bash
+
+new_version=$(git tag | sort -rV | head -1 | cut -d- -f 2).99
+
+# Mac users either use gsed or add "" after -i
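+# e.g. on macOS (BSD sed):  sed -i "" "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt
+#      or with GNU sed:     gsed -i "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt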
+if ls | grep README -q; then
+ sed -i "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt
+else
+ echo "Don't think this is the root directory" 1>&2
+ exit 4
+fi
--- /dev/null
+#! /usr/bin/env bash
+
+if [ $# != 1 ]; then
+ echo "Need (only) the release version" 1>&2
+ exit 3
+fi
+
+# Mac users either use gsed or add "" after -i
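+# e.g. on macOS (BSD sed):  sed -i "" "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt
+#      or with GNU sed:     gsed -i "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt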
+new_version="$1"
+if ls | grep README -q; then
+ sed -i "s,[0-9]\+\.[0-9]\+\.[0-9]\+,${new_version}," README.md
+ sed -i "s,VERSION [0-9.]*),VERSION ${new_version})," CMakeLists.txt
+else
+ echo "Don't think this is the root directory" 1>&2
+ exit 4
+fi
CATEGORIES = {
- "new feature": ("New features", ""),
+ "new feature": ("New features", "added to PCL"),
"deprecation": (
"Deprecation",
"of public APIs, scheduled to be removed after two minor releases",
--- /dev/null
+blank_issues_enabled: true
+
+contact_links:
+- name: Learn using Tutorials
+ url: https://pcl-tutorials.readthedocs.io/en/master/
+ about: Read the tutorials to learn how to use PCL better
+- name: Get help on Discord
+ url: https://discord.gg/JFFMAXS
+ about: Have a quick question about PCL or how to use PCL in your project?
+- name: Get help on StackOverflow
+ url: https://stackoverflow.com/tags/point-cloud-library/
+ about: Need some support to use PCL in your project?
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: 7
+daysUntilClose: false
# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
-onlyLabels:
- - "needs: author reply"
- - "needs: more work"
+onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels: []
+exemptLabels:
+ - "kind: todo"
+ - "needs: code review"
+ - "needs: feedback"
+ - "needs: pr merge"
+ - "needs: testing"
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
- This issue has been automatically marked as stale because it has not had any activity in the past month. It will be closed if no further activity occurs.
+ Marking this as stale due to 30 days of inactivity. It will be closed in 7 days if no further activity occurs.
# Comment to post when removing the stale label.
# unmarkComment: >
pulls:
daysUntilClose: false
markComment: >
- This pull request has been automatically marked as stale because it has not had any activity in the past month. Commenting or adding a new commit to the pull request will revert this.
+ Marking this as stale due to 30 days of inactivity. Commenting or adding a new commit to the pull request will revert this.
#include <pcl/2d/convolution.h>
#include <pcl/2d/edge.h>
-#include <pcl/common/common_headers.h> // rad2deg()
+#include <pcl/common/angles.h> // for rad2deg
namespace pcl {
# ChangeList
+## = 1.11.1 (13.08.2020) =
+
+Apart from the usual serving of bug fixes and speed improvements, PCL 1.11.1 brings
+better support for CUDA, more uniform behavior across the code and the CMake setup, and an
+experimental feature: `functor_filter`.
+
+### Notable changes
+
+**New features** *added to PCL*
+
+* **[ci]** Add support for CUDA in CI [[#4101](https://github.com/PointCloudLibrary/pcl/pull/4101)]
+* **[common]** Add always-unsigned index type `uindex_t` dependent on `index_t` [[#4205](https://github.com/PointCloudLibrary/pcl/pull/4205)]
+* **[filters]** Add a filter accepting a functor to reduce boilerplate code for simple filters [[#3890](https://github.com/PointCloudLibrary/pcl/pull/3890)]
+
+### Changes grouped by module
+
+#### CMake:
+
+* Update `pcl_find_boost` to allow compilation with Boost 1.73 and 1.72 [[#4080](https://github.com/PointCloudLibrary/pcl/pull/4080)]
+* Fix NSIS Template for NSIS3 [[#4093](https://github.com/PointCloudLibrary/pcl/pull/4093)]
+* Add option to choose `pcl::index_t` while compiling [[#4166](https://github.com/PointCloudLibrary/pcl/pull/4166)]
+* Fix name warnings for PCAP, libusb and Ensenso. [[#4182](https://github.com/PointCloudLibrary/pcl/pull/4182)]
+* Fixes compile error on MSVC by disabling Whole Program Optimization [[#4197](https://github.com/PointCloudLibrary/pcl/pull/4197)]
+
+#### libpcl_common:
+
+* Remove use of dynamic allocation in `GaussianKernel::convolve{Rows,Cols}` [[#4092](https://github.com/PointCloudLibrary/pcl/pull/4092)]
+* Replace usage of `std::vector<int>` with `Indices` [[#3989](https://github.com/PointCloudLibrary/pcl/pull/3989)]
+* Update `PointCloud` to conform to requirements of `ReversibleContainer` [[#3980](https://github.com/PointCloudLibrary/pcl/pull/3980)]
+* **[new feature]** Add always-unsigned index type `uindex_t` dependent on `index_t` [[#4205](https://github.com/PointCloudLibrary/pcl/pull/4205)]
+* Adding `data` member function to `PointCloud` [[#4216](https://github.com/PointCloudLibrary/pcl/pull/4216)]
+* Switch `int` to `index_t` for field index variables in `pclBase`, add `pcl::UNAVAILABLE` for same [[#4211](https://github.com/PointCloudLibrary/pcl/pull/4211)]
+* Added `resize` with 2 arguments to `PointCloud` [[#4225](https://github.com/PointCloudLibrary/pcl/pull/4225)]
+* Adding `max_size` to PointCloud [[#4254](https://github.com/PointCloudLibrary/pcl/pull/4254)]
+* Support multiple arguments in `pcl::utils::ignore()` [[#4269](https://github.com/PointCloudLibrary/pcl/pull/4269)]
+
+#### libpcl_features:
+
+* Fix feature histogram constructor bug [[#4234](https://github.com/PointCloudLibrary/pcl/pull/4234)]
+* Fix regression in Organized Edge Detection (introduced in PCL 1.10.1) [[#4311](https://github.com/PointCloudLibrary/pcl/pull/4311)]
+
+#### libpcl_filters:
+
+* Reduce computations involved in iterating over the point cloud in `FastBilateral{OMP}` [[#4134](https://github.com/PointCloudLibrary/pcl/pull/4134)]
+* **[new feature]** Add a filter accepting a functor to reduce boilerplate code for simple filters [[#3890](https://github.com/PointCloudLibrary/pcl/pull/3890)]
+* Refactoring VoxelGridCovariance to make it multi-thread safe (and more) [[#4251](https://github.com/PointCloudLibrary/pcl/pull/4251)]
+
+#### libpcl_gpu:
+
+* Replace volatile shared memory with shfl_sync in KNNSearch [[#4306](https://github.com/PointCloudLibrary/pcl/pull/4306)]
+* Fix octree radiusSearch [[#4146](https://github.com/PointCloudLibrary/pcl/pull/4146)]
+
+#### libpcl_io:
+
+* Better conversion of depth data + focal length into X,Y,Z point data [[#4128](https://github.com/PointCloudLibrary/pcl/pull/4128)]
+* Add iterator include for back_iterator to support VS2019 [[#4319](https://github.com/PointCloudLibrary/pcl/pull/4319)]
+
+#### libpcl_outofcore:
+
+* Fix compile issue in OutofcoreOctreeBase::setLODFilter after switching from boost::shared_ptr to std::shared_ptr [[#4081](https://github.com/PointCloudLibrary/pcl/pull/4081)]
+
+#### libpcl_registration:
+
+* Fix bug in NDT step interval convergence criteria [[#4181](https://github.com/PointCloudLibrary/pcl/pull/4181)]
+
+#### libpcl_segmentation:
+
+* Clean FLANN includes [[#4025](https://github.com/PointCloudLibrary/pcl/pull/4025)]
+* Update angle between 2 planes to be the smallest angle between them [[#4161](https://github.com/PointCloudLibrary/pcl/pull/4161)]
+
+#### libpcl_simulation:
+
+* Don't prefix in shaders. [[#4209](https://github.com/PointCloudLibrary/pcl/pull/4209)]
+
+#### libpcl_visualization:
+
+* Fix error: misplaced deprecation attributes in `vtkVertexBufferObject{,Mapper}` [[#4079](https://github.com/PointCloudLibrary/pcl/pull/4079)]
+
+#### PCL Docs:
+
+* Fix typo in FPFH documentation, $$p_k$$ was used instead of $$p_i$$ [[#4112](https://github.com/PointCloudLibrary/pcl/pull/4112)]
+* Fix typos in PFH estimation tutorial [[#4111](https://github.com/PointCloudLibrary/pcl/pull/4111)]
+
+#### CI:
+
+* **[new feature]** Add support for CUDA in CI [[#4101](https://github.com/PointCloudLibrary/pcl/pull/4101)]
+
## = 1.11.0 (11.05.2020) =
Starting with PCL 1.11, PCL uses `std::shared_ptr` and `std::weak_ptr` instead of the
* added new templated methods for `nearestKSearch` and `radiusSearch` for situations when PointT is different than the one the KdTree object was created with (e.g., KdTree<PointT1> vs nearestKSearch (PointT2 &p...)
* added two new methods for `getApproximateIndices` where given a reference cloud of point type T1 we're trying to find the corresponding indices in a different cloud of point type T2
* refactorized a lot of code in search and octree to make it look more consistent with the rest of the API
-* fixed a bug in octree_search which was semantically doing something bad: for each `radiusSearch`/`nearestKSearch`/`approxNearestSearch` call with a PointCloudConstPtr, the octree was getting recreated. Changed the API to be consistent with the rest of PCL (including pcl_search and pcl_kdtree) where we pass in a PointCloud instead of a PointCloudConstPtr which simply calls searchMethod (cloud.points[i], ...)
+* fixed a bug in octree_search which was semantically doing something bad: for each `radiusSearch`/`nearestKSearch`/`approxNearestSearch` call with a PointCloudConstPtr, the octree was getting recreated. Changed the API to be consistent with the rest of PCL (including pcl_search and pcl_kdtree) where we pass in a PointCloud instead of a PointCloudConstPtr which simply calls searchMethod (cloud[i], ...)
* minor code optimizations
* renamed organized_neighbor.h header in pcl_search (unreleased, therefore API changes OK!) to organized.h
* disabled the auto-search tuning as it wasn't really working. must clean this up further
* added unit test for `getIntersectedVoxelCentersRecursive`
* added method `getIntersectedVoxelIndices` for getting indices of intersected voxels and updated unit test
* refactorized a lot of code in search and octree to make it look more consistent with the rest of the API
-* fixed a bug in octree_search which was semantically doing something bad: for each `radiusSearch`/`nearestKSearch`/`approxNearestSearch` call with a PointCloudConstPtr, the octree was getting recreated. Changed the API to be consistent with the rest of PCL (including pcl_search and pcl_kdtree) where we pass in a PointCloud instead of a PointCloudConstPtr which simply calls searchMethod (cloud.points[i], ...)
+* fixed a bug in octree_search which was semantically doing something bad: for each `radiusSearch`/`nearestKSearch`/`approxNearestSearch` call with a PointCloudConstPtr, the octree was getting recreated. Changed the API to be consistent with the rest of PCL (including pcl_search and pcl_kdtree) where we pass in a PointCloud instead of a PointCloudConstPtr which simply calls searchMethod (cloud[i], ...)
* minor code optimizations
* renamed organized_neighbor.h header in pcl_search (unreleased, therefore API changes OK!) to organized.h
* disabled the auto-search tuning as it wasn't really working. must clean this up further
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type default to RelWithDebInfo, set to Release to improve performance" FORCE)
endif()
-project(PCL VERSION 1.11.0)
+project(PCL VERSION 1.11.1)
string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
### ---[ Find universal dependencies
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fp:precise /wd4800 /wd4521 /wd4251 /wd4275 /wd4305 /wd4355 ${SSE_FLAGS_STR}")
# Add extra code generation/link optimizations
- if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION)
+ if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION AND (NOT BUILD_CUDA) AND (NOT BUILD_GPU))
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
+ else()
+  message("Global optimization (/GL) has been turned off, as it doesn't work with nvcc/thrust")
endif()
# /MANIFEST:NO") # please, don't disable manifest generation, otherwise crash at start for vs2008
### ---[ Find universal dependencies
# OpenMP (optional)
-find_package(OpenMP COMPONENTS C CXX)
+option(WITH_OPENMP "Build with parallelization using OpenMP" TRUE)
+if(WITH_OPENMP)
+ find_package(OpenMP COMPONENTS C CXX)
+endif()
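+# e.g. disable OpenMP at configure time with: cmake .. -DWITH_OPENMP=FALSE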
if(OpenMP_FOUND)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
--- /dev/null
+pointclouds.org
\ No newline at end of file
endif()
set(Boost_ADDITIONAL_VERSIONS
"@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@.@Boost_SUBMINOR_VERSION@" "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@"
- "1.71.0" "1.71" "1.70.0" "1.70"
+ "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
"1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
"1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
"1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55")
[![Release][release-image]][releases]
[![License][license-image]][license]
-[release-image]: https://img.shields.io/badge/release-1.11.0-green.svg?style=flat
+[release-image]: https://img.shields.io/badge/release-1.11.1-green.svg?style=flat
[releases]: https://github.com/PointCloudLibrary/pcl/releases
[license-image]: https://img.shields.io/badge/license-BSD-green.svg?style=flat
[license]: https://github.com/PointCloudLibrary/pcl/blob/master/LICENSE.txt
-:bangbang:Website:bangbang:
+Website
-------
-The original website (http://pointclouds.org) is down currently :broken_heart:, but a new one is back up https://pointcloudlibrary.github.io/ :heart: and open to [contributions](https://github.com/PointCloudLibrary/PointCloudLibrary.github.io) :hammer_and_wrench:.
+The new website is now online at https://pointclouds.org and is open to [contributions](https://github.com/PointCloudLibrary/PointCloudLibrary.github.io) :hammer_and_wrench:.
If you really need access to the old website, please use [the copy made by the internet archive](https://web.archive.org/web/20191017164724/http://www.pointclouds.org/). Please be aware that the website was hacked before and could still be hosting some malicious code.
Continuous integration
----------------------
[ci-latest-build]: https://dev.azure.com/PointCloudLibrary/pcl/_build/latest?definitionId=9&branchName=master
-[ci-ubuntu-16.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Ubuntu&jobName=Ubuntu&configuration=Ubuntu%2016.04%20GCC&label=Ubuntu%2016.04
-[ci-ubuntu-20.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Ubuntu&jobName=Ubuntu&configuration=Ubuntu%2020.04%20GCC&label=Ubuntu%2020.04
-[ci-windows-x86]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Windows&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x86&label=Windows%20VS2017%20x86
-[ci-windows-x64]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Windows&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x64&label=Windows%20VS2017%20x64
-[ci-macos-10.14]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20macOS&jobName=macOS&configuration=macOS%20Mojave%2010.14&label=macOS%20Mojave%2010.14
-[ci-macos-10.15]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20macOS&jobName=macOS&configuration=macOS%20Catalina%2010.15&label=macOS%20Catalina%2010.15
+[ci-ubuntu-16.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2016.04%20GCC&label=Ubuntu%2016.04%20GCC
+[ci-ubuntu-18.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=Ubuntu&configuration=Ubuntu%2018.04%20Clang&label=Ubuntu%2018.04%20Clang
+[ci-ubuntu-20.04]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20GCC&jobName=Ubuntu&configuration=Ubuntu%2020.04%20GCC&label=Ubuntu%2020.04%20GCC
+[ci-windows-x86]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x86&label=Windows%20VS2017%20x86
+[ci-windows-x64]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20MSVC&jobName=Windows%20VS2017%20Build&configuration=Windows%20VS2017%20Build%20x64&label=Windows%20VS2017%20x64
+[ci-macos-10.14]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Mojave%2010.14&label=macOS%20Mojave%2010.14
+[ci-macos-10.15]: https://dev.azure.com/PointCloudLibrary/pcl/_apis/build/status/9?branchName=master&stageName=Build%20Clang&jobName=macOS&configuration=macOS%20Catalina%2010.15&label=macOS%20Catalina%2010.15
Build Platform | Status
------------------------ | ------------------------------------------------------------------------------------------------- |
-Ubuntu | [![Status][ci-ubuntu-16.04]][ci-latest-build] <br> [![Status][ci-ubuntu-20.04]][ci-latest-build] |
+Ubuntu | [![Status][ci-ubuntu-16.04]][ci-latest-build] <br> [![Status][ci-ubuntu-18.04]][ci-latest-build] <br> [![Status][ci-ubuntu-20.04]][ci-latest-build] |
Windows | [![Status][ci-windows-x86]][ci-latest-build] <br> [![Status][ci-windows-x64]][ci-latest-build] |
macOS | [![Status][ci-macos-10.14]][ci-latest-build] <br> [![Status][ci-macos-10.15]][ci-latest-build] |
[discord-image]: https://img.shields.io/discord/694824801977630762?color=7289da&label=community%20chat&logo=discord&style=plastic
[discord-server]: https://discord.gg/JFFMAXS
[website-status]: https://img.shields.io/website/https/pointcloudlibrary.github.io.svg?down_color=red&down_message=is%20down&up_color=green&up_message=is%20new
-[website]: https://pointcloudlibrary.github.io/
+[website]: https://pointclouds.org/
[so-question-count]: https://img.shields.io/stackexchange/stackoverflow/t/point-cloud-library.svg?logo=stackoverflow
[stackoverflow]: https://stackoverflow.com/questions/tagged/point-cloud-library
Compiling
---------
Please refer to the platform specific tutorials:
- - [Linux](http://www.pointclouds.org/documentation/tutorials/compiling_pcl_posix.php)
- - [Mac OS X](http://www.pointclouds.org/documentation/tutorials/compiling_pcl_macosx.php)
- - [Microsoft Windows](http://www.pointclouds.org/documentation/tutorials/compiling_pcl_windows.php)
+ - [Linux](https://pcl-tutorials.readthedocs.io/en/latest/compiling_pcl_posix.html)
+ - [Mac OS X](https://pcl-tutorials.readthedocs.io/en/latest/compiling_pcl_macosx.html)
+ - [Microsoft Windows](https://pcl-tutorials.readthedocs.io/en/latest/compiling_pcl_windows.html)
Documentation
-------------
-- [Tutorials](http://www.pointclouds.org/documentation/tutorials/)
-- [PCL trunk documentation](https://pointcloudlibrary.github.io/documentation/) (updated nightly)
+- [Tutorials](https://pcl-tutorials.readthedocs.io/)
+- [PCL trunk documentation](https://pointclouds.org/documentation/)
Contributing
------------
remember to tag your questions with the tag `point-cloud-library`.
* [Discord Server](https://discord.gg/JFFMAXS) for live chat with
other members of the PCL community and casual discussions
-
-<!--
- * Mailing list: The [PCL Google Group](https://groups.google.com/forum/#!forum/point-cloud-library)
--->
-
-<!-- There's an option of creating our own compatibility tracker
-
-API/ABI Compatibility Report
-------
-For details about API/ABI changes over the timeline please check PCL's page at [ABI Laboratory](https://abi-laboratory.pro/tracker/timeline/pcl/).
--->
#include <memory>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class CRHEstimation : public GlobalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using GlobalEstimator<PointInT, FeatureT>::normals_;
+
+ std::shared_ptr<GlobalEstimator<PointInT, FeatureT>> feature_estimator_;
+ using CRHPointCloud = pcl::PointCloud<pcl::Histogram<90>>;
+ std::vector<CRHPointCloud::Ptr> crh_histograms_;
+
+public:
+ CRHEstimation() {}
+
+ void
+ setFeatureEstimator(
+ std::shared_ptr<GlobalEstimator<PointInT, FeatureT>>& feature_estimator)
+ {
+ feature_estimator_ = feature_estimator;
+ }
+
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) override
{
- template<typename PointInT, typename FeatureT>
- class CRHEstimation : public GlobalEstimator<PointInT, FeatureT>
- {
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
- using GlobalEstimator<PointInT, FeatureT>::normals_;
-
- std::shared_ptr<GlobalEstimator<PointInT, FeatureT>> feature_estimator_;
- using CRHPointCloud = pcl::PointCloud<pcl::Histogram<90> >;
- std::vector< CRHPointCloud::Ptr > crh_histograms_;
-
- public:
-
- CRHEstimation ()
- {
-
- }
-
- void
- setFeatureEstimator(std::shared_ptr<GlobalEstimator<PointInT, FeatureT>>& feature_estimator) {
- feature_estimator_ = feature_estimator;
- }
-
- void
- estimate (PointInTPtr & in, PointInTPtr & processed,
- std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > & signatures,
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids) override
- {
-
- if (!feature_estimator_)
- {
- PCL_ERROR("CRHEstimation needs a global estimator... please provide one\n");
- return;
- }
-
- feature_estimator_->estimate(in, processed, signatures, centroids);
-
- if(!feature_estimator_->computedNormals()) {
- normals_.reset(new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals_);
- } else {
- feature_estimator_->getNormals(normals_);
- }
-
- crh_histograms_.resize(signatures.size());
-
- using CRHEstimation = pcl::CRHEstimation<PointInT, pcl::Normal, pcl::Histogram<90> >;
- CRHEstimation crh;
- crh.setInputCloud(processed);
- crh.setInputNormals(normals_);
-
- for (std::size_t idx = 0; idx < signatures.size (); idx++)
- {
- Eigen::Vector4f centroid4f(centroids[idx][0],centroids[idx][1],centroids[idx][2],0);
- crh.setCentroid(centroid4f);
- crh_histograms_[idx].reset(new CRHPointCloud());
- crh.compute (*crh_histograms_[idx]);
- }
-
- }
-
- void getCRHHistograms(std::vector< CRHPointCloud::Ptr > & crh_histograms) {
- crh_histograms = crh_histograms_;
- }
-
- bool
- computedNormals () override
- {
- return true;
- }
- };
+
+ if (!feature_estimator_) {
+ PCL_ERROR("CRHEstimation needs a global estimator... please provide one\n");
+ return;
+ }
+
+ feature_estimator_->estimate(in, processed, signatures, centroids);
+
+ if (!feature_estimator_->computedNormals()) {
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
+ normal_estimator_->estimate(in, processed, normals_);
+ }
+ else {
+ feature_estimator_->getNormals(normals_);
+ }
+
+ crh_histograms_.resize(signatures.size());
+
+ using CRHEstimation = pcl::CRHEstimation<PointInT, pcl::Normal, pcl::Histogram<90>>;
+ CRHEstimation crh;
+ crh.setInputCloud(processed);
+ crh.setInputNormals(normals_);
+
+ for (std::size_t idx = 0; idx < signatures.size(); idx++) {
+ Eigen::Vector4f centroid4f(
+ centroids[idx][0], centroids[idx][1], centroids[idx][2], 0);
+ crh.setCentroid(centroid4f);
+ crh_histograms_[idx].reset(new CRHPointCloud());
+ crh.compute(*crh_histograms_[idx]);
+ }
+ }
+
+ void
+ getCRHHistograms(std::vector<CRHPointCloud::Ptr>& crh_histograms)
+ {
+ crh_histograms = crh_histograms_;
}
-}
+
+ bool
+ computedNormals() override
+ {
+ return true;
+ }
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/features/cvfh.h>
#include <pcl/surface/mls.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class CVFHEstimation : public GlobalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using GlobalEstimator<PointInT, FeatureT>::normals_;
+ float eps_angle_threshold_;
+ float curvature_threshold_;
+ float cluster_tolerance_factor_;
+ bool normalize_bins_;
+ bool adaptative_MLS_;
+
+public:
+ CVFHEstimation()
{
- template<typename PointInT, typename FeatureT>
- class CVFHEstimation : public GlobalEstimator<PointInT, FeatureT>
+ eps_angle_threshold_ = 0.13f;
+ curvature_threshold_ = 0.035f;
+ normalize_bins_ = true;
+ cluster_tolerance_factor_ = 3.f;
+ }
+
+ void
+ setCVFHParams(float p1, float p2, float p3)
+ {
+ eps_angle_threshold_ = p1;
+ curvature_threshold_ = p2;
+ cluster_tolerance_factor_ = p3;
+ }
+
+ void
+ setAdaptativeMLS(bool b)
+ {
+ adaptative_MLS_ = b;
+ }
+
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ typename pcl::PointCloud<FeatureT>::CloudVectorType& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) override
+ {
+
+ if (!normal_estimator_) {
+ PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
+ return;
+ }
+
+ pcl::MovingLeastSquares<PointInT, PointInT> mls;
+ if (adaptative_MLS_) {
+ typename search::KdTree<PointInT>::Ptr tree;
+ Eigen::Vector4f centroid_cluster;
+ pcl::compute3DCentroid(*in, centroid_cluster);
+ float dist_to_sensor = centroid_cluster.norm();
+ float sigma = dist_to_sensor * 0.01f;
+ mls.setSearchMethod(tree);
+ mls.setSearchRadius(sigma);
+ mls.setUpsamplingMethod(mls.SAMPLE_LOCAL_PLANE);
+ mls.setUpsamplingRadius(0.002);
+ mls.setUpsamplingStepSize(0.001);
+ }
+
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
{
+ normal_estimator_->estimate(in, processed, normals_);
+ }
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
- using GlobalEstimator<PointInT, FeatureT>::normals_;
- float eps_angle_threshold_;
- float curvature_threshold_;
- float cluster_tolerance_factor_;
- bool normalize_bins_;
- bool adaptative_MLS_;
+ if (adaptative_MLS_) {
+ mls.setInputCloud(processed);
- public:
+ PointInTPtr filtered(new pcl::PointCloud<PointInT>);
+ mls.process(*filtered);
- CVFHEstimation ()
+ processed.reset(new pcl::PointCloud<PointInT>);
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
{
- eps_angle_threshold_ = 0.13f;
- curvature_threshold_ = 0.035f;
- normalize_bins_ = true;
- cluster_tolerance_factor_ = 3.f;
+ filtered->is_dense = false;
+ normal_estimator_->estimate(filtered, processed, normals_);
}
-
- void setCVFHParams(float p1, float p2, float p3) {
- eps_angle_threshold_ = p1;
- curvature_threshold_ = p2;
- cluster_tolerance_factor_ = p3;
+ }
+
+ using CVFHEstimation = pcl::CVFHEstimation<PointInT, pcl::Normal, FeatureT>;
+ pcl::PointCloud<FeatureT> cvfh_signatures;
+ typename pcl::search::KdTree<PointInT>::Ptr cvfh_tree(
+ new pcl::search::KdTree<PointInT>);
+
+ CVFHEstimation cvfh;
+ cvfh.setSearchMethod(cvfh_tree);
+ cvfh.setInputCloud(processed);
+ cvfh.setInputNormals(normals_);
+
+ cvfh.setEPSAngleThreshold(eps_angle_threshold_);
+ cvfh.setCurvatureThreshold(curvature_threshold_);
+ cvfh.setNormalizeBins(normalize_bins_);
+
+ float radius = normal_estimator_->normal_radius_;
+ float cluster_tolerance_radius =
+ normal_estimator_->grid_resolution_ * cluster_tolerance_factor_;
+
+ if (normal_estimator_->compute_mesh_resolution_) {
+ radius = normal_estimator_->mesh_resolution_ * normal_estimator_->factor_normals_;
+ cluster_tolerance_radius =
+ normal_estimator_->mesh_resolution_ * cluster_tolerance_factor_;
+
+ if (normal_estimator_->do_voxel_grid_) {
+ radius *= normal_estimator_->factor_voxel_grid_;
+ cluster_tolerance_radius *= normal_estimator_->factor_voxel_grid_;
}
+ }
- void setAdaptativeMLS(bool b) {
- adaptative_MLS_ = b;
- }
+ cvfh.setClusterTolerance(cluster_tolerance_radius);
+ cvfh.setRadiusNormals(radius);
+ cvfh.setMinPoints(100);
- void
- estimate (PointInTPtr & in, PointInTPtr & processed,
- typename pcl::PointCloud<FeatureT>::CloudVectorType & signatures,
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids) override
- {
+ cvfh.compute(cvfh_signatures);
- if (!normal_estimator_)
- {
- PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
- return;
- }
-
- pcl::MovingLeastSquares<PointInT, PointInT> mls;
- if(adaptative_MLS_) {
- typename search::KdTree<PointInT>::Ptr tree;
- Eigen::Vector4f centroid_cluster;
- pcl::compute3DCentroid (*in, centroid_cluster);
- float dist_to_sensor = centroid_cluster.norm();
- float sigma = dist_to_sensor * 0.01f;
- mls.setSearchMethod(tree);
- mls.setSearchRadius (sigma);
- mls.setUpsamplingMethod (mls.SAMPLE_LOCAL_PLANE);
- mls.setUpsamplingRadius (0.002);
- mls.setUpsamplingStepSize (0.001);
- }
-
- normals_.reset (new pcl::PointCloud<pcl::Normal>);
- {
- normal_estimator_->estimate (in, processed, normals_);
- }
-
- if(adaptative_MLS_) {
- mls.setInputCloud(processed);
-
- PointInTPtr filtered(new pcl::PointCloud<PointInT>);
- mls.process(*filtered);
-
- processed.reset(new pcl::PointCloud<PointInT>);
- normals_.reset (new pcl::PointCloud<pcl::Normal>);
- {
- filtered->is_dense = false;
- normal_estimator_->estimate (filtered, processed, normals_);
- }
- }
-
- /*normals_.reset(new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals_);*/
-
- using CVFHEstimation = pcl::CVFHEstimation<PointInT, pcl::Normal, FeatureT>;
- pcl::PointCloud<FeatureT> cvfh_signatures;
- typename pcl::search::KdTree<PointInT>::Ptr cvfh_tree (new pcl::search::KdTree<PointInT>);
-
- CVFHEstimation cvfh;
- cvfh.setSearchMethod (cvfh_tree);
- cvfh.setInputCloud (processed);
- cvfh.setInputNormals (normals_);
-
- cvfh.setEPSAngleThreshold (eps_angle_threshold_);
- cvfh.setCurvatureThreshold (curvature_threshold_);
- cvfh.setNormalizeBins (normalize_bins_);
-
- float radius = normal_estimator_->normal_radius_;
- float cluster_tolerance_radius = normal_estimator_->grid_resolution_ * cluster_tolerance_factor_;
-
- if (normal_estimator_->compute_mesh_resolution_)
- {
- radius = normal_estimator_->mesh_resolution_ * normal_estimator_->factor_normals_;
- cluster_tolerance_radius = normal_estimator_->mesh_resolution_ * cluster_tolerance_factor_;
-
- if (normal_estimator_->do_voxel_grid_)
- {
- radius *= normal_estimator_->factor_voxel_grid_;
- cluster_tolerance_radius *= normal_estimator_->factor_voxel_grid_;
- }
- }
-
- cvfh.setClusterTolerance (cluster_tolerance_radius);
- cvfh.setRadiusNormals (radius);
- cvfh.setMinPoints (100);
-
- //std::cout << "Res:" << normal_estimator_->mesh_resolution_ << " Radius normals:" << radius << " Cluster tolerance:" << cluster_tolerance_radius << " " << eps_angle_threshold_ << " " << curvature_threshold_ << std::endl;
-
- cvfh.compute (cvfh_signatures);
-
- for (std::size_t i = 0; i < cvfh_signatures.points.size (); i++)
- {
- pcl::PointCloud<FeatureT> vfh_signature;
- vfh_signature.points.resize (1);
- vfh_signature.width = vfh_signature.height = 1;
- for (int d = 0; d < 308; ++d)
- vfh_signature.points[0].histogram[d] = cvfh_signatures.points[i].histogram[d];
-
- signatures.push_back (vfh_signature);
- }
-
- cvfh.getCentroidClusters (centroids);
- //std::cout << "centroids size:" << centroids.size () << std::endl;
+ for (std::size_t i = 0; i < cvfh_signatures.size(); i++) {
+ pcl::PointCloud<FeatureT> vfh_signature;
+ vfh_signature.points.resize(1);
+ vfh_signature.width = vfh_signature.height = 1;
+ for (int d = 0; d < 308; ++d)
+ vfh_signature[0].histogram[d] = cvfh_signatures[i].histogram[d];
- }
+ signatures.push_back(vfh_signature);
+ }
- bool
- computedNormals () override
- {
- return true;
- }
+ cvfh.getCentroidClusters(centroids);
+ }
- void setNormalizeBins(bool b) {
- normalize_bins_ = b;
- }
- };
+ bool
+ computedNormals() override
+ {
+ return true;
}
-}
+
+ void
+ setNormalizeBins(bool b)
+ {
+ normalize_bins_ = b;
+ }
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
#include <pcl/features/esf.h>
-namespace pcl
-{
- namespace rec_3d_framework
- {
- template<typename PointInT, typename FeatureT>
- class ESFEstimation : public GlobalEstimator<PointInT, FeatureT>
- {
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class ESFEstimation : public GlobalEstimator<PointInT, FeatureT> {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- public:
- void
- estimate (PointInTPtr & in, PointInTPtr & processed,
- typename pcl::PointCloud<FeatureT>::CloudVectorType & signatures,
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids) override
- {
+public:
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ typename pcl::PointCloud<FeatureT>::CloudVectorType& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) override
+ {
- using ESFEstimation = pcl::ESFEstimation<PointInT, FeatureT>;
- pcl::PointCloud<FeatureT> ESF_signature;
+ using ESFEstimation = pcl::ESFEstimation<PointInT, FeatureT>;
+ pcl::PointCloud<FeatureT> ESF_signature;
- ESFEstimation esf;
- esf.setInputCloud (in);
- esf.compute (ESF_signature);
+ ESFEstimation esf;
+ esf.setInputCloud(in);
+ esf.compute(ESF_signature);
- signatures.resize (1);
- centroids.resize (1);
+ signatures.resize(1);
+ centroids.resize(1);
- signatures[0] = ESF_signature;
+ signatures[0] = ESF_signature;
- Eigen::Vector4f centroid4f;
- pcl::compute3DCentroid (*in, centroid4f);
- centroids[0] = Eigen::Vector3f (centroid4f[0], centroid4f[1], centroid4f[2]);
+ Eigen::Vector4f centroid4f;
+ pcl::compute3DCentroid(*in, centroid4f);
+ centroids[0] = Eigen::Vector3f(centroid4f[0], centroid4f[1], centroid4f[2]);
- pcl::copyPointCloud(*in, *processed);
- //processed = in;
- }
+ pcl::copyPointCloud(*in, *processed);
+ }
- bool
- computedNormals () override
- {
- return false;
- }
- };
+ bool
+ computedNormals() override
+ {
+ return false;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
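A minimal usage sketch of the ESF wrapper above, assuming pcl::PointXYZ input and pcl::ESFSignature640 as FeatureT; describeWithESF is a hypothetical helper and the wrapper header is assumed to be included.

#include <pcl/point_types.h>

// Sketch only: ESF needs no normal estimator and always yields exactly one
// signature/centroid pair for the whole cloud.
void
describeWithESF(pcl::PointCloud<pcl::PointXYZ>::Ptr& in)
{
  using FeatureT = pcl::ESFSignature640;
  pcl::PointCloud<pcl::PointXYZ>::Ptr processed(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<FeatureT>::CloudVectorType signatures;
  std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> centroids;

  pcl::rec_3d_framework::ESFEstimation<pcl::PointXYZ, FeatureT> esf;
  esf.estimate(in, processed, signatures, centroids); // signatures.size() == 1
}

The wrapper copies the input into processed unchanged, so callers can keep using either cloud afterwards.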
#include <memory>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class GlobalEstimator {
+protected:
+ bool computed_normals_;
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+
+ std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>>
+ normal_estimator_;
+
+ pcl::PointCloud<pcl::Normal>::Ptr normals_;
+
+public:
+ virtual ~GlobalEstimator() = default;
+
+ virtual void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) = 0;
+
+ virtual bool
+ computedNormals() = 0;
+
+ void
+ setNormalEstimator(
+ std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>>& ne)
{
- template <typename PointInT, typename FeatureT>
- class GlobalEstimator {
- protected:
- bool computed_normals_;
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
-
- std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> normal_estimator_;
-
- pcl::PointCloud<pcl::Normal>::Ptr normals_;
-
- public:
- virtual
- ~GlobalEstimator() = default;
-
- virtual void
- estimate (PointInTPtr & in, PointInTPtr & processed, std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<
- pcl::PointCloud<FeatureT> > > & signatures, std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids)=0;
-
- virtual bool computedNormals() = 0;
-
- void setNormalEstimator(std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>>& ne) {
- normal_estimator_ = ne;
- }
-
- void getNormals(pcl::PointCloud<pcl::Normal>::Ptr & normals) {
- normals = normals_;
- }
+ normal_estimator_ = ne;
+ }
- };
+ void
+ getNormals(pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ normals = normals_;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
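A minimal sketch of wiring a normal estimator into any GlobalEstimator subclass; attachNormalEstimator is a hypothetical helper, and PreProcessorAndNormalEstimator is assumed to be default-constructible and to live in pcl::rec_3d_framework, as its unqualified use above suggests.

#include <memory>

// Sketch only: the normal estimator is shared by pointer, so one configured
// instance can be reused across several global estimators.
template <typename PointT, typename FeatureT>
void
attachNormalEstimator(pcl::rec_3d_framework::GlobalEstimator<PointT, FeatureT>& estimator)
{
  auto ne = std::make_shared<
      pcl::rec_3d_framework::PreProcessorAndNormalEstimator<PointT, pcl::Normal>>();
  estimator.setNormalEstimator(ne);
}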
#include <pcl/features/our_cvfh.h>
#include <pcl/surface/mls.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class OURCVFHEstimator : public GlobalEstimator<PointInT, FeatureT> {
+
+protected:
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using GlobalEstimator<PointInT, FeatureT>::normals_;
+ float eps_angle_threshold_;
+ float curvature_threshold_;
+ float cluster_tolerance_factor_;
+ bool normalize_bins_;
+ bool adaptative_MLS_;
+ float refine_factor_;
+
+ std::vector<bool> valid_roll_transforms_;
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>> transforms_;
+ std::vector<pcl::PointIndices> cluster_indices_;
+
+public:
+ OURCVFHEstimator()
{
- template<typename PointInT, typename FeatureT>
- class OURCVFHEstimator : public GlobalEstimator<PointInT, FeatureT>
+ eps_angle_threshold_ = 0.13f;
+ curvature_threshold_ = 0.035f;
+ normalize_bins_ = true;
+ cluster_tolerance_factor_ = 3.f;
+ adaptative_MLS_ = false;
+ }
+
+ void
+ setCVFHParams(float p1, float p2, float p3)
+ {
+ eps_angle_threshold_ = p1;
+ curvature_threshold_ = p2;
+ cluster_tolerance_factor_ = p3;
+ }
+
+ void
+ setAdaptativeMLS(bool b)
+ {
+ adaptative_MLS_ = b;
+ }
+
+ void
+ setRefineClustersParam(float p4)
+ {
+ refine_factor_ = p4;
+ }
+
+ void
+ getValidTransformsVec(std::vector<bool>& valid)
+ {
+ valid = valid_roll_transforms_;
+ }
+
+ void
+ getTransformsVec(
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>& trans)
+ {
+ trans = transforms_;
+ }
+
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ typename pcl::PointCloud<FeatureT>::CloudVectorType& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) override
+ {
+
+ valid_roll_transforms_.clear();
+ transforms_.clear();
+
+ if (!normal_estimator_) {
+ PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
+ return;
+ }
+
+ pcl::MovingLeastSquares<PointInT, PointInT> mls;
+ if (adaptative_MLS_) {
+ typename search::KdTree<PointInT>::Ptr tree;
+ Eigen::Vector4f centroid_cluster;
+ pcl::compute3DCentroid(*in, centroid_cluster);
+ float dist_to_sensor = centroid_cluster.norm();
+ float sigma = dist_to_sensor * 0.01f;
+ mls.setSearchMethod(tree);
+ mls.setSearchRadius(sigma);
+ mls.setUpsamplingMethod(mls.SAMPLE_LOCAL_PLANE);
+ mls.setUpsamplingRadius(0.002);
+ mls.setUpsamplingStepSize(0.001);
+ }
+
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
+ {
+ normal_estimator_->estimate(in, processed, normals_);
+ }
+
+ if (adaptative_MLS_) {
+ mls.setInputCloud(processed);
+
+ PointInTPtr filtered(new pcl::PointCloud<PointInT>);
+ mls.process(*filtered);
+
+ processed.reset(new pcl::PointCloud<PointInT>);
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
{
+ filtered->is_dense = false;
+ normal_estimator_->estimate(filtered, processed, normals_);
+ }
+ }
+
+ using OURCVFHEstimation =
+ pcl::OURCVFHEstimation<PointInT, pcl::Normal, pcl::VFHSignature308>;
+ pcl::PointCloud<pcl::VFHSignature308> cvfh_signatures;
+ typename pcl::search::KdTree<PointInT>::Ptr cvfh_tree(
+ new pcl::search::KdTree<PointInT>);
+
+ OURCVFHEstimation cvfh;
+ cvfh.setSearchMethod(cvfh_tree);
+ cvfh.setInputCloud(processed);
+ cvfh.setInputNormals(normals_);
+ cvfh.setEPSAngleThreshold(eps_angle_threshold_);
+ cvfh.setCurvatureThreshold(curvature_threshold_);
+ cvfh.setNormalizeBins(normalize_bins_);
+ cvfh.setRefineClusters(refine_factor_);
+
+ float radius = normal_estimator_->normal_radius_;
+ float cluster_tolerance_radius =
+ normal_estimator_->grid_resolution_ * cluster_tolerance_factor_;
+
+ if (normal_estimator_->compute_mesh_resolution_) {
+ radius = normal_estimator_->mesh_resolution_ * normal_estimator_->factor_normals_;
+ cluster_tolerance_radius =
+ normal_estimator_->mesh_resolution_ * cluster_tolerance_factor_;
+
+ if (normal_estimator_->do_voxel_grid_) {
+ radius *= normal_estimator_->factor_voxel_grid_;
+ cluster_tolerance_radius *= normal_estimator_->factor_voxel_grid_;
+ }
+ }
+
+ cvfh.setClusterTolerance(cluster_tolerance_radius);
+ cvfh.setRadiusNormals(radius);
+ cvfh.setMinPoints(100);
+ cvfh.compute(cvfh_signatures);
+
+ for (const auto& point : cvfh_signatures.points) {
+ pcl::PointCloud<FeatureT> vfh_signature;
+ vfh_signature.points.resize(1);
+ vfh_signature.width = vfh_signature.height = 1;
+ for (int d = 0; d < 308; ++d)
+ vfh_signature[0].histogram[d] = point.histogram[d];
+
+ signatures.push_back(vfh_signature);
+ }
+
+ cvfh.getCentroidClusters(centroids);
+ cvfh.getTransforms(transforms_);
+ cvfh.getValidTransformsVec(valid_roll_transforms_);
+ cvfh.getClusterIndices(cluster_indices_);
+ }
+
+ bool
+ computedNormals() override
+ {
+ return true;
+ }
- protected:
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
- using GlobalEstimator<PointInT, FeatureT>::normals_;
- float eps_angle_threshold_;
- float curvature_threshold_;
- float cluster_tolerance_factor_;
- bool normalize_bins_;
- bool adaptative_MLS_;
- float refine_factor_;
-
- std::vector<bool> valid_roll_transforms_;
- std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > transforms_;
- std::vector<pcl::PointIndices> cluster_indices_;
-
- public:
-
- OURCVFHEstimator ()
- {
- eps_angle_threshold_ = 0.13f;
- curvature_threshold_ = 0.035f;
- normalize_bins_ = true;
- cluster_tolerance_factor_ = 3.f;
- adaptative_MLS_ = false;
- }
-
- void
- setCVFHParams (float p1, float p2, float p3)
- {
- eps_angle_threshold_ = p1;
- curvature_threshold_ = p2;
- cluster_tolerance_factor_ = p3;
- }
-
- void
- setAdaptativeMLS (bool b)
- {
- adaptative_MLS_ = b;
- }
-
- void
- setRefineClustersParam (float p4)
- {
- refine_factor_ = p4;
- }
-
- void
- getValidTransformsVec (std::vector<bool> & valid)
- {
- valid = valid_roll_transforms_;
- }
-
- void
- getTransformsVec (std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > & trans)
- {
- trans = transforms_;
- }
-
- void
- estimate (PointInTPtr & in, PointInTPtr & processed, typename pcl::PointCloud<FeatureT>::CloudVectorType & signatures,
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids) override
- {
-
- valid_roll_transforms_.clear ();
- transforms_.clear ();
-
- if (!normal_estimator_)
- {
- PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
- return;
- }
-
- pcl::MovingLeastSquares<PointInT, PointInT> mls;
- if (adaptative_MLS_)
- {
- typename search::KdTree<PointInT>::Ptr tree;
- Eigen::Vector4f centroid_cluster;
- pcl::compute3DCentroid (*in, centroid_cluster);
- float dist_to_sensor = centroid_cluster.norm ();
- float sigma = dist_to_sensor * 0.01f;
- mls.setSearchMethod (tree);
- mls.setSearchRadius (sigma);
- mls.setUpsamplingMethod (mls.SAMPLE_LOCAL_PLANE);
- mls.setUpsamplingRadius (0.002);
- mls.setUpsamplingStepSize (0.001);
- }
-
- normals_.reset (new pcl::PointCloud<pcl::Normal>);
- {
- normal_estimator_->estimate (in, processed, normals_);
- }
-
- if (adaptative_MLS_)
- {
- mls.setInputCloud (processed);
-
- PointInTPtr filtered (new pcl::PointCloud<PointInT>);
- mls.process (*filtered);
-
- processed.reset (new pcl::PointCloud<PointInT>);
- normals_.reset (new pcl::PointCloud<pcl::Normal>);
- {
- filtered->is_dense = false;
- normal_estimator_->estimate (filtered, processed, normals_);
- }
- }
-
- /*normals_.reset(new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals_);*/
-
- using OURCVFHEstimation = pcl::OURCVFHEstimation<PointInT, pcl::Normal, pcl::VFHSignature308>;
- pcl::PointCloud<pcl::VFHSignature308> cvfh_signatures;
- typename pcl::search::KdTree<PointInT>::Ptr cvfh_tree (new pcl::search::KdTree<PointInT>);
-
- OURCVFHEstimation cvfh;
- cvfh.setSearchMethod (cvfh_tree);
- cvfh.setInputCloud (processed);
- cvfh.setInputNormals (normals_);
- cvfh.setEPSAngleThreshold (eps_angle_threshold_);
- cvfh.setCurvatureThreshold (curvature_threshold_);
- cvfh.setNormalizeBins (normalize_bins_);
- cvfh.setRefineClusters(refine_factor_);
-
- float radius = normal_estimator_->normal_radius_;
- float cluster_tolerance_radius = normal_estimator_->grid_resolution_ * cluster_tolerance_factor_;
-
- if (normal_estimator_->compute_mesh_resolution_)
- {
- radius = normal_estimator_->mesh_resolution_ * normal_estimator_->factor_normals_;
- cluster_tolerance_radius = normal_estimator_->mesh_resolution_ * cluster_tolerance_factor_;
-
- if (normal_estimator_->do_voxel_grid_)
- {
- radius *= normal_estimator_->factor_voxel_grid_;
- cluster_tolerance_radius *= normal_estimator_->factor_voxel_grid_;
- }
- }
-
- cvfh.setClusterTolerance (cluster_tolerance_radius);
- cvfh.setRadiusNormals (radius);
- cvfh.setMinPoints (100);
- cvfh.compute (cvfh_signatures);
-
- //std::cout << "Res:" << normal_estimator_->mesh_resolution_ << " Radius normals:" << radius << " Cluster tolerance:" << cluster_tolerance_radius << " " << eps_angle_threshold_ << " " << curvature_threshold_ << std::endl;
-
- for (const auto &point : cvfh_signatures.points)
- {
- pcl::PointCloud<FeatureT> vfh_signature;
- vfh_signature.points.resize (1);
- vfh_signature.width = vfh_signature.height = 1;
- for (int d = 0; d < 308; ++d)
- vfh_signature.points[0].histogram[d] = point.histogram[d];
-
- signatures.push_back (vfh_signature);
- }
-
- cvfh.getCentroidClusters (centroids);
- cvfh.getTransforms(transforms_);
- cvfh.getValidTransformsVec(valid_roll_transforms_);
- cvfh.getClusterIndices(cluster_indices_);
- }
-
- bool
- computedNormals () override
- {
- return true;
- }
-
- void
- setNormalizeBins (bool b)
- {
- normalize_bins_ = b;
- }
- };
+ void
+ setNormalizeBins(bool b)
+ {
+ normalize_bins_ = b;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/vfh.h>
-namespace pcl
-{
- namespace rec_3d_framework
- {
- template<typename PointInT, typename FeatureT>
- class VFHEstimation : public GlobalEstimator<PointInT, FeatureT>
- {
+namespace pcl {
+namespace rec_3d_framework {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
- using GlobalEstimator<PointInT, FeatureT>::normals_;
+template <typename PointInT, typename FeatureT>
+class VFHEstimation : public GlobalEstimator<PointInT, FeatureT> {
- public:
- void
- estimate (PointInTPtr & in, PointInTPtr & processed,
- typename pcl::PointCloud<FeatureT>::CloudVectorType & signatures,
- std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > & centroids) override
- {
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using GlobalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using GlobalEstimator<PointInT, FeatureT>::normals_;
- if (!normal_estimator_)
- {
- PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
- return;
- }
+public:
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ typename pcl::PointCloud<FeatureT>::CloudVectorType& signatures,
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
+ centroids) override
+ {
- normals_.reset(new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals_);
+ if (!normal_estimator_) {
+ PCL_ERROR("This feature needs normals... please provide a normal estimator\n");
+ return;
+ }
- using VFHEstimation = pcl::VFHEstimation<PointInT, pcl::Normal, FeatureT>;
- pcl::PointCloud<FeatureT> vfh_signature;
+ normals_.reset(new pcl::PointCloud<pcl::Normal>);
+ normal_estimator_->estimate(in, processed, normals_);
- VFHEstimation vfh;
- typename pcl::search::KdTree<PointInT>::Ptr vfh_tree (new pcl::search::KdTree<PointInT>);
- vfh.setSearchMethod (vfh_tree);
- vfh.setInputCloud (processed);
- vfh.setInputNormals (normals_);
- vfh.setNormalizeBins (true);
- vfh.setNormalizeDistance (true);
- vfh.compute (vfh_signature);
+ using VFHEstimation = pcl::VFHEstimation<PointInT, pcl::Normal, FeatureT>;
+ pcl::PointCloud<FeatureT> vfh_signature;
- signatures.resize (1);
- centroids.resize (1);
+ VFHEstimation vfh;
+ typename pcl::search::KdTree<PointInT>::Ptr vfh_tree(
+ new pcl::search::KdTree<PointInT>);
+ vfh.setSearchMethod(vfh_tree);
+ vfh.setInputCloud(processed);
+ vfh.setInputNormals(normals_);
+ vfh.setNormalizeBins(true);
+ vfh.setNormalizeDistance(true);
+ vfh.compute(vfh_signature);
- signatures[0] = vfh_signature;
+ signatures.resize(1);
+ centroids.resize(1);
- Eigen::Vector4f centroid4f;
- pcl::compute3DCentroid (*in, centroid4f);
- centroids[0] = Eigen::Vector3f (centroid4f[0], centroid4f[1], centroid4f[2]);
+ signatures[0] = vfh_signature;
- }
+ Eigen::Vector4f centroid4f;
+ pcl::compute3DCentroid(*in, centroid4f);
+ centroids[0] = Eigen::Vector3f(centroid4f[0], centroid4f[1], centroid4f[2]);
+ }
- bool
- computedNormals () override
- {
- return true;
- }
- };
+ bool
+ computedNormals() override
+ {
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
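A minimal end-to-end sketch for the VFH wrapper above, assuming pcl::PointXYZ input, pcl::VFHSignature308 as FeatureT, and an already configured normal estimator; describeWithVFH is a hypothetical helper.

#include <pcl/point_types.h>

// Sketch only: with a normal estimator attached, the wrapper produces one
// global 308-bin signature plus the cloud centroid.
void
describeWithVFH(
    pcl::PointCloud<pcl::PointXYZ>::Ptr& in,
    std::shared_ptr<pcl::rec_3d_framework::PreProcessorAndNormalEstimator<
        pcl::PointXYZ, pcl::Normal>>& ne)
{
  using FeatureT = pcl::VFHSignature308;
  pcl::PointCloud<pcl::PointXYZ>::Ptr processed(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<FeatureT>::CloudVectorType signatures;
  std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> centroids;

  pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ, FeatureT> vfh;
  vfh.setNormalEstimator(ne);                         // inherited from GlobalEstimator
  vfh.estimate(in, processed, signatures, centroids); // one signature, one centroid
}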
#include <pcl/features/shot.h>
#include <pcl/io/pcd_io.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class ColorSHOTLocalEstimation : public LocalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+
+ using LocalEstimator<PointInT, FeatureT>::support_radius_;
+ using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
+
+public:
+ bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+           FeatureTPtr& signatures) override
{
- template<typename PointInT, typename FeatureT>
- class ColorSHOTLocalEstimation : public LocalEstimator<PointInT, FeatureT>
- {
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
-
- using LocalEstimator<PointInT, FeatureT>::support_radius_;
- using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
- using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
-
- public:
- bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures)
- {
-
- if (!normal_estimator_)
- {
- PCL_ERROR("SHOTLocalEstimation :: This feature needs normals... please provide a normal estimator\n");
- return false;
- }
-
- if (keypoint_extractor_.size() == 0)
- {
- PCL_ERROR("SHOTLocalEstimation :: This feature needs a keypoint extractor... please provide one\n");
- return false;
- }
-
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- normals.reset (new pcl::PointCloud<pcl::Normal>);
- {
- pcl::ScopeTime t ("Compute normals");
- normal_estimator_->estimate (in, processed, normals);
- }
-
- //compute keypoints
- computeKeypoints(processed, keypoints, normals);
-
- if (keypoints->points.size () == 0)
- {
- PCL_WARN("ColorSHOTLocalEstimation :: No keypoints were found\n");
- return false;
- }
-
- //compute signatures
- using SHOTEstimator = pcl::SHOTColorEstimation<PointInT, pcl::Normal, pcl::SHOT1344>;
- typename pcl::search::KdTree<PointInT>::Ptr tree (new pcl::search::KdTree<PointInT>);
-
- pcl::PointCloud<pcl::SHOT1344>::Ptr shots (new pcl::PointCloud<pcl::SHOT1344>);
- SHOTEstimator shot_estimate;
- shot_estimate.setSearchMethod (tree);
- shot_estimate.setInputNormals (normals);
- shot_estimate.setInputCloud (keypoints);
- shot_estimate.setSearchSurface(processed);
- shot_estimate.setRadiusSearch (support_radius_);
- shot_estimate.compute (*shots);
- signatures->resize (shots->points.size ());
- signatures->width = static_cast<int> (shots->points.size ());
- signatures->height = 1;
-
- int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);
-
- for (std::size_t k = 0; k < shots->points.size (); k++)
- for (int i = 0; i < size_feat; i++)
- signatures->points[k].histogram[i] = shots->points[k].descriptor[i];
-
- return true;
-
- }
-
- };
+
+ if (!normal_estimator_) {
+ PCL_ERROR("SHOTLocalEstimation :: This feature needs normals... "
+ "please provide a normal estimator\n");
+ return false;
+ }
+
+ if (keypoint_extractor_.size() == 0) {
+ PCL_ERROR("SHOTLocalEstimation :: This feature needs a keypoint extractor... "
+ "please provide one\n");
+ return false;
+ }
+
+    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
+ {
+ pcl::ScopeTime t("Compute normals");
+ normal_estimator_->estimate(in, processed, normals);
+ }
+
+ // compute keypoints
+ computeKeypoints(processed, keypoints, normals);
+
+ if (keypoints->size() == 0) {
+ PCL_WARN("ColorSHOTLocalEstimation :: No keypoints were found\n");
+ return false;
+ }
+
+ // compute signatures
+ using SHOTEstimator =
+ pcl::SHOTColorEstimation<PointInT, pcl::Normal, pcl::SHOT1344>;
+ typename pcl::search::KdTree<PointInT>::Ptr tree(new pcl::search::KdTree<PointInT>);
+
+ pcl::PointCloud<pcl::SHOT1344>::Ptr shots(new pcl::PointCloud<pcl::SHOT1344>);
+ SHOTEstimator shot_estimate;
+ shot_estimate.setSearchMethod(tree);
+ shot_estimate.setInputNormals(normals);
+ shot_estimate.setInputCloud(keypoints);
+ shot_estimate.setSearchSurface(processed);
+ shot_estimate.setRadiusSearch(support_radius_);
+ shot_estimate.compute(*shots);
+ signatures->resize(shots->size());
+ signatures->width = shots->size();
+ signatures->height = 1;
+
+ int size_feat = sizeof((*signatures)[0].histogram) / sizeof(float);
+
+ for (std::size_t k = 0; k < shots->size(); k++)
+ for (int i = 0; i < size_feat; i++)
+ (*signatures)[k].histogram[i] = (*shots)[k].descriptor[i];
+
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/fpfh.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class FPFHLocalEstimation : public LocalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+ using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
+
+ using LocalEstimator<PointInT, FeatureT>::support_radius_;
+ using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
+
+public:
+ bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+ FeatureTPtr& signatures) override
{
- template<typename PointInT, typename FeatureT>
- class FPFHLocalEstimation : public LocalEstimator<PointInT, FeatureT>
- {
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
- using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
-
- using LocalEstimator<PointInT, FeatureT>::support_radius_;
- using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
- using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
-
- public:
- bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures) override
- {
-
- if (!normal_estimator_)
- {
- PCL_ERROR("FPFHLocalEstimation :: This feature needs normals... please provide a normal estimator\n");
- return false;
- }
-
- if (keypoint_extractor_.empty ())
- {
- PCL_ERROR("FPFHLocalEstimation :: This feature needs a keypoint extractor... please provide one\n");
- return false;
- }
-
- //compute normals
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals);
-
- this->computeKeypoints(processed, keypoints, normals);
- std::cout << " " << normals->points.size() << " " << processed->points.size() << std::endl;
-
- if (keypoints->points.empty ())
- {
- PCL_WARN("FPFHLocalEstimation :: No keypoints were found\n");
- return false;
- }
-
- assert (processed->points.size () == normals->points.size ());
-
- //compute signatures
- using FPFHEstimator = pcl::FPFHEstimation<PointInT, pcl::Normal, pcl::FPFHSignature33>;
- typename pcl::search::KdTree<PointInT>::Ptr tree (new pcl::search::KdTree<PointInT>);
-
- pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfhs (new pcl::PointCloud<pcl::FPFHSignature33>);
- FPFHEstimator fpfh_estimate;
- fpfh_estimate.setSearchMethod (tree);
- fpfh_estimate.setInputCloud (keypoints);
- fpfh_estimate.setSearchSurface(processed);
- fpfh_estimate.setInputNormals (normals);
- fpfh_estimate.setRadiusSearch (support_radius_);
- fpfh_estimate.compute (*fpfhs);
-
- signatures->resize (fpfhs->points.size ());
- signatures->width = static_cast<int> (fpfhs->points.size ());
- signatures->height = 1;
-
- int size_feat = 33;
- for (std::size_t k = 0; k < fpfhs->points.size (); k++)
- for (int i = 0; i < size_feat; i++)
- signatures->points[k].histogram[i] = fpfhs->points[k].histogram[i];
-
- return true;
-
- }
-
- };
+
+ if (!normal_estimator_) {
+ PCL_ERROR("FPFHLocalEstimation :: This feature needs normals... "
+ "please provide a normal estimator\n");
+ return false;
+ }
+
+ if (keypoint_extractor_.empty()) {
+ PCL_ERROR("FPFHLocalEstimation :: This feature needs a keypoint extractor... "
+ "please provide one\n");
+ return false;
+ }
+
+ // compute normals
+ pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
+ normal_estimator_->estimate(in, processed, normals);
+
+ this->computeKeypoints(processed, keypoints, normals);
+ std::cout << " " << normals->size() << " " << processed->size() << std::endl;
+
+ if (keypoints->points.empty()) {
+ PCL_WARN("FPFHLocalEstimation :: No keypoints were found\n");
+ return false;
+ }
+
+ assert(processed->size() == normals->size());
+
+ // compute signatures
+ using FPFHEstimator =
+ pcl::FPFHEstimation<PointInT, pcl::Normal, pcl::FPFHSignature33>;
+ typename pcl::search::KdTree<PointInT>::Ptr tree(new pcl::search::KdTree<PointInT>);
+
+ pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfhs(
+ new pcl::PointCloud<pcl::FPFHSignature33>);
+ FPFHEstimator fpfh_estimate;
+ fpfh_estimate.setSearchMethod(tree);
+ fpfh_estimate.setInputCloud(keypoints);
+ fpfh_estimate.setSearchSurface(processed);
+ fpfh_estimate.setInputNormals(normals);
+ fpfh_estimate.setRadiusSearch(support_radius_);
+ fpfh_estimate.compute(*fpfhs);
+
+ signatures->resize(fpfhs->size());
+ signatures->width = fpfhs->size();
+ signatures->height = 1;
+
+ int size_feat = 33;
+ for (std::size_t k = 0; k < fpfhs->size(); k++)
+ for (int i = 0; i < size_feat; i++)
+ (*signatures)[k].histogram[i] = (*fpfhs)[k].histogram[i];
+
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
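A minimal sketch for the FPFH local wrapper above; describeWithFPFH is a hypothetical helper, and the setNormalEstimator/addKeypointExtractor setters are assumed to be kept unchanged by this reformat (they appear in the pre-format LocalEstimator code further below).

#include <pcl/point_types.h>

// Sketch only: a local estimator pairs a normal estimator with one or more
// keypoint extractors, then computes one FPFH signature per keypoint.
void
describeWithFPFH(
    pcl::PointCloud<pcl::PointXYZ>::Ptr& in,
    std::shared_ptr<pcl::rec_3d_framework::PreProcessorAndNormalEstimator<
        pcl::PointXYZ, pcl::Normal>>& ne,
    std::shared_ptr<pcl::rec_3d_framework::KeypointExtractor<pcl::PointXYZ>>& ke)
{
  using FeatureT = pcl::FPFHSignature33;
  pcl::PointCloud<pcl::PointXYZ>::Ptr processed(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<FeatureT>::Ptr signatures(new pcl::PointCloud<FeatureT>);

  pcl::rec_3d_framework::FPFHLocalEstimation<pcl::PointXYZ, FeatureT> fpfh;
  fpfh.setNormalEstimator(ne);
  fpfh.addKeypointExtractor(ke);
  fpfh.estimate(in, processed, keypoints, signatures); // false if no keypoints found
}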
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/features/fpfh_omp.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class FPFHLocalEstimationOMP : public LocalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+ using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
+
+ using LocalEstimator<PointInT, FeatureT>::support_radius_;
+ using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
+
+public:
+ bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+           FeatureTPtr& signatures) override
{
- template<typename PointInT, typename FeatureT>
- class FPFHLocalEstimationOMP : public LocalEstimator<PointInT, FeatureT>
- {
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
- using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
-
- using LocalEstimator<PointInT, FeatureT>::support_radius_;
- using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
- using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
-
- public:
- bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures)
- {
-
- if (!normal_estimator_)
- {
- PCL_ERROR("FPFHLocalEstimation :: This feature needs normals... please provide a normal estimator\n");
- return false;
- }
-
- if (keypoint_extractor_.size() == 0)
- {
- PCL_ERROR("FPFHLocalEstimation :: This feature needs a keypoint extractor... please provide one\n");
- return false;
- }
-
- //compute normals
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- normal_estimator_->estimate (in, processed, normals);
-
- //compute keypoints
- computeKeypoints(processed, keypoints, normals);
- std::cout << " " << normals->points.size() << " " << processed->points.size() << std::endl;
-
- if (keypoints->points.size () == 0)
- {
- PCL_WARN("FPFHLocalEstimation :: No keypoints were found\n");
- return false;
- }
-
- assert (processed->points.size () == normals->points.size ());
-
- //compute signatures
- using FPFHEstimator = pcl::FPFHEstimationOMP<PointInT, pcl::Normal, pcl::FPFHSignature33>;
- typename pcl::search::KdTree<PointInT>::Ptr tree (new pcl::search::KdTree<PointInT>);
-
- pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfhs (new pcl::PointCloud<pcl::FPFHSignature33>);
- FPFHEstimator fpfh_estimate;
- fpfh_estimate.setNumberOfThreads(8);
- fpfh_estimate.setSearchMethod (tree);
- fpfh_estimate.setInputCloud (keypoints);
- fpfh_estimate.setSearchSurface(processed);
- fpfh_estimate.setInputNormals (normals);
- fpfh_estimate.setRadiusSearch (support_radius_);
- fpfh_estimate.compute (*fpfhs);
-
- signatures->resize (fpfhs->points.size ());
- signatures->width = static_cast<int> (fpfhs->points.size ());
- signatures->height = 1;
-
- int size_feat = 33;
- for (std::size_t k = 0; k < fpfhs->points.size (); k++)
- for (int i = 0; i < size_feat; i++)
- signatures->points[k].histogram[i] = fpfhs->points[k].histogram[i];
-
- return true;
-
- }
-
- };
+
+ if (!normal_estimator_) {
+ PCL_ERROR("FPFHLocalEstimation :: This feature needs normals... "
+ "please provide a normal estimator\n");
+ return false;
+ }
+
+ if (keypoint_extractor_.size() == 0) {
+ PCL_ERROR("FPFHLocalEstimation :: This feature needs a keypoint extractor... "
+ "please provide one\n");
+ return false;
+ }
+
+ // compute normals
+ pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
+ normal_estimator_->estimate(in, processed, normals);
+
+ // compute keypoints
+ computeKeypoints(processed, keypoints, normals);
+ std::cout << " " << normals->size() << " " << processed->size() << std::endl;
+
+ if (keypoints->size() == 0) {
+ PCL_WARN("FPFHLocalEstimation :: No keypoints were found\n");
+ return false;
+ }
+
+ assert(processed->size() == normals->size());
+
+ // compute signatures
+ using FPFHEstimator =
+ pcl::FPFHEstimationOMP<PointInT, pcl::Normal, pcl::FPFHSignature33>;
+ typename pcl::search::KdTree<PointInT>::Ptr tree(new pcl::search::KdTree<PointInT>);
+
+ pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfhs(
+ new pcl::PointCloud<pcl::FPFHSignature33>);
+ FPFHEstimator fpfh_estimate;
+ fpfh_estimate.setNumberOfThreads(8);
+ fpfh_estimate.setSearchMethod(tree);
+ fpfh_estimate.setInputCloud(keypoints);
+ fpfh_estimate.setSearchSurface(processed);
+ fpfh_estimate.setInputNormals(normals);
+ fpfh_estimate.setRadiusSearch(support_radius_);
+ fpfh_estimate.compute(*fpfhs);
+
+ signatures->resize(fpfhs->size());
+ signatures->width = fpfhs->size();
+ signatures->height = 1;
+
+ int size_feat = 33;
+ for (std::size_t k = 0; k < fpfhs->size(); k++)
+ for (int i = 0; i < size_feat; i++)
+ (*signatures)[k].histogram[i] = (*fpfhs)[k].histogram[i];
+
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h>
#include <pcl/filters/uniform_sampling.h>
-#include <pcl/surface/mls.h>
#include <pcl/keypoints/harris_3d.h>
#include <pcl/keypoints/sift_keypoint.h>
#include <pcl/keypoints/susan.h>
+#include <pcl/surface/mls.h>
#include <memory>
-namespace pcl
-{
- template<>
- struct SIFTKeypointFieldSelector<PointXYZ>
- {
- inline float
- operator () (const PointXYZ & p) const
- {
- return p.z;
- }
- };
-}
+namespace pcl {
-namespace pcl
-{
- namespace rec_3d_framework
+template <>
+struct SIFTKeypointFieldSelector<PointXYZ> {
+ inline float
+ operator()(const PointXYZ& p) const
{
+ return p.z;
+ }
+};
- template<typename PointInT>
- class KeypointExtractor
- {
- protected:
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using PointOutTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- typename pcl::PointCloud<PointInT>::Ptr input_;
- float radius_;
-
- public:
- virtual
- ~KeypointExtractor() = default;
-
- void
- setInputCloud (PointInTPtr & input)
- {
- input_ = input;
- }
+} // namespace pcl
- void
- setSupportRadius (float f)
- {
- radius_ = f;
- }
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT>
+class KeypointExtractor {
+protected:
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using PointOutTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ typename pcl::PointCloud<PointInT>::Ptr input_;
+ float radius_;
+
+public:
+ virtual ~KeypointExtractor() = default;
+
+ void
+ setInputCloud(PointInTPtr& input)
+ {
+ input_ = input;
+ }
+
+ void
+ setSupportRadius(float f)
+ {
+ radius_ = f;
+ }
- virtual void
- compute (PointOutTPtr & keypoints) = 0;
+ virtual void
+ compute(PointOutTPtr& keypoints) = 0;
- virtual void
- setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & /*normals*/)
- {
+ virtual void
+ setNormals(const pcl::PointCloud<pcl::Normal>::Ptr& /*normals*/)
+ {}
+ virtual bool
+ needNormals()
+ {
+ return false;
+ }
+};
+
+template <typename PointInT>
+class UniformSamplingExtractor : public KeypointExtractor<PointInT> {
+private:
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ bool filter_planar_;
+ using KeypointExtractor<PointInT>::input_;
+ using KeypointExtractor<PointInT>::radius_;
+ float sampling_density_;
+ std::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_;
+ std::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_;
+
+ void
+ filterPlanar(PointInTPtr& input, PointInTPtr& keypoints_cloud)
+ {
+ pcl::PointCloud<int> filtered_keypoints;
+ // create a search object
+ typename pcl::search::Search<PointInT>::Ptr tree;
+ if (input->isOrganized())
+ tree.reset(new pcl::search::OrganizedNeighbor<PointInT>());
+ else
+ tree.reset(new pcl::search::KdTree<PointInT>(false));
+ tree->setInputCloud(input);
+
+ neighborhood_indices_.reset(new std::vector<std::vector<int>>);
+ neighborhood_indices_->resize(keypoints_cloud->size());
+ neighborhood_dist_.reset(new std::vector<std::vector<float>>);
+ neighborhood_dist_->resize(keypoints_cloud->size());
+
+ filtered_keypoints.points.resize(keypoints_cloud->size());
+ int good = 0;
+
+ for (std::size_t i = 0; i < keypoints_cloud->size(); i++) {
+
+ if (tree->radiusSearch((*keypoints_cloud)[i],
+ radius_,
+ (*neighborhood_indices_)[good],
+ (*neighborhood_dist_)[good])) {
+
+ EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
+ Eigen::Vector4f xyz_centroid;
+ EIGEN_ALIGN16 Eigen::Vector3f eigenValues;
+ EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors;
+
+ // compute planarity of the region
+ computeMeanAndCovarianceMatrix(
+ *input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid);
+ pcl::eigen33(covariance_matrix, eigenVectors, eigenValues);
+
+ float eigsum = eigenValues.sum();
+ if (!std::isfinite(eigsum)) {
+ PCL_ERROR("Eigen sum is not finite\n");
}
- virtual bool
- needNormals ()
- {
- return false;
+ if ((std::abs(eigenValues[0] - eigenValues[1]) < 1.5e-4) ||
+ (eigsum != 0 && std::abs(eigenValues[0] / eigsum) > 1.e-2)) {
+ // region is not planar, add to filtered keypoint
+ (*keypoints_cloud)[good] = (*keypoints_cloud)[i];
+ good++;
}
+ }
+ }
- };
-
- template<typename PointInT>
- class UniformSamplingExtractor : public KeypointExtractor<PointInT>
- {
- private:
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- bool filter_planar_;
- using KeypointExtractor<PointInT>::input_;
- using KeypointExtractor<PointInT>::radius_;
- float sampling_density_;
- std::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_;
- std::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_;
-
- void
- filterPlanar (PointInTPtr & input, PointInTPtr & keypoints_cloud)
- {
- pcl::PointCloud<int> filtered_keypoints;
- //create a search object
- typename pcl::search::Search<PointInT>::Ptr tree;
- if (input->isOrganized ())
- tree.reset (new pcl::search::OrganizedNeighbor<PointInT> ());
- else
- tree.reset (new pcl::search::KdTree<PointInT> (false));
- tree->setInputCloud (input);
-
- neighborhood_indices_.reset (new std::vector<std::vector<int> >);
- neighborhood_indices_->resize (keypoints_cloud->points.size ());
- neighborhood_dist_.reset (new std::vector<std::vector<float> >);
- neighborhood_dist_->resize (keypoints_cloud->points.size ());
-
- filtered_keypoints.points.resize (keypoints_cloud->points.size ());
- int good = 0;
-
- for (std::size_t i = 0; i < keypoints_cloud->points.size (); i++)
- {
-
- if (tree->radiusSearch (keypoints_cloud->points[i], radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good]))
- {
-
- EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
- Eigen::Vector4f xyz_centroid;
- EIGEN_ALIGN16 Eigen::Vector3f eigenValues;
- EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors;
-
- //compute planarity of the region
- computeMeanAndCovarianceMatrix (*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid);
- pcl::eigen33 (covariance_matrix, eigenVectors, eigenValues);
-
- float eigsum = eigenValues.sum ();
- if (!std::isfinite(eigsum))
- {
- PCL_ERROR("Eigen sum is not finite\n");
- }
-
- if ((std::abs (eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && std::abs (eigenValues[0] / eigsum) > 1.e-2))
- {
- //region is not planar, add to filtered keypoint
- keypoints_cloud->points[good] = keypoints_cloud->points[i];
- good++;
- }
- }
- }
-
- neighborhood_indices_->resize (good);
- neighborhood_dist_->resize (good);
- keypoints_cloud->points.resize (good);
-
- neighborhood_indices_->clear ();
- neighborhood_dist_->clear ();
+ neighborhood_indices_->resize(good);
+ neighborhood_dist_->resize(good);
+ keypoints_cloud->points.resize(good);
- }
+ neighborhood_indices_->clear();
+ neighborhood_dist_->clear();
+ }
- public:
+public:
+ void
+ setFilterPlanar(bool b)
+ {
+ filter_planar_ = b;
+ }
- void
- setFilterPlanar (bool b)
- {
- filter_planar_ = b;
- }
+ void
+ setSamplingDensity(float f)
+ {
+ sampling_density_ = f;
+ }
- void
- setSamplingDensity (float f)
- {
- sampling_density_ = f;
- }
+ void
+ compute(PointInTPtr& keypoints) override
+ {
+ keypoints.reset(new pcl::PointCloud<PointInT>);
- void
- compute (PointInTPtr & keypoints) override
- {
- keypoints.reset (new pcl::PointCloud<PointInT>);
+ pcl::UniformSampling<PointInT> keypoint_extractor;
+ keypoint_extractor.setRadiusSearch(sampling_density_);
+ keypoint_extractor.setInputCloud(input_);
- pcl::UniformSampling<PointInT> keypoint_extractor;
- keypoint_extractor.setRadiusSearch (sampling_density_);
- keypoint_extractor.setInputCloud (input_);
+ keypoint_extractor.filter(*keypoints);
- keypoint_extractor.filter (*keypoints);
+ if (filter_planar_)
+ filterPlanar(input_, keypoints);
+ }
+};
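A minimal configuration sketch for the uniform-sampling extractor above, assuming a pcl::PointXYZ cloud; extractUniformKeypoints and the numeric radii are illustrative only.

#include <pcl/point_types.h>

// Sketch only: sub-sample the cloud with pcl::UniformSampling, then optionally
// drop keypoints whose neighbourhood (within the support radius) is planar.
pcl::PointCloud<pcl::PointXYZ>::Ptr
extractUniformKeypoints(pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud)
{
  pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ> us;
  us.setInputCloud(cloud);       // from KeypointExtractor
  us.setSupportRadius(0.04f);    // radius used by the planarity test
  us.setSamplingDensity(0.005f); // uniform sampling radius
  us.setFilterPlanar(true);

  pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints;
  us.compute(keypoints);         // compute() allocates and fills the cloud
  return keypoints;
}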
- if (filter_planar_)
- filterPlanar (input_, keypoints);
- }
- };
-
- template<typename PointInT>
- class SIFTKeypointExtractor : public KeypointExtractor<PointInT>
- {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using KeypointExtractor<PointInT>::input_;
- using KeypointExtractor<PointInT>::radius_;
-
- public:
- void
- compute (PointInTPtr & keypoints)
- {
- keypoints.reset (new pcl::PointCloud<PointInT>);
-
- typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
- pcl::SIFTKeypoint<PointInT, pcl::PointXYZI> sift3D;
- sift3D.setScales (0.003f, 3, 2);
- sift3D.setMinimumContrast (0.1f);
- sift3D.setInputCloud (input_);
- sift3D.setSearchSurface (input_);
- sift3D.compute (*intensity_keypoints);
- pcl::copyPointCloud (*intensity_keypoints, *keypoints);
- }
- };
-
- template<typename PointInT>
- class SIFTSurfaceKeypointExtractor : public KeypointExtractor<PointInT>
- {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- pcl::PointCloud<pcl::Normal>::Ptr normals_;
- using KeypointExtractor<PointInT>::input_;
- using KeypointExtractor<PointInT>::radius_;
-
- bool
- needNormals ()
- {
- return true;
- }
+template <typename PointInT>
+class SIFTKeypointExtractor : public KeypointExtractor<PointInT> {
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using KeypointExtractor<PointInT>::input_;
+ using KeypointExtractor<PointInT>::radius_;
- void
- setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
- {
- normals_ = normals;
- }
+public:
+ void
+  compute(PointInTPtr& keypoints) override
+ {
+ keypoints.reset(new pcl::PointCloud<PointInT>);
+
+ typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints(
+ new pcl::PointCloud<pcl::PointXYZI>);
+ pcl::SIFTKeypoint<PointInT, pcl::PointXYZI> sift3D;
+ sift3D.setScales(0.003f, 3, 2);
+ sift3D.setMinimumContrast(0.1f);
+ sift3D.setInputCloud(input_);
+ sift3D.setSearchSurface(input_);
+ sift3D.compute(*intensity_keypoints);
+ pcl::copyPointCloud(*intensity_keypoints, *keypoints);
+ }
+};
- public:
- void
- compute (PointInTPtr & keypoints)
- {
- if (normals_ == 0 || (normals_->points.size () != input_->points.size ()))
- PCL_WARN("SIFTSurfaceKeypointExtractor -- Normals are not valid\n");
-
- keypoints.reset (new pcl::PointCloud<PointInT>);
-
- typename pcl::PointCloud<pcl::PointNormal>::Ptr input_cloud (new pcl::PointCloud<pcl::PointNormal>);
- input_cloud->width = input_->width;
- input_cloud->height = input_->height;
- input_cloud->points.resize (input_->width * input_->height);
- for (std::size_t i = 0; i < input_->points.size (); i++)
- {
- input_cloud->points[i].getVector3fMap () = input_->points[i].getVector3fMap ();
- input_cloud->points[i].getNormalVector3fMap () = normals_->points[i].getNormalVector3fMap ();
- }
-
- typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
- pcl::SIFTKeypoint<pcl::PointNormal, pcl::PointXYZI> sift3D;
- sift3D.setScales (0.003f, 3, 2);
- sift3D.setMinimumContrast (0.0);
- sift3D.setInputCloud (input_cloud);
- sift3D.setSearchSurface (input_cloud);
- sift3D.compute (*intensity_keypoints);
- pcl::copyPointCloud (*intensity_keypoints, *keypoints);
- }
- };
-
- template<typename PointInT, typename NormalT = pcl::Normal>
- class HarrisKeypointExtractor : public KeypointExtractor<PointInT>
- {
-
- pcl::PointCloud<pcl::Normal>::Ptr normals_;
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using KeypointExtractor<PointInT>::input_;
- using KeypointExtractor<PointInT>::radius_;
- typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m_;
- float non_max_radius_;
- float threshold_;
-
- public:
-
- HarrisKeypointExtractor ()
- {
- m_ = pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::HARRIS;
- non_max_radius_ = 0.01f;
- threshold_ = 0.f;
- }
+template <typename PointInT>
+class SIFTSurfaceKeypointExtractor : public KeypointExtractor<PointInT> {
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ pcl::PointCloud<pcl::Normal>::Ptr normals_;
+ using KeypointExtractor<PointInT>::input_;
+ using KeypointExtractor<PointInT>::radius_;
- bool
- needNormals ()
- {
- return true;
- }
+ bool
+ needNormals()
+ {
+ return true;
+ }
- void
- setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
- {
- normals_ = normals;
- }
+ void
+ setNormals(const pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ normals_ = normals;
+ }
- void
- setThreshold(float t) {
- threshold_ = t;
- }
+public:
+ void
+  compute(PointInTPtr& keypoints) override
+ {
+    if (!normals_ || (normals_->size() != input_->size()))
+ PCL_WARN("SIFTSurfaceKeypointExtractor -- Normals are not valid\n");
+
+ keypoints.reset(new pcl::PointCloud<PointInT>);
+
+ typename pcl::PointCloud<pcl::PointNormal>::Ptr input_cloud(
+ new pcl::PointCloud<pcl::PointNormal>);
+ input_cloud->width = input_->width;
+ input_cloud->height = input_->height;
+ input_cloud->points.resize(input_->width * input_->height);
+    for (std::size_t i = 0; i < input_->size(); i++) {
+ (*input_cloud)[i].getVector3fMap() = (*input_)[i].getVector3fMap();
+ (*input_cloud)[i].getNormalVector3fMap() = (*normals_)[i].getNormalVector3fMap();
+ }
+
+ typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints(
+ new pcl::PointCloud<pcl::PointXYZI>);
+ pcl::SIFTKeypoint<pcl::PointNormal, pcl::PointXYZI> sift3D;
+ sift3D.setScales(0.003f, 3, 2);
+ sift3D.setMinimumContrast(0.0);
+ sift3D.setInputCloud(input_cloud);
+ sift3D.setSearchSurface(input_cloud);
+ sift3D.compute(*intensity_keypoints);
+ pcl::copyPointCloud(*intensity_keypoints, *keypoints);
+ }
+};
- void
- setResponseMethod (typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m)
- {
- m_ = m;
- }
+template <typename PointInT, typename NormalT = pcl::Normal>
+class HarrisKeypointExtractor : public KeypointExtractor<PointInT> {
- void
- setNonMaximaRadius(float r) {
- non_max_radius_ = r;
- }
+ pcl::PointCloud<pcl::Normal>::Ptr normals_;
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using KeypointExtractor<PointInT>::input_;
+ using KeypointExtractor<PointInT>::radius_;
+ typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m_;
+ float non_max_radius_;
+ float threshold_;
+
+public:
+ HarrisKeypointExtractor()
+ {
+ m_ = pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::HARRIS;
+ non_max_radius_ = 0.01f;
+ threshold_ = 0.f;
+ }
- void
- compute (PointInTPtr & keypoints)
- {
- keypoints.reset (new pcl::PointCloud<PointInT>);
+ bool
+ needNormals()
+ {
+ return true;
+ }
- if (normals_ == 0 || (normals_->points.size () != input_->points.size ()))
- PCL_WARN("HarrisKeypointExtractor -- Normals are not valid\n");
+ void
+ setNormals(const pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ normals_ = normals;
+ }
- typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
+ void
+ setThreshold(float t)
+ {
+ threshold_ = t;
+ }
- pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI> harris;
- harris.setNonMaxSupression (true);
- harris.setRefine (false);
- harris.setThreshold (threshold_);
- harris.setInputCloud (input_);
- harris.setNormals (normals_);
- harris.setRadius (non_max_radius_);
- harris.setRadiusSearch (non_max_radius_);
- harris.setMethod (m_);
- harris.compute (*intensity_keypoints);
+ void
+ setResponseMethod(
+ typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m)
+ {
+ m_ = m;
+ }
- pcl::copyPointCloud (*intensity_keypoints, *keypoints);
- }
- };
+ void
+ setNonMaximaRadius(float r)
+ {
+ non_max_radius_ = r;
+ }
- template<typename PointInT, typename NormalT = pcl::Normal>
- class SUSANKeypointExtractor : public KeypointExtractor<PointInT>
- {
+ void
+  compute(PointInTPtr& keypoints) override
+ {
+ keypoints.reset(new pcl::PointCloud<PointInT>);
+
+    if (!normals_ || (normals_->size() != input_->size()))
+ PCL_WARN("HarrisKeypointExtractor -- Normals are not valid\n");
+
+ typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints(
+ new pcl::PointCloud<pcl::PointXYZI>);
+
+ pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI> harris;
+ harris.setNonMaxSupression(true);
+ harris.setRefine(false);
+ harris.setThreshold(threshold_);
+ harris.setInputCloud(input_);
+ harris.setNormals(normals_);
+ harris.setRadius(non_max_radius_);
+ harris.setRadiusSearch(non_max_radius_);
+ harris.setMethod(m_);
+ harris.compute(*intensity_keypoints);
+
+ pcl::copyPointCloud(*intensity_keypoints, *keypoints);
+ }
+};
- pcl::PointCloud<pcl::Normal>::Ptr normals_;
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using KeypointExtractor<PointInT>::input_;
- using KeypointExtractor<PointInT>::radius_;
+template <typename PointInT, typename NormalT = pcl::Normal>
+class SUSANKeypointExtractor : public KeypointExtractor<PointInT> {
- public:
+ pcl::PointCloud<pcl::Normal>::Ptr normals_;
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using KeypointExtractor<PointInT>::input_;
+ using KeypointExtractor<PointInT>::radius_;
- SUSANKeypointExtractor ()
- {
+public:
+ SUSANKeypointExtractor() {}
- }
+ bool
+ needNormals()
+ {
+ return true;
+ }
- bool
- needNormals ()
- {
- return true;
- }
+ void
+ setNormals(const pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ normals_ = normals;
+ }
- void
- setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
- {
- normals_ = normals;
- }
+ void
+  compute(PointInTPtr& keypoints) override
+ {
+ keypoints.reset(new pcl::PointCloud<PointInT>);
+
+    if (!normals_ || (normals_->size() != input_->size()))
+ PCL_WARN("SUSANKeypointExtractor -- Normals are not valid\n");
- void
- compute (PointInTPtr & keypoints)
- {
- keypoints.reset (new pcl::PointCloud<PointInT>);
+ typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints(
+ new pcl::PointCloud<pcl::PointXYZI>);
- if (normals_ == 0 || (normals_->points.size () != input_->points.size ()))
- PCL_WARN("SUSANKeypointExtractor -- Normals are not valid\n");
+ pcl::SUSANKeypoint<PointInT, pcl::PointXYZI> susan;
+ susan.setNonMaxSupression(true);
+ susan.setInputCloud(input_);
+ susan.setNormals(normals_);
+ susan.setRadius(0.01f);
+ susan.setRadiusSearch(0.01f);
+ susan.compute(*intensity_keypoints);
- typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
+ pcl::copyPointCloud(*intensity_keypoints, *keypoints);
+ }
+};
- pcl::SUSANKeypoint<PointInT, pcl::PointXYZI> susan;
- susan.setNonMaxSupression (true);
- susan.setInputCloud (input_);
- susan.setNormals (normals_);
- susan.setRadius (0.01f);
- susan.setRadiusSearch (0.01f);
- susan.compute (*intensity_keypoints);
+template <typename PointInT, typename FeatureT>
+class LocalEstimator {
+protected:
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
- pcl::copyPointCloud (*intensity_keypoints, *keypoints);
- }
- };
-
- template<typename PointInT, typename FeatureT>
- class LocalEstimator
- {
- protected:
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
-
- std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> normal_estimator_;
- std::vector<std::shared_ptr<KeypointExtractor<PointInT>>> keypoint_extractor_; //this should be a vector
- float support_radius_;
- //bool filter_planar_;
-
- bool adaptative_MLS_;
-
- std::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_;
- std::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_;
-
- void
- computeKeypoints (PointInTPtr & cloud, PointInTPtr & keypoints, pcl::PointCloud<pcl::Normal>::Ptr & normals)
- {
- keypoints.reset (new pcl::PointCloud<PointInT>);
- for (std::size_t i = 0; i < keypoint_extractor_.size (); i++)
- {
- keypoint_extractor_[i]->setInputCloud (cloud);
- if (keypoint_extractor_[i]->needNormals ())
- keypoint_extractor_[i]->setNormals (normals);
-
- keypoint_extractor_[i]->setSupportRadius (support_radius_);
-
- PointInTPtr detected_keypoints;
- keypoint_extractor_[i]->compute (detected_keypoints);
- *keypoints += *detected_keypoints;
- }
- }
+ std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>>
+ normal_estimator_;
+ std::vector<std::shared_ptr<KeypointExtractor<PointInT>>> keypoint_extractor_;
+ float support_radius_;
- public:
+ bool adaptative_MLS_;
- LocalEstimator ()
- {
- adaptative_MLS_ = false;
- keypoint_extractor_.clear ();
- }
+ std::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_;
+ std::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_;
- virtual
- ~LocalEstimator() = default;
+ void
+ computeKeypoints(PointInTPtr& cloud,
+ PointInTPtr& keypoints,
+ pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ keypoints.reset(new pcl::PointCloud<PointInT>);
+ for (std::size_t i = 0; i < keypoint_extractor_.size(); i++) {
+ keypoint_extractor_[i]->setInputCloud(cloud);
+ if (keypoint_extractor_[i]->needNormals())
+ keypoint_extractor_[i]->setNormals(normals);
+
+ keypoint_extractor_[i]->setSupportRadius(support_radius_);
+
+ PointInTPtr detected_keypoints;
+ keypoint_extractor_[i]->compute(detected_keypoints);
+ *keypoints += *detected_keypoints;
+ }
+ }
- void
- setAdaptativeMLS (bool b)
- {
- adaptative_MLS_ = b;
- }
+public:
+ LocalEstimator()
+ {
+ adaptative_MLS_ = false;
+ keypoint_extractor_.clear();
+ }
- virtual bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures)=0;
+ virtual ~LocalEstimator() = default;
- void
- setNormalEstimator (std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> & ne)
- {
- normal_estimator_ = ne;
- }
+ void
+ setAdaptativeMLS(bool b)
+ {
+ adaptative_MLS_ = b;
+ }
- /**
- * \brief Right now only uniformSampling keypoint extractor is allowed
- */
- void
- addKeypointExtractor (std::shared_ptr<KeypointExtractor<PointInT>>& ke)
- {
- keypoint_extractor_.push_back (ke);
- }
+ virtual bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+ FeatureTPtr& signatures) = 0;
- void
- setKeypointExtractors (std::vector<std::shared_ptr<KeypointExtractor<PointInT>>>& ke)
- {
- keypoint_extractor_ = ke;
- }
+ void
+ setNormalEstimator(
+ std::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>>& ne)
+ {
+ normal_estimator_ = ne;
+ }
- void
- setSupportRadius (float r)
- {
- support_radius_ = r;
- }
+ /**
+ * \brief Right now only the uniformSampling keypoint extractor is allowed
+ */
+ void
+ addKeypointExtractor(std::shared_ptr<KeypointExtractor<PointInT>>& ke)
+ {
+ keypoint_extractor_.push_back(ke);
+ }
+
+ void
+ setKeypointExtractors(std::vector<std::shared_ptr<KeypointExtractor<PointInT>>>& ke)
+ {
+ keypoint_extractor_ = ke;
+ }
- };
+ void
+ setSupportRadius(float r)
+ {
+ support_radius_ = r;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/features/shot.h>
#include <pcl/io/pcd_io.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class SHOTLocalEstimation : public LocalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+
+ using LocalEstimator<PointInT, FeatureT>::support_radius_;
+ using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
+ using LocalEstimator<PointInT, FeatureT>::adaptative_MLS_;
+
+public:
+ bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+ FeatureTPtr& signatures) override
{
- template<typename PointInT, typename FeatureT>
- class SHOTLocalEstimation : public LocalEstimator<PointInT, FeatureT>
- {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
-
- using LocalEstimator<PointInT, FeatureT>::support_radius_;
- using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
- using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
- using LocalEstimator<PointInT, FeatureT>::adaptative_MLS_;
-
- public:
- bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures) override
- {
-
- if (!normal_estimator_)
- {
- PCL_ERROR("SHOTLocalEstimation :: This feature needs normals... please provide a normal estimator\n");
- return false;
- }
-
- if (keypoint_extractor_.empty ())
- {
- PCL_ERROR("SHOTLocalEstimation :: This feature needs a keypoint extractor... please provide one\n");
- return false;
- }
-
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- pcl::MovingLeastSquares<PointInT, PointInT> mls;
- if(adaptative_MLS_) {
- typename search::KdTree<PointInT>::Ptr tree;
- Eigen::Vector4f centroid_cluster;
- pcl::compute3DCentroid (*in, centroid_cluster);
- float dist_to_sensor = centroid_cluster.norm();
- float sigma = dist_to_sensor * 0.01f;
- mls.setSearchMethod(tree);
- mls.setSearchRadius (sigma);
- mls.setUpsamplingMethod (mls.SAMPLE_LOCAL_PLANE);
- mls.setUpsamplingRadius (0.002);
- mls.setUpsamplingStepSize (0.001);
- }
-
- normals.reset (new pcl::PointCloud<pcl::Normal>);
- {
- pcl::ScopeTime t ("Compute normals");
- normal_estimator_->estimate (in, processed, normals);
- }
-
- if(adaptative_MLS_) {
- mls.setInputCloud(processed);
-
- PointInTPtr filtered(new pcl::PointCloud<PointInT>);
- mls.process(*filtered);
-
- processed.reset(new pcl::PointCloud<PointInT>);
- normals.reset (new pcl::PointCloud<pcl::Normal>);
- {
- pcl::ScopeTime t ("Compute normals after MLS");
- filtered->is_dense = false;
- normal_estimator_->estimate (filtered, processed, normals);
- }
- }
-
- //compute normals
- //pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- //normal_estimator_->estimate (in, processed, normals);
-
- //compute keypoints
- this->computeKeypoints(processed, keypoints, normals);
- std::cout << " " << normals->points.size() << " " << processed->points.size() << std::endl;
-
- //compute keypoints
- /*keypoint_extractor_->setInputCloud (processed);
- if(keypoint_extractor_->needNormals())
- keypoint_extractor_->setNormals(normals);
-
- std::cout << " " << normals->points.size() << " " << processed->points.size() << std::endl;
-
- keypoint_extractor_->setSupportRadius(support_radius_);
- keypoint_extractor_->compute (keypoints);*/
-
- if (keypoints->points.empty ())
- {
- PCL_WARN("SHOTLocalEstimation :: No keypoints were found\n");
- return false;
- }
-
- std::cout << keypoints->points.size() << " " << normals->points.size() << " " << processed->points.size() << std::endl;
- //compute signatures
- using SHOTEstimator = pcl::SHOTEstimation<PointInT, pcl::Normal, pcl::SHOT352>;
- typename pcl::search::KdTree<PointInT>::Ptr tree (new pcl::search::KdTree<PointInT>);
-
- pcl::PointCloud<pcl::SHOT352>::Ptr shots (new pcl::PointCloud<pcl::SHOT352>);
-
- SHOTEstimator shot_estimate;
- shot_estimate.setSearchMethod (tree);
- shot_estimate.setInputCloud (keypoints);
- shot_estimate.setSearchSurface(processed);
- shot_estimate.setInputNormals (normals);
- shot_estimate.setRadiusSearch (support_radius_);
- shot_estimate.compute (*shots);
- signatures->resize (shots->points.size ());
- signatures->width = static_cast<int> (shots->points.size ());
- signatures->height = 1;
-
- int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);
-
- for (std::size_t k = 0; k < shots->points.size (); k++)
- for (int i = 0; i < size_feat; i++)
- signatures->points[k].histogram[i] = shots->points[k].descriptor[i];
-
- return true;
-
- }
-
- private:
-
-
- };
+ if (!normal_estimator_) {
+ PCL_ERROR("SHOTLocalEstimation :: This feature needs normals... "
+ "please provide a normal estimator\n");
+ return false;
+ }
+
+ if (keypoint_extractor_.empty()) {
+ PCL_ERROR("SHOTLocalEstimation :: This feature needs a keypoint extractor... "
+ "please provide one\n");
+ return false;
+ }
+
+ pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
+ pcl::MovingLeastSquares<PointInT, PointInT> mls;
+ if (adaptative_MLS_) {
+ typename search::KdTree<PointInT>::Ptr tree;
+ Eigen::Vector4f centroid_cluster;
+ pcl::compute3DCentroid(*in, centroid_cluster);
+ float dist_to_sensor = centroid_cluster.norm();
+ float sigma = dist_to_sensor * 0.01f;
+ mls.setSearchMethod(tree);
+ mls.setSearchRadius(sigma);
+ mls.setUpsamplingMethod(mls.SAMPLE_LOCAL_PLANE);
+ mls.setUpsamplingRadius(0.002);
+ mls.setUpsamplingStepSize(0.001);
+ }
+
+ normals.reset(new pcl::PointCloud<pcl::Normal>);
+ {
+ pcl::ScopeTime t("Compute normals");
+ normal_estimator_->estimate(in, processed, normals);
+ }
+
+ if (adaptative_MLS_) {
+ mls.setInputCloud(processed);
+
+ PointInTPtr filtered(new pcl::PointCloud<PointInT>);
+ mls.process(*filtered);
+
+ processed.reset(new pcl::PointCloud<PointInT>);
+ normals.reset(new pcl::PointCloud<pcl::Normal>);
+ {
+ pcl::ScopeTime t("Compute normals after MLS");
+ filtered->is_dense = false;
+ normal_estimator_->estimate(filtered, processed, normals);
+ }
+ }
+
+ // compute keypoints
+ this->computeKeypoints(processed, keypoints, normals);
+ std::cout << " " << normals->size() << " " << processed->size() << std::endl;
+
+ if (keypoints->points.empty()) {
+ PCL_WARN("SHOTLocalEstimation :: No keypoints were found\n");
+ return false;
+ }
+
+ std::cout << keypoints->size() << " " << normals->size() << " " << processed->size()
+ << std::endl;
+ // compute signatures
+ using SHOTEstimator = pcl::SHOTEstimation<PointInT, pcl::Normal, pcl::SHOT352>;
+ typename pcl::search::KdTree<PointInT>::Ptr tree(new pcl::search::KdTree<PointInT>);
+
+ pcl::PointCloud<pcl::SHOT352>::Ptr shots(new pcl::PointCloud<pcl::SHOT352>);
+
+ SHOTEstimator shot_estimate;
+ shot_estimate.setSearchMethod(tree);
+ shot_estimate.setInputCloud(keypoints);
+ shot_estimate.setSearchSurface(processed);
+ shot_estimate.setInputNormals(normals);
+ shot_estimate.setRadiusSearch(support_radius_);
+ shot_estimate.compute(*shots);
+ signatures->resize(shots->size());
+ signatures->width = shots->size();
+ signatures->height = 1;
+
+ int size_feat = sizeof((*signatures)[0].histogram) / sizeof(float);
+
+ for (std::size_t k = 0; k < shots->size(); k++)
+ for (int i = 0; i < size_feat; i++)
+ (*signatures)[k].histogram[i] = (*shots)[k].descriptor[i];
+
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
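
// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the change above, of how the
// pieces in this diff fit together: a PreProcessorAndNormalEstimator supplies
// normals, a KeypointExtractor supplies keypoints, and SHOTLocalEstimation
// computes one signature per keypoint. The point/feature types, the radii and
// the choice of HarrisKeypointExtractor are assumptions; the headers shown
// earlier in this patch (plus <memory>) are assumed to be included.
// ---------------------------------------------------------------------------
void
exampleShotPipeline(pcl::PointCloud<pcl::PointXYZ>::Ptr& view)
{
  using PointT = pcl::PointXYZ;
  using FeatureT = pcl::Histogram<352>; // assumed container for the 352-bin SHOT histogram

  auto normal_estimator = std::make_shared<
      pcl::rec_3d_framework::PreProcessorAndNormalEstimator<PointT, pcl::Normal>>();
  normal_estimator->setCMR(false);
  normal_estimator->setDoVoxelGrid(true);
  normal_estimator->setValuesForCMRFalse(0.003f, 0.012f); // assumed leaf size / normal radius

  std::shared_ptr<pcl::rec_3d_framework::KeypointExtractor<PointT>> extractor(
      new pcl::rec_3d_framework::HarrisKeypointExtractor<PointT>);

  pcl::rec_3d_framework::SHOTLocalEstimation<PointT, FeatureT> estimation;
  estimation.setNormalEstimator(normal_estimator);
  estimation.addKeypointExtractor(extractor);
  estimation.setSupportRadius(0.04f); // assumed SHOT support radius

  pcl::PointCloud<PointT>::Ptr processed(new pcl::PointCloud<PointT>);
  pcl::PointCloud<PointT>::Ptr keypoints;
  pcl::PointCloud<FeatureT>::Ptr signatures(new pcl::PointCloud<FeatureT>);
  if (!estimation.estimate(view, processed, keypoints, signatures))
    PCL_WARN("SHOT estimation failed (no normals, no extractor or no keypoints)\n");
}
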
#include <pcl/features/shot_omp.h>
#include <pcl/io/pcd_io.h>
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename FeatureT>
+class SHOTLocalEstimationOMP : public LocalEstimator<PointInT, FeatureT> {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
+ using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
+
+ using LocalEstimator<PointInT, FeatureT>::support_radius_;
+ using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
+ using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
+ using LocalEstimator<PointInT, FeatureT>::neighborhood_indices_;
+ using LocalEstimator<PointInT, FeatureT>::neighborhood_dist_;
+ using LocalEstimator<PointInT, FeatureT>::adaptative_MLS_;
+
+public:
+ bool
+ estimate(PointInTPtr& in,
+ PointInTPtr& processed,
+ PointInTPtr& keypoints,
+ FeatureTPtr& signatures) override
{
- template<typename PointInT, typename FeatureT>
- class SHOTLocalEstimationOMP : public LocalEstimator<PointInT, FeatureT>
+ if (!normal_estimator_) {
+ PCL_ERROR("SHOTLocalEstimationOMP :: This feature needs normals... please "
+ "provide a normal estimator\n");
+ return false;
+ }
+
+ if (keypoint_extractor_.empty()) {
+ PCL_ERROR("SHOTLocalEstimationOMP :: This feature needs a keypoint extractor... "
+ "please provide one\n");
+ return false;
+ }
+
+ pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
+ pcl::MovingLeastSquares<PointInT, PointInT> mls;
+ if (adaptative_MLS_) {
+ typename search::KdTree<PointInT>::Ptr tree;
+ Eigen::Vector4f centroid_cluster;
+ pcl::compute3DCentroid(*in, centroid_cluster);
+ float dist_to_sensor = centroid_cluster.norm();
+ float sigma = dist_to_sensor * 0.01f;
+ mls.setSearchMethod(tree);
+ mls.setSearchRadius(sigma);
+ mls.setUpsamplingMethod(mls.SAMPLE_LOCAL_PLANE);
+ mls.setUpsamplingRadius(0.002);
+ mls.setUpsamplingStepSize(0.001);
+ }
+
+ normals.reset(new pcl::PointCloud<pcl::Normal>);
+ {
+ pcl::ScopeTime t("Compute normals");
+ normal_estimator_->estimate(in, processed, normals);
+ }
+
+ if (adaptative_MLS_) {
+ mls.setInputCloud(processed);
+
+ PointInTPtr filtered(new pcl::PointCloud<PointInT>);
+ mls.process(*filtered);
+
+ processed.reset(new pcl::PointCloud<PointInT>);
+ normals.reset(new pcl::PointCloud<pcl::Normal>);
{
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using FeatureTPtr = typename pcl::PointCloud<FeatureT>::Ptr;
- using KeypointCloud = pcl::PointCloud<pcl::PointXYZ>;
-
- using LocalEstimator<PointInT, FeatureT>::support_radius_;
- using LocalEstimator<PointInT, FeatureT>::normal_estimator_;
- using LocalEstimator<PointInT, FeatureT>::keypoint_extractor_;
- using LocalEstimator<PointInT, FeatureT>::neighborhood_indices_;
- using LocalEstimator<PointInT, FeatureT>::neighborhood_dist_;
- using LocalEstimator<PointInT, FeatureT>::adaptative_MLS_;
-
- public:
- bool
- estimate (PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures) override
- {
- if (!normal_estimator_)
- {
- PCL_ERROR("SHOTLocalEstimationOMP :: This feature needs normals... please provide a normal estimator\n");
- return false;
- }
-
- if (keypoint_extractor_.empty ())
- {
- PCL_ERROR("SHOTLocalEstimationOMP :: This feature needs a keypoint extractor... please provide one\n");
- return false;
- }
-
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
- pcl::MovingLeastSquares<PointInT, PointInT> mls;
- if (adaptative_MLS_)
- {
- typename search::KdTree<PointInT>::Ptr tree;
- Eigen::Vector4f centroid_cluster;
- pcl::compute3DCentroid (*in, centroid_cluster);
- float dist_to_sensor = centroid_cluster.norm ();
- float sigma = dist_to_sensor * 0.01f;
- mls.setSearchMethod (tree);
- mls.setSearchRadius (sigma);
- mls.setUpsamplingMethod (mls.SAMPLE_LOCAL_PLANE);
- mls.setUpsamplingRadius (0.002);
- mls.setUpsamplingStepSize (0.001);
- }
-
- normals.reset (new pcl::PointCloud<pcl::Normal>);
- {
- pcl::ScopeTime t ("Compute normals");
- normal_estimator_->estimate (in, processed, normals);
- }
-
- if (adaptative_MLS_)
- {
- mls.setInputCloud (processed);
-
- PointInTPtr filtered (new pcl::PointCloud<PointInT>);
- mls.process (*filtered);
-
- processed.reset (new pcl::PointCloud<PointInT>);
- normals.reset (new pcl::PointCloud<pcl::Normal>);
- {
- pcl::ScopeTime t ("Compute normals after MLS");
- filtered->is_dense = false;
- normal_estimator_->estimate (filtered, processed, normals);
- }
- }
-
- this->computeKeypoints(processed, keypoints, normals);
- std::cout << " " << normals->points.size() << " " << processed->points.size() << std::endl;
-
- if (keypoints->points.empty ())
- {
- PCL_WARN("SHOTLocalEstimationOMP :: No keypoints were found\n");
- return false;
- }
-
- //compute signatures
- using SHOTEstimator = pcl::SHOTEstimationOMP<PointInT, pcl::Normal, pcl::SHOT352>;
- typename pcl::search::KdTree<PointInT>::Ptr tree (new pcl::search::KdTree<PointInT>);
- tree->setInputCloud (processed);
-
- pcl::PointCloud<pcl::SHOT352>::Ptr shots (new pcl::PointCloud<pcl::SHOT352>);
- SHOTEstimator shot_estimate;
- shot_estimate.setNumberOfThreads (8);
- shot_estimate.setSearchMethod (tree);
- shot_estimate.setInputCloud (keypoints);
- shot_estimate.setSearchSurface(processed);
- shot_estimate.setInputNormals (normals);
- shot_estimate.setRadiusSearch (support_radius_);
-
- {
- pcl::ScopeTime t ("Compute SHOT");
- shot_estimate.compute (*shots);
- }
-
- signatures->resize (shots->points.size ());
- signatures->width = static_cast<int> (shots->points.size ());
- signatures->height = 1;
-
- int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);
-
- int good = 0;
- for (const auto &point : shots->points)
- {
-
- int NaNs = 0;
- for (int i = 0; i < size_feat; i++)
- {
- if (!std::isfinite(point.descriptor[i]))
- NaNs++;
- }
-
- if (NaNs == 0)
- {
- for (int i = 0; i < size_feat; i++)
- {
- signatures->points[good].histogram[i] = point.descriptor[i];
- }
-
- good++;
- }
- }
-
- signatures->resize (good);
- signatures->width = good;
-
- return true;
-
+ pcl::ScopeTime t("Compute normals after MLS");
+ filtered->is_dense = false;
+ normal_estimator_->estimate(filtered, processed, normals);
+ }
+ }
+
+ this->computeKeypoints(processed, keypoints, normals);
+ std::cout << " " << normals->size() << " " << processed->size() << std::endl;
+
+ if (keypoints->points.empty()) {
+ PCL_WARN("SHOTLocalEstimationOMP :: No keypoints were found\n");
+ return false;
+ }
+
+ // compute signatures
+ using SHOTEstimator = pcl::SHOTEstimationOMP<PointInT, pcl::Normal, pcl::SHOT352>;
+ typename pcl::search::KdTree<PointInT>::Ptr tree(new pcl::search::KdTree<PointInT>);
+ tree->setInputCloud(processed);
+
+ pcl::PointCloud<pcl::SHOT352>::Ptr shots(new pcl::PointCloud<pcl::SHOT352>);
+ SHOTEstimator shot_estimate;
+ shot_estimate.setNumberOfThreads(8);
+ shot_estimate.setSearchMethod(tree);
+ shot_estimate.setInputCloud(keypoints);
+ shot_estimate.setSearchSurface(processed);
+ shot_estimate.setInputNormals(normals);
+ shot_estimate.setRadiusSearch(support_radius_);
+
+ {
+ pcl::ScopeTime t("Compute SHOT");
+ shot_estimate.compute(*shots);
+ }
+
+ signatures->resize(shots->size());
+ signatures->width = shots->size();
+ signatures->height = 1;
+
+ int size_feat = sizeof((*signatures)[0].histogram) / sizeof(float);
+
+ int good = 0;
+ for (const auto& point : shots->points) {
+
+ int NaNs = 0;
+ for (int i = 0; i < size_feat; i++) {
+ if (!std::isfinite(point.descriptor[i]))
+ NaNs++;
+ }
+
+ if (NaNs == 0) {
+ for (int i = 0; i < size_feat; i++) {
+ (*signatures)[good].histogram[i] = point.descriptor[i];
}
- };
+ good++;
+ }
+ }
+ signatures->resize(good);
+ signatures->width = good;
+
+ return true;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
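
// ---------------------------------------------------------------------------
// Editor's note (not part of the change above): unlike SHOTLocalEstimation,
// the OMP variant drops any signature whose descriptor contains NaNs and then
// shrinks the output cloud, so after estimate() signatures->size() can be
// smaller than keypoints->size(); callers that assume a one-to-one keypoint /
// signature correspondence have to account for that. Minimal sketch, reusing
// the (assumed) setup from the previous example:
//
//   pcl::rec_3d_framework::SHOTLocalEstimationOMP<PointT, FeatureT> estimation_omp;
//   estimation_omp.setNormalEstimator(normal_estimator);
//   estimation_omp.addKeypointExtractor(extractor);
//   estimation_omp.setSupportRadius(0.04f);
//   estimation_omp.estimate(view, processed, keypoints, signatures);
//   assert(signatures->size() <= keypoints->size());
// ---------------------------------------------------------------------------
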
#pragma once
+#include <pcl/common/time.h>
+#include <pcl/features/integral_image_normal.h>
+#include <pcl/features/normal_3d.h>
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/voxel_grid.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/features/integral_image_normal.h>
-#include <pcl/common/time.h>
-#include <pcl/memory.h> // for pcl::make_shared
+#include <pcl/memory.h> // for pcl::make_shared
+#include <pcl/types.h> // for pcl::index_t
+
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT, typename PointOutT>
+class PreProcessorAndNormalEstimator {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
-namespace pcl
-{
- namespace rec_3d_framework
+ float
+ computeMeshResolution(PointInTPtr& input)
{
- template<typename PointInT, typename PointOutT>
- class PreProcessorAndNormalEstimator
- {
+ using KdTreeInPtr = typename pcl::KdTree<PointInT>::Ptr;
+ KdTreeInPtr tree = pcl::make_shared<pcl::KdTreeFLANN<PointInT>>(false);
+ tree->setInputCloud(input);
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ std::vector<int> nn_indices(9);
+ std::vector<float> nn_distances(9);
+ std::vector<int> src_indices;
- float
- computeMeshResolution (PointInTPtr & input)
- {
- using KdTreeInPtr = typename pcl::KdTree<PointInT>::Ptr;
- KdTreeInPtr tree = pcl::make_shared<pcl::KdTreeFLANN<PointInT> > (false);
- tree->setInputCloud (input);
+ float sum_distances = 0.0;
+ std::vector<float> avg_distances(input->size());
+ // Iterate through the source data set
+ for (std::size_t i = 0; i < input->size(); ++i) {
+ tree->nearestKSearch((*input)[i], 9, nn_indices, nn_distances);
- std::vector<int> nn_indices (9);
- std::vector<float> nn_distances (9);
- std::vector<int> src_indices;
+ float avg_dist_neighbours = 0.0;
+ for (std::size_t j = 1; j < nn_indices.size(); j++)
+ avg_dist_neighbours += std::sqrt(nn_distances[j]);
- float sum_distances = 0.0;
- std::vector<float> avg_distances (input->points.size ());
- // Iterate through the source data set
- for (std::size_t i = 0; i < input->points.size (); ++i)
- {
- tree->nearestKSearch (input->points[i], 9, nn_indices, nn_distances);
+ avg_dist_neighbours /= static_cast<float>(nn_indices.size());
- float avg_dist_neighbours = 0.0;
- for (std::size_t j = 1; j < nn_indices.size (); j++)
- avg_dist_neighbours += std::sqrt (nn_distances[j]);
+ avg_distances[i] = avg_dist_neighbours;
+ sum_distances += avg_dist_neighbours;
+ }
- avg_dist_neighbours /= static_cast<float> (nn_indices.size ());
+ std::sort(avg_distances.begin(), avg_distances.end());
+ float avg = avg_distances[static_cast<int>(avg_distances.size()) / 2 + 1];
+ return avg;
+ }
- avg_distances[i] = avg_dist_neighbours;
- sum_distances += avg_dist_neighbours;
- }
+public:
+ bool compute_mesh_resolution_;
+ bool do_voxel_grid_;
+ bool remove_outliers_;
- std::sort (avg_distances.begin (), avg_distances.end ());
- float avg = avg_distances[static_cast<int> (avg_distances.size ()) / 2 + 1];
- return avg;
- }
+ // these values are used when CMR=false
+ float grid_resolution_;
+ float normal_radius_;
- public:
+ // these are used when CMR=true
+ float factor_normals_;
+ float factor_voxel_grid_;
+ float mesh_resolution_;
- bool compute_mesh_resolution_;
- bool do_voxel_grid_;
- bool remove_outliers_;
+ PreProcessorAndNormalEstimator()
+ {
+ remove_outliers_ = do_voxel_grid_ = compute_mesh_resolution_ = false;
+ }
- //this values are used when CMR=false
- float grid_resolution_;
- float normal_radius_;
+ void
+ setFactorsForCMR(float f1, float f2)
+ {
+ factor_voxel_grid_ = f1;
+ factor_normals_ = f2;
+ }
- //this are used when CMR=true
- float factor_normals_;
- float factor_voxel_grid_;
- float mesh_resolution_;
+ void
+ setValuesForCMRFalse(float f1, float f2)
+ {
+ grid_resolution_ = f1;
+ normal_radius_ = f2;
+ }
- PreProcessorAndNormalEstimator ()
- {
- remove_outliers_ = do_voxel_grid_ = compute_mesh_resolution_ = false;
- }
+ void
+ setDoVoxelGrid(bool b)
+ {
+ do_voxel_grid_ = b;
+ }
- void
- setFactorsForCMR (float f1, float f2)
- {
- factor_voxel_grid_ = f1;
- factor_normals_ = f2;
- }
+ void
+ setRemoveOutliers(bool b)
+ {
+ remove_outliers_ = b;
+ }
- void
- setValuesForCMRFalse (float f1, float f2)
- {
- grid_resolution_ = f1;
- normal_radius_ = f2;
- }
+ void
+ setCMR(bool b)
+ {
+ compute_mesh_resolution_ = b;
+ }
- void
- setDoVoxelGrid (bool b)
- {
- do_voxel_grid_ = b;
- }
+ void
+ estimate(PointInTPtr& in,
+ PointInTPtr& out,
+ pcl::PointCloud<pcl::Normal>::Ptr& normals)
+ {
+ if (compute_mesh_resolution_) {
+ mesh_resolution_ = computeMeshResolution(in);
+ std::cout << "compute mesh resolution:" << mesh_resolution_ << std::endl;
+ }
+
+ if (do_voxel_grid_) {
+ pcl::ScopeTime t("Voxel grid...");
+ float voxel_grid_size = grid_resolution_;
+ if (compute_mesh_resolution_) {
+ voxel_grid_size = mesh_resolution_ * factor_voxel_grid_;
+ }
+
+ pcl::VoxelGrid<PointInT> grid_;
+ grid_.setInputCloud(in);
+ grid_.setLeafSize(voxel_grid_size, voxel_grid_size, voxel_grid_size);
+ grid_.setDownsampleAllData(true);
+ grid_.filter(*out);
+ }
+ else {
+ out = in;
+ }
+
+ if (out->points.empty()) {
+ PCL_WARN("NORMAL estimator: Cloud has no points after voxel grid, "
+ "won't be able to compute normals!\n");
+ return;
+ }
+
+ if (remove_outliers_) {
+ pcl::ScopeTime t("remove_outliers_...");
+ PointInTPtr out2(new pcl::PointCloud<PointInT>());
+ float radius = normal_radius_;
+ if (compute_mesh_resolution_) {
+ radius = mesh_resolution_ * factor_normals_;
+ if (do_voxel_grid_)
+ radius *= factor_voxel_grid_;
+ }
+
+ // in synthetic views the renderer grazes some parts of the objects,
+ // creating a very sparse set of points that makes the normals very noisy;
+ // remove these points
+ pcl::RadiusOutlierRemoval<PointInT> sor;
+ sor.setInputCloud(out);
+ sor.setRadiusSearch(radius);
+ sor.setMinNeighborsInRadius(16);
+ sor.filter(*out2);
+ out = out2;
+ }
+
+ if (out->points.empty()) {
+ PCL_WARN("NORMAL estimator: Cloud has no points after removing outliers...!\n");
+ return;
+ }
+
+ float radius = normal_radius_;
+ if (compute_mesh_resolution_) {
+ radius = mesh_resolution_ * factor_normals_;
+ if (do_voxel_grid_)
+ radius *= factor_voxel_grid_;
+ }
+
+ if (out->isOrganized()) {
+ pcl::IntegralImageNormalEstimation<PointInT, pcl::Normal> n3d;
+ n3d.setNormalEstimationMethod(n3d.COVARIANCE_MATRIX);
+ n3d.setInputCloud(out);
+ n3d.setRadiusSearch(radius);
+ n3d.setKSearch(0);
+ {
+ pcl::ScopeTime t("compute normals...");
+ n3d.compute(*normals);
+ }
+ }
+ else {
- void
- setRemoveOutliers (bool b)
- {
- remove_outliers_ = b;
+ // check nans before computing normals
+ {
+ pcl::ScopeTime t("check nans...");
+ pcl::index_t j = 0;
+ for (const auto& point : *out) {
+ if (!isXYZFinite(point))
+ continue;
+
+ (*out)[j] = point;
+ j++;
}
- void
- setCMR (bool b)
- {
- compute_mesh_resolution_ = b;
+ if (j != static_cast<pcl::index_t>(out->size())) {
+ PCL_ERROR("Contain nans...");
}
- void
- estimate (PointInTPtr & in, PointInTPtr & out, pcl::PointCloud<pcl::Normal>::Ptr & normals)
- {
- if (compute_mesh_resolution_)
- {
- mesh_resolution_ = computeMeshResolution (in);
- std::cout << "compute mesh resolution:" << mesh_resolution_ << std::endl;
- }
-
- if (do_voxel_grid_)
- {
- pcl::ScopeTime t ("Voxel grid...");
- float voxel_grid_size = grid_resolution_;
- if (compute_mesh_resolution_)
- {
- voxel_grid_size = mesh_resolution_ * factor_voxel_grid_;
- }
-
- pcl::VoxelGrid<PointInT> grid_;
- grid_.setInputCloud (in);
- grid_.setLeafSize (voxel_grid_size, voxel_grid_size, voxel_grid_size);
- grid_.setDownsampleAllData (true);
- grid_.filter (*out);
- }
- else
- {
- out = in;
- }
-
- if (out->points.empty ())
- {
- PCL_WARN("NORMAL estimator: Cloud has no points after voxel grid, won't be able to compute normals!\n");
- return;
- }
-
- if (remove_outliers_)
- {
- pcl::ScopeTime t ("remove_outliers_...");
- PointInTPtr out2 (new pcl::PointCloud<PointInT> ());
- float radius = normal_radius_;
- if (compute_mesh_resolution_)
- {
- radius = mesh_resolution_ * factor_normals_;
- if (do_voxel_grid_)
- radius *= factor_voxel_grid_;
- }
-
- //in synthetic views the render grazes some parts of the objects
- //thus creating a very sparse set of points that causes the normals to be very noisy
- //remove these points
- pcl::RadiusOutlierRemoval<PointInT> sor;
- sor.setInputCloud (out);
- sor.setRadiusSearch (radius);
- sor.setMinNeighborsInRadius (16);
- sor.filter (*out2);
- out = out2;
-
- }
-
- if (out->points.empty ())
- {
- PCL_WARN("NORMAL estimator: Cloud has no points after removing outliers...!\n");
- return;
- }
-
- float radius = normal_radius_;
- if (compute_mesh_resolution_)
- {
- radius = mesh_resolution_ * factor_normals_;
- if (do_voxel_grid_)
- radius *= factor_voxel_grid_;
- }
-
- if (out->isOrganized ())
- {
- pcl::IntegralImageNormalEstimation<PointInT, pcl::Normal> n3d;
- n3d.setNormalEstimationMethod (n3d.COVARIANCE_MATRIX);
- //n3d.setNormalEstimationMethod (n3d.AVERAGE_3D_GRADIENT);
- n3d.setInputCloud (out);
- n3d.setRadiusSearch (radius);
- n3d.setKSearch (0);
- //n3d.setMaxDepthChangeFactor(0.02f);
- //n3d.setNormalSmoothingSize(15.0f);
- {
- pcl::ScopeTime t ("compute normals...");
- n3d.compute (*normals);
- }
- }
- else
- {
-
- //check nans before computing normals
- {
- pcl::ScopeTime t ("check nans...");
- int j = 0;
- for (std::size_t i = 0; i < out->points.size (); ++i)
- {
- if (!std::isfinite (out->points[i].x) || !std::isfinite (out->points[i].y) || !std::isfinite (out->points[i].z))
- continue;
-
- out->points[j] = out->points[i];
- j++;
- }
-
- if (j != static_cast<int> (out->points.size ()))
- {
- PCL_ERROR("Contain nans...");
- }
-
- out->points.resize (j);
- out->width = j;
- out->height = 1;
- }
-
- pcl::NormalEstimation<PointInT, pcl::Normal> n3d;
- typename pcl::search::KdTree<PointInT>::Ptr normals_tree (new pcl::search::KdTree<PointInT>);
- normals_tree->setInputCloud (out);
- n3d.setRadiusSearch (radius);
- n3d.setSearchMethod (normals_tree);
- n3d.setInputCloud (out);
- {
- pcl::ScopeTime t ("compute normals not organized...");
- n3d.compute (*normals);
- }
- }
-
- //check nans...
- if (!out->isOrganized ())
- {
- pcl::ScopeTime t ("check nans...");
- int j = 0;
- for (std::size_t i = 0; i < normals->points.size (); ++i)
- {
- if (!std::isfinite (normals->points[i].normal_x) || !std::isfinite (normals->points[i].normal_y)
- || !std::isfinite (normals->points[i].normal_z))
- continue;
-
- normals->points[j] = normals->points[i];
- out->points[j] = out->points[i];
- j++;
- }
-
- normals->points.resize (j);
- normals->width = j;
- normals->height = 1;
-
- out->points.resize (j);
- out->width = j;
- out->height = 1;
- }
- else
- {
- //is is organized, we set the xyz points to NaN
- pcl::ScopeTime t ("check nans organized...");
- bool NaNs = false;
- for (std::size_t i = 0; i < normals->points.size (); ++i)
- {
- if (std::isfinite (normals->points[i].normal_x) && std::isfinite (normals->points[i].normal_y)
- && std::isfinite (normals->points[i].normal_z))
- continue;
-
- NaNs = true;
-
- out->points[i].x = out->points[i].y = out->points[i].z = std::numeric_limits<float>::quiet_NaN ();
- }
-
- if (NaNs)
- {
- PCL_WARN("normals contain NaNs\n");
- out->is_dense = false;
- }
- }
-
- /*for (std::size_t i = 0; i < out->points.size (); i++)
- {
- int r, g, b;
- r = static_cast<int> (out->points[i].r);
- g = static_cast<int> (out->points[i].g);
- b = static_cast<int> (out->points[i].b);
- std::cout << "in normal estimator:" << r << " " << g << " " << b << std::endl;
- }*/
- }
- };
+ out->points.resize(j);
+ out->width = j;
+ out->height = 1;
+ }
+
+ pcl::NormalEstimation<PointInT, pcl::Normal> n3d;
+ typename pcl::search::KdTree<PointInT>::Ptr normals_tree(
+ new pcl::search::KdTree<PointInT>);
+ normals_tree->setInputCloud(out);
+ n3d.setRadiusSearch(radius);
+ n3d.setSearchMethod(normals_tree);
+ n3d.setInputCloud(out);
+ {
+ pcl::ScopeTime t("compute normals not organized...");
+ n3d.compute(*normals);
+ }
+ }
+
+ // check nans...
+ if (!out->isOrganized()) {
+ pcl::ScopeTime t("check nans...");
+ int j = 0;
+ for (std::size_t i = 0; i < normals->size(); ++i) {
+ if (!isNormalFinite((*normals)[i]))
+ continue;
+
+ (*normals)[j] = (*normals)[i];
+ (*out)[j] = (*out)[i];
+ j++;
+ }
+
+ normals->points.resize(j);
+ normals->width = j;
+ normals->height = 1;
+
+ out->points.resize(j);
+ out->width = j;
+ out->height = 1;
+ }
+ else {
+ // if it is organized, we set the xyz points to NaN
+ pcl::ScopeTime t("check nans organized...");
+ bool NaNs = false;
+ for (std::size_t i = 0; i < normals->size(); ++i) {
+ if (isNormalFinite((*normals)[i]))
+ continue;
+
+ NaNs = true;
+
+ (*out)[i].x = (*out)[i].y = (*out)[i].z =
+ std::numeric_limits<float>::quiet_NaN();
+ }
+
+ if (NaNs) {
+ PCL_WARN("normals contain NaNs\n");
+ out->is_dense = false;
+ }
+ }
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
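
// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the change above, of the
// CMR=true path of PreProcessorAndNormalEstimator: computeMeshResolution()
// returns roughly the median average 8-neighbour distance, and both the voxel
// leaf size and the normal-search radius are scaled from it. The point type
// and the factors below are assumptions.
// ---------------------------------------------------------------------------
void
examplePreprocessWithCMR(pcl::PointCloud<pcl::PointXYZ>::Ptr& in)
{
  pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal> ne;
  ne.setCMR(true);          // derive sizes from the cloud's own resolution
  ne.setDoVoxelGrid(true);
  ne.setRemoveOutliers(false);
  // leaf size = 3 * resolution; normal radius = 7 * resolution (further scaled
  // by the voxel factor because the grid is enabled)
  ne.setFactorsForCMR(3.f, 7.f);

  pcl::PointCloud<pcl::PointXYZ>::Ptr processed(new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
  ne.estimate(in, processed, normals);
}
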
#pragma once
#include <pcl/apps/3d_rec_framework/pc_source/source.h>
+#include <pcl/apps/3d_rec_framework/utils/vtk_model_sampling.h>
#include <pcl/apps/render_views_tesselated_sphere.h>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/apps/3d_rec_framework/utils/vtk_model_sampling.h>
#include <vtkTransformPolyDataFilter.h>
#include <functional>
-namespace pcl
-{
- namespace rec_3d_framework
- {
+namespace pcl {
+namespace rec_3d_framework {
- /**
- * \brief Data source class based on mesh models
- * \author Aitor Aldoma
- */
-
- template<typename PointInT>
- class MeshSource : public Source<PointInT>
- {
- using SourceT = Source<PointInT>;
- using ModelT = Model<PointInT>;
-
- using SourceT::path_;
- using SourceT::models_;
- using SourceT::createTrainingDir;
- using SourceT::getModelsInDirectory;
- using SourceT::model_scale_;
-
- int tes_level_;
- int resolution_;
- float radius_sphere_;
- float view_angle_;
- bool gen_organized_;
- std::function<bool
- (const Eigen::Vector3f &)> campos_constraints_func_;
-
- public:
-
- using SourceT::setFilterDuplicateViews;
-
- MeshSource () :
- SourceT ()
- {
- gen_organized_ = false;
- }
+/**
+ * \brief Data source class based on mesh models
+ * \author Aitor Aldoma
+ */
- void
- setTesselationLevel (int lev)
- {
- tes_level_ = lev;
- }
+template <typename PointInT>
+class MeshSource : public Source<PointInT> {
+ using SourceT = Source<PointInT>;
+ using ModelT = Model<PointInT>;
- void
- setCamPosConstraints (std::function<bool
- (const Eigen::Vector3f &)> & bb)
- {
- campos_constraints_func_ = bb;
- }
+ using SourceT::createTrainingDir;
+ using SourceT::getModelsInDirectory;
+ using SourceT::model_scale_;
+ using SourceT::models_;
+ using SourceT::path_;
- void
- setResolution (int res)
- {
- resolution_ = res;
- }
+ int tes_level_;
+ int resolution_;
+ float radius_sphere_;
+ float view_angle_;
+ bool gen_organized_;
+ std::function<bool(const Eigen::Vector3f&)> campos_constraints_func_;
- void
- setRadiusSphere (float r)
- {
- radius_sphere_ = r;
- }
+public:
+ using SourceT::setFilterDuplicateViews;
- void
- setViewAngle (float a)
- {
- view_angle_ = a;
- }
+ MeshSource() : SourceT() { gen_organized_ = false; }
- void
- loadOrGenerate (std::string & dir, std::string & model_path, ModelT & model)
- {
- std::stringstream pathmodel;
- pathmodel << dir << "/" << model.class_ << "/" << model.id_;
- bf::path trained_dir = pathmodel.str ();
-
- model.views_.reset (new std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>);
- model.poses_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
- model.self_occlusions_.reset (new std::vector<float>);
- model.assembled_.reset (new pcl::PointCloud<pcl::PointXYZ>);
- uniform_sampling (model_path, 100000, *model.assembled_, model_scale_);
-
- if (bf::exists (trained_dir))
- {
- //load views, poses and self-occlusions
- std::vector < std::string > view_filenames;
- int number_of_views = 0;
- for (const auto& dir_entry : bf::directory_iterator(trained_dir))
- {
- //check if its a directory, then get models in it
- if (!(bf::is_directory (dir_entry)))
- {
- //check that it is a ply file and then add, otherwise ignore..
- std::vector < std::string > strs;
- std::vector < std::string > strs_;
-
- std::string file = (dir_entry.path ().filename ()).string();
-
- boost::split (strs, file, boost::is_any_of ("."));
- boost::split (strs_, file, boost::is_any_of ("_"));
-
- std::string extension = strs[strs.size () - 1];
-
- if (extension == "pcd" && strs_[0] == "view")
- {
- view_filenames.push_back ((dir_entry.path ().filename ()).string());
-
- number_of_views++;
- }
- }
- }
-
- for (const auto &view_filename : view_filenames)
- {
- std::stringstream view_file;
- view_file << pathmodel.str () << "/" << view_filename;
- typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT> ());
- pcl::io::loadPCDFile (view_file.str (), *cloud);
-
- model.views_->push_back (cloud);
-
- std::string file_replaced1 (view_filename);
- boost::replace_all (file_replaced1, "view", "pose");
- boost::replace_all (file_replaced1, ".pcd", ".txt");
-
- std::string file_replaced2 (view_filename);
- boost::replace_all (file_replaced2, "view", "entropy");
- boost::replace_all (file_replaced2, ".pcd", ".txt");
-
- //read pose as well
- std::stringstream pose_file;
- pose_file << pathmodel.str () << "/" << file_replaced1;
-
- Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile (pose_file.str (), pose);
-
- model.poses_->push_back (pose);
-
- //read entropy as well
- std::stringstream entropy_file;
- entropy_file << pathmodel.str () << "/" << file_replaced2;
- float entropy = 0;
- PersistenceUtils::readFloatFromFile (entropy_file.str (), entropy);
- model.self_occlusions_->push_back (entropy);
-
- }
+ void
+ setTesselationLevel(int lev)
+ {
+ tes_level_ = lev;
+ }
- }
- else
- {
- //load PLY model and scale it
- vtkSmartPointer<vtkPLYReader> reader = vtkSmartPointer<vtkPLYReader>::New ();
- reader->SetFileName (model_path.c_str ());
-
- vtkSmartPointer<vtkTransform> trans = vtkSmartPointer<vtkTransform>::New ();
- trans->Scale (model_scale_, model_scale_, model_scale_);
- trans->Modified ();
- trans->Update ();
-
- vtkSmartPointer<vtkTransformPolyDataFilter> filter_scale = vtkSmartPointer<vtkTransformPolyDataFilter>::New ();
- filter_scale->SetTransform (trans);
- filter_scale->SetInputConnection (reader->GetOutputPort ());
- filter_scale->Update ();
-
- vtkSmartPointer<vtkPolyData> mapper = filter_scale->GetOutput ();
-
- //generate views
- pcl::apps::RenderViewsTesselatedSphere render_views;
- render_views.setResolution (resolution_);
- render_views.setUseVertices (false);
- render_views.setRadiusSphere (radius_sphere_);
- render_views.setComputeEntropies (true);
- render_views.setTesselationLevel (tes_level_);
- render_views.setViewAngle (view_angle_);
- render_views.addModelFromPolyData (mapper);
- render_views.setGenOrganized (gen_organized_);
- render_views.setCamPosConstraints (campos_constraints_func_);
- render_views.generateViews ();
-
- std::vector<typename PointCloud<PointInT>::Ptr> views_xyz_orig;
- std::vector < Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > poses;
- std::vector<float> entropies;
-
- render_views.getViews (views_xyz_orig);
- render_views.getPoses (poses);
- render_views.getEntropies (entropies);
-
- model.views_.reset (new std::vector<typename PointCloud<PointInT>::Ptr> ());
- model.poses_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > ());
- model.self_occlusions_.reset (new std::vector<float> ());
-
- for (std::size_t i = 0; i < views_xyz_orig.size (); i++)
- {
- model.views_->push_back (views_xyz_orig[i]);
- model.poses_->push_back (poses[i]);
- model.self_occlusions_->push_back (entropies[i]);
- }
-
- std::stringstream direc;
- direc << dir << "/" << model.class_ << "/" << model.id_;
- this->createClassAndModelDirectories (dir, model.class_, model.id_);
-
- for (std::size_t i = 0; i < model.views_->size (); i++)
- {
- //save generated model for future use
- std::stringstream path_view;
- path_view << direc.str () << "/view_" << i << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *(model.views_->at (i)));
-
- std::stringstream path_pose;
- path_pose << direc.str () << "/pose_" << i << ".txt";
-
- pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile (path_pose.str (), model.poses_->at (i));
-
- std::stringstream path_entropy;
- path_entropy << direc.str () << "/entropy_" << i << ".txt";
- pcl::rec_3d_framework::PersistenceUtils::writeFloatToFile (path_entropy.str (), model.self_occlusions_->at (i));
- }
-
- loadOrGenerate (dir, model_path, model);
+ void
+ setCamPosConstraints(std::function<bool(const Eigen::Vector3f&)>& bb)
+ {
+ campos_constraints_func_ = bb;
+ }
- }
- }
+ void
+ setResolution(int res)
+ {
+ resolution_ = res;
+ }
- /**
- * \brief Creates the model representation of the training set, generating views if needed
- */
- void
- generate (std::string & training_dir) override
- {
-
- //create training dir fs if not existent
- createTrainingDir (training_dir);
-
- //get models in directory
- std::vector < std::string > files;
- std::string start;
- std::string ext = std::string ("ply");
- bf::path dir = path_;
- getModelsInDirectory (dir, start, files, ext);
-
- models_.reset (new std::vector<ModelT>);
-
- for (const auto &filename : files)
- {
- ModelT m;
- this->getIdAndClassFromFilename (filename, m.id_, m.class_);
-
- //check which of them have been trained using training_dir and the model_id_
- //load views, poses and self-occlusions for those that exist
- //generate otherwise
- std::cout << filename << std::endl;
- std::string path_model = path_ + '/' + filename;
- loadOrGenerate (training_dir, path_model, m);
-
- models_->push_back (m);
+ void
+ setRadiusSphere(float r)
+ {
+ radius_sphere_ = r;
+ }
+
+ void
+ setViewAngle(float a)
+ {
+ view_angle_ = a;
+ }
+
+ void
+ loadOrGenerate(std::string& dir, std::string& model_path, ModelT& model)
+ {
+ std::stringstream pathmodel;
+ pathmodel << dir << "/" << model.class_ << "/" << model.id_;
+ bf::path trained_dir = pathmodel.str();
+
+ model.views_.reset(new std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr>);
+ model.poses_.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
+ model.self_occlusions_.reset(new std::vector<float>);
+ model.assembled_.reset(new pcl::PointCloud<pcl::PointXYZ>);
+ uniform_sampling(model_path, 100000, *model.assembled_, model_scale_);
+
+ if (bf::exists(trained_dir)) {
+ // load views, poses and self-occlusions
+ std::vector<std::string> view_filenames;
+ int number_of_views = 0;
+ for (const auto& dir_entry : bf::directory_iterator(trained_dir)) {
+ // skip directories; only plain files are candidate views
+ if (!(bf::is_directory(dir_entry))) {
+ // keep only "view_*.pcd" files, ignore everything else
+ std::vector<std::string> strs;
+ std::vector<std::string> strs_;
+
+ std::string file = (dir_entry.path().filename()).string();
+
+ boost::split(strs, file, boost::is_any_of("."));
+ boost::split(strs_, file, boost::is_any_of("_"));
+
+ std::string extension = strs[strs.size() - 1];
+
+ if (extension == "pcd" && strs_[0] == "view") {
+ view_filenames.push_back((dir_entry.path().filename()).string());
+
+ number_of_views++;
}
}
- };
+ }
+
+ for (const auto& view_filename : view_filenames) {
+ std::stringstream view_file;
+ view_file << pathmodel.str() << "/" << view_filename;
+ typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
+ pcl::io::loadPCDFile(view_file.str(), *cloud);
+
+ model.views_->push_back(cloud);
+
+ std::string file_replaced1(view_filename);
+ boost::replace_all(file_replaced1, "view", "pose");
+ boost::replace_all(file_replaced1, ".pcd", ".txt");
+
+ std::string file_replaced2(view_filename);
+ boost::replace_all(file_replaced2, "view", "entropy");
+ boost::replace_all(file_replaced2, ".pcd", ".txt");
+
+ // read pose as well
+ std::stringstream pose_file;
+ pose_file << pathmodel.str() << "/" << file_replaced1;
+
+ Eigen::Matrix4f pose;
+ PersistenceUtils::readMatrixFromFile(pose_file.str(), pose);
+
+ model.poses_->push_back(pose);
+
+ // read entropy as well
+ std::stringstream entropy_file;
+ entropy_file << pathmodel.str() << "/" << file_replaced2;
+ float entropy = 0;
+ PersistenceUtils::readFloatFromFile(entropy_file.str(), entropy);
+ model.self_occlusions_->push_back(entropy);
+ }
+ }
+ else {
+ // load PLY model and scale it
+ vtkSmartPointer<vtkPLYReader> reader = vtkSmartPointer<vtkPLYReader>::New();
+ reader->SetFileName(model_path.c_str());
+
+ vtkSmartPointer<vtkTransform> trans = vtkSmartPointer<vtkTransform>::New();
+ trans->Scale(model_scale_, model_scale_, model_scale_);
+ trans->Modified();
+ trans->Update();
+
+ vtkSmartPointer<vtkTransformPolyDataFilter> filter_scale =
+ vtkSmartPointer<vtkTransformPolyDataFilter>::New();
+ filter_scale->SetTransform(trans);
+ filter_scale->SetInputConnection(reader->GetOutputPort());
+ filter_scale->Update();
+
+ vtkSmartPointer<vtkPolyData> mapper = filter_scale->GetOutput();
+
+ // generate views
+ pcl::apps::RenderViewsTesselatedSphere render_views;
+ render_views.setResolution(resolution_);
+ render_views.setUseVertices(false);
+ render_views.setRadiusSphere(radius_sphere_);
+ render_views.setComputeEntropies(true);
+ render_views.setTesselationLevel(tes_level_);
+ render_views.setViewAngle(view_angle_);
+ render_views.addModelFromPolyData(mapper);
+ render_views.setGenOrganized(gen_organized_);
+ render_views.setCamPosConstraints(campos_constraints_func_);
+ render_views.generateViews();
+
+ std::vector<typename PointCloud<PointInT>::Ptr> views_xyz_orig;
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>> poses;
+ std::vector<float> entropies;
+
+ render_views.getViews(views_xyz_orig);
+ render_views.getPoses(poses);
+ render_views.getEntropies(entropies);
+
+ model.views_.reset(new std::vector<typename PointCloud<PointInT>::Ptr>());
+ model.poses_.reset(new std::vector<Eigen::Matrix4f,
+ Eigen::aligned_allocator<Eigen::Matrix4f>>());
+ model.self_occlusions_.reset(new std::vector<float>());
+
+ for (std::size_t i = 0; i < views_xyz_orig.size(); i++) {
+ model.views_->push_back(views_xyz_orig[i]);
+ model.poses_->push_back(poses[i]);
+ model.self_occlusions_->push_back(entropies[i]);
+ }
+
+ std::stringstream direc;
+ direc << dir << "/" << model.class_ << "/" << model.id_;
+ this->createClassAndModelDirectories(dir, model.class_, model.id_);
+
+ for (std::size_t i = 0; i < model.views_->size(); i++) {
+ // save generated model for future use
+ std::stringstream path_view;
+ path_view << direc.str() << "/view_" << i << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *(model.views_->at(i)));
+
+ std::stringstream path_pose;
+ path_pose << direc.str() << "/pose_" << i << ".txt";
+
+ pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ model.poses_->at(i));
+
+ std::stringstream path_entropy;
+ path_entropy << direc.str() << "/entropy_" << i << ".txt";
+ pcl::rec_3d_framework::PersistenceUtils::writeFloatToFile(
+ path_entropy.str(), model.self_occlusions_->at(i));
+ }
+
+ loadOrGenerate(dir, model_path, model);
+ }
+ }
+
+ /**
+ * \brief Creates the model representation of the training set, generating views if
+ * needed
+ */
+ void
+ generate(std::string& training_dir) override
+ {
+
+ // create training dir fs if not existent
+ createTrainingDir(training_dir);
+
+ // get models in directory
+ std::vector<std::string> files;
+ std::string start;
+ std::string ext = std::string("ply");
+ bf::path dir = path_;
+ getModelsInDirectory(dir, start, files, ext);
+
+ models_.reset(new std::vector<ModelT>);
+
+ for (const auto& filename : files) {
+ ModelT m;
+ this->getIdAndClassFromFilename(filename, m.id_, m.class_);
+
+ // check which of them have been trained using training_dir and the model_id_
+ // load views, poses and self-occlusions for those that exist
+ // generate otherwise
+ std::cout << filename << std::endl;
+ std::string path_model = path_ + '/' + filename;
+ loadOrGenerate(training_dir, path_model, m);
+
+ models_->push_back(m);
+ }
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
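
// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the change above, showing
// how MeshSource is typically configured before generate() renders (or loads
// cached) views of each .ply model. setPath() and the camera-position
// constraint are assumptions: setPath() lives in the Source<> base class,
// which is not shown in this diff, and the constraint simply accepts every
// candidate position.
// ---------------------------------------------------------------------------
void
exampleMeshSource(std::string& models_dir, std::string& training_dir)
{
  pcl::rec_3d_framework::MeshSource<pcl::PointXYZ> source;
  source.setPath(models_dir);  // assumed base-class setter (directory with .ply files)
  source.setResolution(150);   // assumed render resolution
  source.setTesselationLevel(1);
  source.setViewAngle(57.f);
  source.setRadiusSphere(1.5f);

  std::function<bool(const Eigen::Vector3f&)> accept_all =
      [](const Eigen::Vector3f&) { return true; };
  source.setCamPosConstraints(accept_all);

  // The first run renders views and writes view_*.pcd / pose_*.txt /
  // entropy_*.txt into training_dir; later runs load them from disk.
  source.generate(training_dir);
}
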
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
-namespace pcl
-{
- namespace rec_3d_framework
- {
-
- /**
- * \brief Data source class based on partial views from sensor.
- * In this case, the training data is obtained directly from a depth sensor.
- * The filesystem should contain pcd files (representing a view of an object in
- * camera coordinates) and each view needs to be associated with a txt file
- * containing a 4x4 matrix representing the transformation from camera coordinates
- * to a global object coordinates frame.
- * \author Aitor Aldoma
- */
-
- template<typename PointInT>
- class RegisteredViewsSource : public Source<PointInT>
- {
- using SourceT = Source<PointInT>;
- using ModelT = Model<PointInT>;
-
- using SourceT::path_;
- using SourceT::models_;
- using SourceT::createTrainingDir;
- using SourceT::getModelsInDirectory;
- using SourceT::model_scale_;
-
- std::string view_prefix_;
- int pose_files_order_; //0 is row, 1 is column
-
- public:
- RegisteredViewsSource ()
- {
- view_prefix_ = std::string ("view");
- pose_files_order_ = 0;
- }
-
- void
- setPrefix (std::string & pre)
- {
- view_prefix_ = pre;
- }
-
- void
- setPoseRowOrder (int o)
- {
- pose_files_order_ = o;
- }
-
- void
- getViewsFilenames (bf::path & path_with_views, std::vector<std::string> & view_filenames)
- {
- int number_of_views = 0;
- for (const auto& dir_entry : bf::directory_iterator(path_with_views))
- {
- if (!(bf::is_directory (dir_entry)))
- {
- std::vector < std::string > strs;
- std::vector < std::string > strs_;
-
- std::string file = (dir_entry.path ().filename ()).string();
-
- boost::split (strs, file, boost::is_any_of ("."));
- boost::split (strs_, file, boost::is_any_of ("_"));
-
- std::string extension = strs[strs.size () - 1];
+namespace pcl {
+namespace rec_3d_framework {
+
+/**
+ * \brief Data source class based on partial views from sensor.
+ * In this case, the training data is obtained directly from a depth sensor.
+ * The filesystem should contain pcd files (representing a view of an object in
+ * camera coordinates) and each view needs to be associated with a txt file
+ * containing a 4x4 matrix representing the transformation from camera coordinates
+ * to a global object coordinates frame.
+ * to a global object coordinate frame.
+ */
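//
// Editor's note: an illustrative on-disk layout (assumed file names) matching
// the description above. The "view" prefix is the default and can be changed
// with setPrefix(), and each pose file name is derived from its view by
// replacing "view" -> "pose" and ".pcd" -> ".txt":
//
//   <training_dir>/<class>/<model_id>/view_0.pcd   // partial view, camera coordinates
//   <training_dir>/<class>/<model_id>/pose_0.txt   // 4x4 camera-to-object transformation
//   <training_dir>/<class>/<model_id>/view_1.pcd
//   <training_dir>/<class>/<model_id>/pose_1.txt
//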
- if (extension == "pcd" && (strs_[0].compare (view_prefix_) == 0))
- {
- view_filenames.push_back ((dir_entry.path ().filename ()).string());
+template <typename PointInT>
+class RegisteredViewsSource : public Source<PointInT> {
+ using SourceT = Source<PointInT>;
+ using ModelT = Model<PointInT>;
- number_of_views++;
- }
- }
- }
- }
+ using SourceT::createTrainingDir;
+ using SourceT::getModelsInDirectory;
+ using SourceT::model_scale_;
+ using SourceT::models_;
+ using SourceT::path_;
- void
- assembleModelFromViewsAndPoses(ModelT & model, std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > & poses) {
- for(std::size_t i=0; i < model.views_->size(); i++) {
- Eigen::Matrix4f inv = poses[i];
- typename pcl::PointCloud<PointInT>::Ptr global_cloud(new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud(*(model.views_->at(i)),*global_cloud, inv);
- *(model.assembled_) += *global_cloud;
- }
- }
+ std::string view_prefix_;
+ int pose_files_order_; // 0 is row, 1 is column
- void
- loadOrGenerate (std::string & dir, std::string & model_path, ModelT & model)
- {
- std::stringstream pathmodel;
- pathmodel << dir << "/" << model.class_ << "/" << model.id_;
- bf::path trained_dir = pathmodel.str ();
+public:
+ RegisteredViewsSource()
+ {
+ view_prefix_ = std::string("view");
+ pose_files_order_ = 0;
+ }
- model.views_.reset (new std::vector<typename pcl::PointCloud<PointInT>::Ptr>);
- model.poses_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
- model.self_occlusions_.reset (new std::vector<float>);
+ void
+ setPrefix(std::string& pre)
+ {
+ view_prefix_ = pre;
+ }
- if (bf::exists (trained_dir))
- {
- //load views and poses
- std::vector < std::string > view_filenames;
- int number_of_views = 0;
- for (const auto& dir_entry : bf::directory_iterator(trained_dir))
- {
- //check if its a directory, then get models in it
- if (!(bf::is_directory (*itr)))
- {
- //check that it is a ply file and then add, otherwise ignore..
- std::vector < std::string > strs;
- std::vector < std::string > strs_;
+ void
+ setPoseRowOrder(int o)
+ {
+ pose_files_order_ = o;
+ }
- std::string file = (dir_entry.path ().filename ()).string();
+ void
+ getViewsFilenames(bf::path& path_with_views, std::vector<std::string>& view_filenames)
+ {
+ int number_of_views = 0;
+ for (const auto& dir_entry : bf::directory_iterator(path_with_views)) {
+ if (!(bf::is_directory(dir_entry))) {
+ std::vector<std::string> strs;
+ std::vector<std::string> strs_;
- boost::split (strs, file, boost::is_any_of ("."));
- boost::split (strs_, file, boost::is_any_of ("_"));
+ std::string file = (dir_entry.path().filename()).string();
- std::string extension = strs[strs.size () - 1];
+ boost::split(strs, file, boost::is_any_of("."));
+ boost::split(strs_, file, boost::is_any_of("_"));
- if (extension == "pcd" && strs_[0] == "view")
- {
- view_filenames.push_back ((dir_entry.path ().filename ()).string());
- number_of_views++;
- }
- }
- }
+ std::string extension = strs[strs.size() - 1];
- std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > poses_to_assemble_;
+ if (extension == "pcd" && (strs_[0].compare(view_prefix_) == 0)) {
+ view_filenames.push_back((dir_entry.path().filename()).string());
- for (std::size_t i = 0; i < view_filenames.size (); i++)
- {
- std::stringstream view_file;
- view_file << pathmodel.str () << "/" << view_filenames[i];
- typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT> ());
- pcl::io::loadPCDFile (view_file.str (), *cloud);
+ number_of_views++;
+ }
+ }
+ }
+ }
- model.views_->push_back (cloud);
+ void
+ assembleModelFromViewsAndPoses(
+ ModelT& model,
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>& poses)
+ {
+ for (std::size_t i = 0; i < model.views_->size(); i++) {
+ Eigen::Matrix4f inv = poses[i];
+ typename pcl::PointCloud<PointInT>::Ptr global_cloud(
+ new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*(model.views_->at(i)), *global_cloud, inv);
+ *(model.assembled_) += *global_cloud;
+ }
+ }
- std::string file_replaced1 (view_filenames[i]);
- boost::replace_all (file_replaced1, "view", "pose");
- boost::replace_all (file_replaced1, ".pcd", ".txt");
+ void
+ loadOrGenerate(std::string& dir, std::string& model_path, ModelT& model)
+ {
+ std::stringstream pathmodel;
+ pathmodel << dir << "/" << model.class_ << "/" << model.id_;
+ bf::path trained_dir = pathmodel.str();
+
+ model.views_.reset(new std::vector<typename pcl::PointCloud<PointInT>::Ptr>);
+ model.poses_.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
+ model.self_occlusions_.reset(new std::vector<float>);
+
+ if (bf::exists(trained_dir)) {
+ // load views and poses
+ std::vector<std::string> view_filenames;
+ int number_of_views = 0;
+ for (const auto& dir_entry : bf::directory_iterator(trained_dir)) {
+ // skip directories; only plain files are candidate views
+ if (!(bf::is_directory(dir_entry))) {
+ // keep only "view_*.pcd" files, ignore everything else
+ std::vector<std::string> strs;
+ std::vector<std::string> strs_;
+
+ std::string file = (dir_entry.path().filename()).string();
+
+ boost::split(strs, file, boost::is_any_of("."));
+ boost::split(strs_, file, boost::is_any_of("_"));
+
+ std::string extension = strs[strs.size() - 1];
+
+ if (extension == "pcd" && strs_[0] == "view") {
+ view_filenames.push_back((dir_entry.path().filename()).string());
+ number_of_views++;
+ }
+ }
+ }
- //read pose as well
- std::stringstream pose_file;
- pose_file << pathmodel.str () << "/" << file_replaced1;
- Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile (pose_file.str (), pose);
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>
+ poses_to_assemble_;
- if(pose_files_order_ != 0) {
- //std::cout << "Transpose..." << std::endl;
+ for (std::size_t i = 0; i < view_filenames.size(); i++) {
+ std::stringstream view_file;
+ view_file << pathmodel.str() << "/" << view_filenames[i];
+ typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
+ pcl::io::loadPCDFile(view_file.str(), *cloud);
- Eigen::Matrix4f pose_trans = pose.transpose();
- poses_to_assemble_.push_back(pose_trans);
- //pose = pose_trans;
- //std::cout << pose << std::endl;
- }
+ model.views_->push_back(cloud);
- //std::cout << "pose being push backed to model" << std::endl;
- std::cout << pose << std::endl;
+ std::string file_replaced1(view_filenames[i]);
+ boost::replace_all(file_replaced1, "view", "pose");
+ boost::replace_all(file_replaced1, ".pcd", ".txt");
- //the recognizer assumes transformation from M to CC
- Eigen::Matrix4f inv = pose.inverse();
- model.poses_->push_back (inv);
+ // read pose as well
+ std::stringstream pose_file;
+ pose_file << pathmodel.str() << "/" << file_replaced1;
+ Eigen::Matrix4f pose;
+ PersistenceUtils::readMatrixFromFile(pose_file.str(), pose);
- model.self_occlusions_->push_back (-1.f);
+ if (pose_files_order_ != 0) {
+ Eigen::Matrix4f pose_trans = pose.transpose();
+ poses_to_assemble_.push_back(pose_trans);
+ }
- }
+ std::cout << pose << std::endl;
- model.assembled_.reset (new pcl::PointCloud<PointInT>);
- assembleModelFromViewsAndPoses(model, poses_to_assemble_);
+ // the recognizer assumes the transformation maps model (M) coordinates to camera coordinates (CC)
+ Eigen::Matrix4f inv = pose.inverse();
+ model.poses_->push_back(inv);
- /*pcl::visualization::PCLVisualizer vis ("results");
- pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler (model.assembled_, 255, 0, 0);
- vis.addPointCloud<PointInT> (model.assembled_, random_handler, "points");
+ model.self_occlusions_->push_back(-1.f);
+ }
- Eigen::Matrix4f view_transformation = model.poses_->at(0).inverse();
- typename pcl::PointCloud<PointInT>::Ptr view_trans(new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud(*(model.views_->at(0)), *view_trans, view_transformation);
+ model.assembled_.reset(new pcl::PointCloud<PointInT>);
+ assembleModelFromViewsAndPoses(model, poses_to_assemble_);
+ }
+ else {
- pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler2 (view_trans, 0, 255, 0);
- vis.addPointCloud<PointInT> (view_trans, random_handler2, "view");
+ // we just need to copy the views to the training directory
+ std::stringstream direc;
+ direc << dir << "/" << model.class_ << "/" << model.id_;
+ createClassAndModelDirectories(dir, model.class_, model.id_);
- vis.addCoordinateSystem(0.1);
- vis.spin ();*/
+ std::vector<std::string> view_filenames;
+ bf::path model_dir = model_path;
- }
- else
- {
+ getViewsFilenames(model_dir, view_filenames);
+ std::cout << view_filenames.size() << std::endl;
- //we just need to copy the views to the training directory
- std::stringstream direc;
- direc << dir << "/" << model.class_ << "/" << model.id_;
- createClassAndModelDirectories (dir, model.class_, model.id_);
+ for (std::size_t i = 0; i < view_filenames.size(); i++) {
+ std::stringstream view_file;
+ view_file << model_path << "/" << view_filenames[i];
+ typename pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
+ pcl::io::loadPCDFile(view_file.str(), *cloud);
- std::vector < std::string > view_filenames;
- bf::path model_dir = model_path;
+ std::cout << view_file.str() << std::endl;
- getViewsFilenames (model_dir, view_filenames);
- std::cout << view_filenames.size () << std::endl;
+ std::stringstream path_view;
+ path_view << direc.str() << "/view_" << i << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *cloud);
- for (std::size_t i = 0; i < view_filenames.size (); i++)
- {
- std::stringstream view_file;
- view_file << model_path << "/" << view_filenames[i];
- typename pcl::PointCloud<PointInT>::Ptr cloud (new pcl::PointCloud<PointInT> ());
- pcl::io::loadPCDFile (view_file.str (), *cloud);
+ std::string file_replaced1(view_file.str());
+ boost::replace_all(file_replaced1, view_prefix_, "pose");
+ boost::replace_all(file_replaced1, ".pcd", ".txt");
- std::cout << view_file.str () << std::endl;
+ Eigen::Matrix4f pose;
+ PersistenceUtils::readMatrixFromFile(file_replaced1, pose);
- std::stringstream path_view;
- path_view << direc.str () << "/view_" << i << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *cloud);
+ std::cout << pose << std::endl;
- std::string file_replaced1 (view_file.str ());
- boost::replace_all (file_replaced1, view_prefix_, "pose");
- boost::replace_all (file_replaced1, ".pcd", ".txt");
+ if (pose_files_order_ == 0) {
+ std::cout << "Transpose..." << std::endl;
+ Eigen::Matrix4f pose_trans = pose.transpose();
+ pose = pose_trans;
+ std::cout << pose << std::endl;
+ }
- Eigen::Matrix4f pose;
- PersistenceUtils::readMatrixFromFile (file_replaced1, pose);
+ std::stringstream path_pose;
+ path_pose << direc.str() << "/pose_" << i << ".txt";
+ pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ pose);
+ }
- std::cout << pose << std::endl;
+ loadOrGenerate(dir, model_path, model);
+ }
+ }
- if(pose_files_order_ == 0) {
- std::cout << "Transpose..." << std::endl;
- Eigen::Matrix4f pose_trans = pose.transpose();
- pose = pose_trans;
- std::cout << pose << std::endl;
- }
+ bool
+ isleafDirectory(bf::path& path)
+ {
+ bool no_dirs_inside = true;
+ for (const auto& dir_entry : bf::directory_iterator(path)) {
+ if (bf::is_directory(dir_entry)) {
+ no_dirs_inside = false;
+ }
+ }
+
+ return no_dirs_inside;
+ }
- std::stringstream path_pose;
- path_pose << direc.str () << "/pose_" << i << ".txt";
- pcl::rec_3d_framework::PersistenceUtils::writeMatrixToFile (path_pose.str (), pose);
- }
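+ // Recursively collects the relative paths of all leaf directories under dir;
+ // each leaf directory is treated as one model.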
+ void
+ getModelsInDirectory(bf::path& dir,
+ std::string& rel_path_so_far,
+ std::vector<std::string>& relative_paths)
+ {
+ for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ // check if it's a directory, then get the models inside it
+ if (bf::is_directory(dir_entry)) {
+ std::string so_far =
+ rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+ bf::path curr_path = dir_entry.path();
+
+ if (isleafDirectory(curr_path)) {
+ std::string path = rel_path_so_far + (dir_entry.path().filename()).string();
+ relative_paths.push_back(path);
+ }
+ else {
+ getModelsInDirectory(curr_path, so_far, relative_paths);
+ }
+ }
+ }
+ }
- loadOrGenerate (dir, model_path, model);
+ /**
+ * \brief Creates the model representation of the training set, generating views if
+ * needed
+ */
+ void
+ generate(std::string& training_dir)
+ {
- }
+ // create training dir fs if not existent
+ createTrainingDir(training_dir);
+
+ // get models in directory
+ std::vector<std::string> files;
+ std::string start = "";
+ bf::path dir = path_;
+ getModelsInDirectory(dir, start, files);
+
+ models_.reset(new std::vector<ModelT>);
+
+ for (std::size_t i = 0; i < files.size(); i++) {
+ ModelT m;
+
+ std::vector<std::string> strs;
+ boost::split(strs, files[i], boost::is_any_of("/\\"));
+ std::string name = strs[strs.size() - 1];
+
+ if (strs.size() == 1) {
+ m.id_ = strs[0];
+ }
+ else {
+ std::stringstream ss;
+ for (int i = 0; i < (static_cast<int>(strs.size()) - 1); i++) {
+ ss << strs[i];
+ if (i != (static_cast<int>(strs.size()) - 1))
+ ss << "/";
}
- bool
- isleafDirectory (bf::path & path)
- {
- bool no_dirs_inside = true;
- for (const auto& dir_entry : bf::directory_iterator(path))
- {
- if (bf::is_directory (dir_entry))
- {
- no_dirs_inside = false;
- }
- }
+ m.class_ = ss.str();
+ m.id_ = strs[strs.size() - 1];
+ }
- return no_dirs_inside;
- }
+ std::cout << m.class_ << " . " << m.id_ << std::endl;
+ // check which of them have been trained using training_dir and the model_id_
+ // load views, poses and self-occlusions for those that exist
+ // generate otherwise
- void
- getModelsInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
- {
- for (const auto& dir_entry : bf::directory_iterator(dir))
- {
- //check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
- {
- std::string so_far = rel_path_so_far + (dir_entry.path ().filename ()).string() + "/";
- bf::path curr_path = dir_entry.path ();
-
- if (isleafDirectory (curr_path))
- {
- std::string path = rel_path_so_far + (dir_entry.path ().filename ()).string();
- relative_paths.push_back (path);
- }
- else
- {
- getModelsInDirectory (curr_path, so_far, relative_paths);
- }
- }
- }
- }
+ std::stringstream model_path;
+ model_path << path_ << "/" << files[i];
+ std::string path_model = model_path.str();
+ loadOrGenerate(training_dir, path_model, m);
- /**
- * \brief Creates the model representation of the training set, generating views if needed
- */
- void
- generate (std::string & training_dir)
- {
-
- //create training dir fs if not existent
- createTrainingDir (training_dir);
-
- //get models in directory
- std::vector < std::string > files;
- std::string start = "";
- bf::path dir = path_;
- getModelsInDirectory (dir, start, files);
-
- models_.reset (new std::vector<ModelT>);
-
- for (std::size_t i = 0; i < files.size (); i++)
- {
- ModelT m;
-
- std::vector < std::string > strs;
- boost::split (strs, files[i], boost::is_any_of ("/\\"));
- std::string name = strs[strs.size () - 1];
-
- if (strs.size () == 1)
- {
- m.id_ = strs[0];
- }
- else
- {
- std::stringstream ss;
- for (int i = 0; i < (static_cast<int> (strs.size ()) - 1); i++)
- {
- ss << strs[i];
- if (i != (static_cast<int> (strs.size ()) - 1))
- ss << "/";
- }
-
- m.class_ = ss.str ();
- m.id_ = strs[strs.size () - 1];
- }
-
- std::cout << m.class_ << " . " << m.id_ << std::endl;
- //check which of them have been trained using training_dir and the model_id_
- //load views, poses and self-occlusions for those that exist
- //generate otherwise
-
- std::stringstream model_path;
- model_path << path_ << "/" << files[i];
- std::string path_model = model_path.str ();
- loadOrGenerate (training_dir, path_model, m);
-
- models_->push_back (m);
-
- //std::cout << files[i] << std::endl;
- }
- }
- };
+ models_->push_back(m);
+ }
}
-}
+};
+} // namespace rec_3d_framework
+} // namespace pcl
#pragma once
-#include <boost/filesystem.hpp>
-#include <boost/algorithm/string.hpp>
-#include <pcl/io/pcd_io.h>
#include <pcl/apps/3d_rec_framework/utils/persistence_utils.h>
-#include <pcl/filters/voxel_grid.h>
#include <pcl/common/transforms.h>
+#include <pcl/filters/voxel_grid.h>
+#include <pcl/io/pcd_io.h>
+
+#include <boost/algorithm/string.hpp>
+#include <boost/filesystem.hpp>
namespace bf = boost::filesystem;
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+/**
+ * \brief Model representation
+ * \author Aitor Aldoma
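+ *
+ * Example (a minimal sketch; assumes the model was populated by a Source
+ * implementation and uses pcl::PointXYZ purely for illustration):
+ * \code
+ * pcl::rec_3d_framework::Model<pcl::PointXYZ> model = ...; // filled in by a Source
+ * auto full = model.getAssembled(0.0f);     // resolution <= 0 returns the raw assembled cloud
+ * auto coarse = model.getAssembled(0.005f); // 5 mm voxel grid, computed once and cached per resolution
+ * \endcode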
+ */
+
+template <typename PointT>
+class Model {
+ using PointTPtr = typename pcl::PointCloud<PointT>::Ptr;
+ using PointTPtrConst = typename pcl::PointCloud<PointT>::ConstPtr;
+
+public:
+ std::shared_ptr<std::vector<PointTPtr>> views_;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ poses_;
+ std::shared_ptr<std::vector<float>> self_occlusions_;
+ std::string id_;
+ std::string class_;
+ PointTPtr assembled_;
+ typename std::map<float, PointTPtrConst> voxelized_assembled_;
+
+ bool
+ operator==(const Model& other) const
{
+ return (id_ == other.id_) && (class_ == other.class_);
+ }
- /**
- * \brief Model representation
- * \author Aitor Aldoma
- */
-
- template<typename PointT>
- class Model
- {
- using PointTPtr = typename pcl::PointCloud<PointT>::Ptr;
- using PointTPtrConst = typename pcl::PointCloud<PointT>::ConstPtr;
-
- public:
- std::shared_ptr<std::vector<PointTPtr>> views_;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> poses_;
- std::shared_ptr<std::vector<float>> self_occlusions_;
- std::string id_;
- std::string class_;
- PointTPtr assembled_;
- typename std::map<float, PointTPtrConst> voxelized_assembled_;
-
- bool
- operator== (const Model &other) const
- {
- return (id_ == other.id_) && (class_ == other.class_);
- }
+ PointTPtrConst
+ getAssembled(float resolution)
+ {
+ if (resolution <= 0)
+ return assembled_;
+
+ typename std::map<float, PointTPtrConst>::iterator it =
+ voxelized_assembled_.find(resolution);
+ if (it == voxelized_assembled_.end()) {
+ PointTPtr voxelized(new pcl::PointCloud<PointT>);
+ pcl::VoxelGrid<PointT> grid_;
+ grid_.setInputCloud(assembled_);
+ grid_.setLeafSize(resolution, resolution, resolution);
+ grid_.setDownsampleAllData(true);
+ grid_.filter(*voxelized);
+
+ PointTPtrConst voxelized_const(new pcl::PointCloud<PointT>(*voxelized));
+ voxelized_assembled_[resolution] = voxelized_const;
+ return voxelized_const;
+ }
+
+ return it->second;
+ }
+};
- PointTPtrConst
- getAssembled (float resolution)
- {
- if(resolution <= 0)
- return assembled_;
-
- typename std::map<float, PointTPtrConst>::iterator it = voxelized_assembled_.find (resolution);
- if (it == voxelized_assembled_.end ())
- {
- PointTPtr voxelized (new pcl::PointCloud<PointT>);
- pcl::VoxelGrid<PointT> grid_;
- grid_.setInputCloud (assembled_);
- grid_.setLeafSize (resolution, resolution, resolution);
- grid_.setDownsampleAllData(true);
- grid_.filter (*voxelized);
-
- PointTPtrConst voxelized_const (new pcl::PointCloud<PointT> (*voxelized));
- voxelized_assembled_[resolution] = voxelized_const;
- return voxelized_const;
- }
+/**
+ * \brief Abstract data source class, manages filesystem, incremental training, etc.
+ * \author Aitor Aldoma
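+ *
+ * Example (a minimal sketch against the abstract interface; ConcreteSource is a
+ * hypothetical stand-in for an actual Source<PointInT> implementation):
+ * \code
+ * std::shared_ptr<pcl::rec_3d_framework::Source<pcl::PointXYZ>> source(
+ *     new ConcreteSource<pcl::PointXYZ>);
+ * std::string models_path = "raw_models/";
+ * std::string training_dir = "trained_models/";
+ * source->setPath(models_path);       // where the raw model files live
+ * source->generate(training_dir);     // creates views/poses under training_dir if missing
+ * auto models = source->getModels();  // one ModelT per model found on disk
+ * \endcode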
+ */
- return it->second;
- }
- };
-
- /**
- * \brief Abstract data source class, manages filesystem, incremental training, etc.
- * \author Aitor Aldoma
- */
-
- template<typename PointInT>
- class Source
- {
-
- protected:
- using ModelT = Model<PointInT>;
- std::string path_;
- std::shared_ptr<std::vector<ModelT>> models_;
- float model_scale_;
- bool filter_duplicate_views_;
- bool load_views_;
-
- void
- getIdAndClassFromFilename (const std::string & filename, std::string & id, std::string & classname)
- {
-
- std::vector < std::string > strs;
- boost::split (strs, filename, boost::is_any_of ("/\\"));
- std::string name = strs[strs.size () - 1];
-
- std::stringstream ss;
- for (int i = 0; i < (static_cast<int> (strs.size ()) - 1); i++)
- {
- ss << strs[i];
- if (i != (static_cast<int> (strs.size ()) - 1))
- ss << "/";
- }
+template <typename PointInT>
+class Source {
+
+protected:
+ using ModelT = Model<PointInT>;
+ std::string path_;
+ std::shared_ptr<std::vector<ModelT>> models_;
+ float model_scale_;
+ bool filter_duplicate_views_;
+ bool load_views_;
+
+ void
+ getIdAndClassFromFilename(const std::string& filename,
+ std::string& id,
+ std::string& classname)
+ {
- classname = ss.str ();
- id = name.substr (0, name.length () - 4);
- }
+ std::vector<std::string> strs;
+ boost::split(strs, filename, boost::is_any_of("/\\"));
+ std::string name = strs[strs.size() - 1];
- void
- createTrainingDir (std::string & training_dir)
- {
- bf::path trained_dir = training_dir;
- if (!bf::exists (trained_dir))
- bf::create_directory (trained_dir);
- }
+ std::stringstream ss;
+ for (int i = 0; i < (static_cast<int>(strs.size()) - 1); i++) {
+ ss << strs[i];
+ if (i != (static_cast<int>(strs.size()) - 1))
+ ss << "/";
+ }
- void
- createClassAndModelDirectories (std::string & training_dir, std::string & class_str, std::string & id_str)
- {
- std::vector < std::string > strs;
- boost::split (strs, class_str, boost::is_any_of ("/\\"));
-
- std::stringstream ss;
- ss << training_dir << "/";
- for (const auto &str : strs)
- {
- ss << str << "/";
- bf::path trained_dir = ss.str ();
- if (!bf::exists (trained_dir))
- bf::create_directory (trained_dir);
- }
+ classname = ss.str();
+ id = name.substr(0, name.length() - 4);
+ }
- ss << id_str;
- bf::path trained_dir = ss.str ();
- if (!bf::exists (trained_dir))
- bf::create_directory (trained_dir);
- }
+ void
+ createTrainingDir(std::string& training_dir)
+ {
+ bf::path trained_dir = training_dir;
+ if (!bf::exists(trained_dir))
+ bf::create_directory(trained_dir);
+ }
- public:
+ void
+ createClassAndModelDirectories(std::string& training_dir,
+ std::string& class_str,
+ std::string& id_str)
+ {
+ std::vector<std::string> strs;
+ boost::split(strs, class_str, boost::is_any_of("/\\"));
+
+ std::stringstream ss;
+ ss << training_dir << "/";
+ for (const auto& str : strs) {
+ ss << str << "/";
+ bf::path trained_dir = ss.str();
+ if (!bf::exists(trained_dir))
+ bf::create_directory(trained_dir);
+ }
+
+ ss << id_str;
+ bf::path trained_dir = ss.str();
+ if (!bf::exists(trained_dir))
+ bf::create_directory(trained_dir);
+ }
- Source() {
- load_views_ = true;
- }
+public:
+ Source() { load_views_ = true; }
- virtual
- ~Source() = default;
+ virtual ~Source() = default;
- float
- getScale ()
- {
- return model_scale_;
- }
+ float
+ getScale()
+ {
+ return model_scale_;
+ }
- void
- setModelScale (float s)
- {
- model_scale_ = s;
- }
+ void
+ setModelScale(float s)
+ {
+ model_scale_ = s;
+ }
- void setFilterDuplicateViews(bool f) {
- filter_duplicate_views_ = f;
- std::cout << "setting filter duplicate views to " << f << std::endl;
- }
+ void
+ setFilterDuplicateViews(bool f)
+ {
+ filter_duplicate_views_ = f;
+ std::cout << "setting filter duplicate views to " << f << std::endl;
+ }
- void
- getModelsInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths, std::string & ext)
- {
- for (const auto& dir_entry : bf::directory_iterator(dir))
- {
- //check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
- {
- std::string so_far = rel_path_so_far + (dir_entry.path ().filename ()).string() + "/";
-
- bf::path curr_path = dir_entry.path ();
- getModelsInDirectory (curr_path, so_far, relative_paths, ext);
- }
- else
- {
- //check that it is a ply file and then add, otherwise ignore..
- std::vector < std::string > strs;
- std::string file = (dir_entry.path ().filename ()).string();
-
- boost::split (strs, file, boost::is_any_of ("."));
- std::string extension = strs[strs.size () - 1];
-
- if (extension == ext)
- {
- std::string path = rel_path_so_far + (dir_entry.path ().filename ()).string();
-
- relative_paths.push_back (path);
- }
- }
- }
+ void
+ getModelsInDirectory(bf::path& dir,
+ std::string& rel_path_so_far,
+ std::vector<std::string>& relative_paths,
+ std::string& ext)
+ {
+ for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ // check if it's a directory, then get the models inside it
+ if (bf::is_directory(dir_entry)) {
+ std::string so_far =
+ rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+
+ bf::path curr_path = dir_entry.path();
+ getModelsInDirectory(curr_path, so_far, relative_paths, ext);
}
+ else {
+ // keep only files with the requested extension, ignore everything else
+ std::vector<std::string> strs;
+ std::string file = (dir_entry.path().filename()).string();
+
+ boost::split(strs, file, boost::is_any_of("."));
+ std::string extension = strs[strs.size() - 1];
- void
- voxelizeAllModels (float resolution)
- {
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- models_->at (i).getAssembled (resolution);
+ if (extension == ext) {
+ std::string path = rel_path_so_far + (dir_entry.path().filename()).string();
+
+ relative_paths.push_back(path);
}
}
+ }
+ }
- /**
- * \brief Generate model representation
- */
- virtual void
- generate (std::string & training_dir)=0;
-
- /**
- * \brief Get the generated model
- */
- std::shared_ptr<std::vector<ModelT>>
- getModels ()
- {
- return models_;
- }
+ void
+ voxelizeAllModels(float resolution)
+ {
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ models_->at(i).getAssembled(resolution);
+ }
+ }
- std::shared_ptr<std::vector<ModelT>>
- getModels (std::string & model_id)
- {
-
- typename std::vector<ModelT>::iterator it = models_->begin ();
- while (it != models_->end ())
- {
- if (model_id != (*it).id_)
- {
- it = models_->erase (it);
- }
- else
- {
- it++;
- }
- }
+ /**
+ * \brief Generate model representation
+ */
+ virtual void
+ generate(std::string& training_dir) = 0;
+
+ /**
+ * \brief Get the generated model
+ */
+ std::shared_ptr<std::vector<ModelT>>
+ getModels()
+ {
+ return models_;
+ }
- return models_;
- }
+ std::shared_ptr<std::vector<ModelT>>
+ getModels(std::string& model_id)
+ {
- bool
- modelAlreadyTrained (ModelT m, std::string & base_dir, std::string & descr_name)
- {
- std::stringstream dir;
- dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
- bf::path desc_dir = dir.str ();
- std::cout << dir.str () << std::endl;
- return bf::exists (desc_dir);
+ typename std::vector<ModelT>::iterator it = models_->begin();
+ while (it != models_->end()) {
+ if (model_id != (*it).id_) {
+ it = models_->erase(it);
}
-
- std::string
- getModelDescriptorDir (ModelT m, std::string & base_dir, std::string & descr_name)
- {
- std::stringstream dir;
- dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
- return dir.str ();
+ else {
+ it++;
}
+ }
- void
- removeDescDirectory (ModelT m, std::string & base_dir, std::string & descr_name)
- {
- std::string dir = getModelDescriptorDir (m, base_dir, descr_name);
+ return models_;
+ }
- bf::path desc_dir = dir;
- if (bf::exists (desc_dir))
- bf::remove_all (desc_dir);
- }
+ bool
+ modelAlreadyTrained(ModelT m, std::string& base_dir, std::string& descr_name)
+ {
+ std::stringstream dir;
+ dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
+ bf::path desc_dir = dir.str();
+ std::cout << dir.str() << std::endl;
+ return bf::exists(desc_dir);
+ }
- void
- setPath (std::string & path)
- {
- path_ = path;
- }
+ std::string
+ getModelDescriptorDir(ModelT m, std::string& base_dir, std::string& descr_name)
+ {
+ std::stringstream dir;
+ dir << base_dir << "/" << m.class_ << "/" << m.id_ << "/" << descr_name;
+ return dir.str();
+ }
- void setLoadViews(bool load) {
- load_views_ = load;
- }
- };
+ void
+ removeDescDirectory(ModelT m, std::string& base_dir, std::string& descr_name)
+ {
+ std::string dir = getModelDescriptorDir(m, base_dir, descr_name);
+
+ bf::path desc_dir = dir;
+ if (bf::exists(desc_dir))
+ bf::remove_all(desc_dir);
+ }
+
+ void
+ setPath(std::string& path)
+ {
+ path_ = path;
}
-}
+
+ void
+ setLoadViews(bool load)
+ {
+ load_views_ = load;
+ }
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#pragma once
+#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
+#include <pcl/apps/3d_rec_framework/pc_source/source.h>
+#include <pcl/common/common.h>
+
#include <flann/flann.hpp>
-#include <pcl/common/common.h>
-#include <pcl/apps/3d_rec_framework/pc_source/source.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
+namespace pcl {
+namespace rec_3d_framework {
+
+template <typename PointInT>
+class PCL_EXPORTS GlobalClassifier {
+public:
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
-namespace pcl
-{
- namespace rec_3d_framework
+ virtual void
+ setNN(int nn) = 0;
+
+ virtual void
+ getCategory(std::vector<std::string>& categories) = 0;
+
+ virtual void
+ getConfidence(std::vector<float>& conf) = 0;
+
+ virtual void
+ classify() = 0;
+
+ virtual void
+ setIndices(std::vector<int>& indices) = 0;
+
+ virtual void
+ setInputCloud(const PointInTPtr& cloud) = 0;
+};
+
+/**
+ * \brief Nearest neighbor search based classification of PCL point type features.
+ * FLANN is used to identify a neighborhood, based on which different scoring schemes
+ * can be employed to obtain likelihood values for a specified list of classes.
+ * Available features: ESF, VFH, CVFH
+ * See apps/3d_rec_framework/tools/apps/global_classification.cpp for usage
+ * \author Aitor Aldoma
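+ *
+ * Example (a minimal sketch; flann::L1, pcl::PointXYZ and pcl::ESFSignature640 are
+ * illustrative choices, and `source`, `estimator` and `cloud` are assumed to be set
+ * up elsewhere, as in global_classification.cpp):
+ * \code
+ * pcl::rec_3d_framework::GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640> classifier;
+ * std::string training_dir = "trained_models/";
+ * std::string descr_name = "esf";
+ * classifier.setDataSource(source);          // std::shared_ptr<Source<pcl::PointXYZ>>
+ * classifier.setTrainingDir(training_dir);
+ * classifier.setDescriptorName(descr_name);
+ * classifier.setFeatureEstimator(estimator); // std::shared_ptr<GlobalEstimator<...>>
+ * classifier.setNN(5);
+ * classifier.initialize(false);              // false: reuse descriptors already on disk
+ * classifier.setInputCloud(cloud);
+ * classifier.classify();
+ * std::vector<std::string> categories;
+ * std::vector<float> confidences;
+ * classifier.getCategory(categories);
+ * classifier.getConfidence(confidences);
+ * \endcode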
+ */
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+class PCL_EXPORTS GlobalNNPipeline
+: public pcl::rec_3d_framework::GlobalClassifier<PointInT> {
+
+protected:
+ struct index_score {
+ int idx_models_;
+ int idx_input_;
+ double score_;
+ };
+
+ struct sortIndexScores {
+ bool
+ operator()(const index_score& d1, const index_score& d2)
+ {
+ return d1.score_ < d2.score_;
+ }
+ } sortIndexScoresOp;
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using DistT = Distance<float>;
+ using ModelT = Model<PointInT>;
+
+ /** \brief Directory where the trained structure will be saved */
+ std::string training_dir_;
+
+ /** \brief Point cloud to be classified */
+ PointInTPtr input_;
+
+ /** \brief Model data source */
+ std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
+
+ /** \brief Computes a feature */
+ std::shared_ptr<GlobalEstimator<PointInT, FeatureT>> estimator_;
+
+ /** \brief Descriptor name */
+ std::string descr_name_;
+
+ using flann_model = std::pair<ModelT, std::vector<float>>;
+ flann::Matrix<float> flann_data_;
+ flann::Index<DistT>* flann_index_;
+ std::vector<flann_model> flann_models_;
+
+ std::vector<int> indices_;
+
+ // load features from disk and create flann structure
+ void
+ loadFeaturesAndCreateFLANN();
+
+ inline void
+ convertToFLANN(const std::vector<flann_model>& models, flann::Matrix<float>& data)
{
+ data.rows = models.size();
+ data.cols = models[0].second.size(); // number of histogram bins
- template<typename PointInT>
- class PCL_EXPORTS GlobalClassifier {
- public:
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ flann::Matrix<float> flann_data(new float[models.size() * models[0].second.size()],
+ models.size(),
+ models[0].second.size());
- virtual void
- setNN (int nn) = 0;
+ for (std::size_t i = 0; i < data.rows; ++i)
+ for (std::size_t j = 0; j < data.cols; ++j) {
+ flann_data.ptr()[i * data.cols + j] = models[i].second[j];
+ }
- virtual void
- getCategory (std::vector<std::string> & categories) = 0;
+ data = flann_data;
+ }
- virtual void
- getConfidence (std::vector<float> & conf) = 0;
+ void
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances);
- virtual void
- classify () = 0;
+ int NN_;
+ std::vector<std::string> categories_;
+ std::vector<float> confidences_;
- virtual void
- setIndices (std::vector<int> & indices) = 0;
+ std::string first_nn_category_;
- virtual void
- setInputCloud (const PointInTPtr & cloud) = 0;
- };
+public:
+ GlobalNNPipeline() { NN_ = 1; }
- /**
- * \brief Nearest neighbor search based classification of PCL point type features.
- * FLANN is used to identify a neighborhood, based on which different scoring schemes
- * can be employed to obtain likelihood values for a specified list of classes.
- * Available features: ESF, VFH, CVFH
- * See apps/3d_rec_framework/tools/apps/global_classification.cpp for usage
- * \author Aitor Aldoma
- */
+ ~GlobalNNPipeline() {}
- template<template<class > class Distance, typename PointInT, typename FeatureT>
- class PCL_EXPORTS GlobalNNPipeline : public pcl::rec_3d_framework::GlobalClassifier<PointInT>
- {
+ void
+ setNN(int nn) override
+ {
+ NN_ = nn;
+ }
+
+ void
+ getCategory(std::vector<std::string>& categories) override
+ {
+ categories = categories_;
+ }
+
+ void
+ getConfidence(std::vector<float>& conf) override
+ {
+ conf = confidences_;
+ }
+
+ /**
+ * \brief Initializes the FLANN structure from the provided source
+ */
+
+ void
+ initialize(bool force_retrain = false);
+
+ /**
+ * \brief Performs classification
+ */
+
+ void
+ classify() override;
+
+ /**
+ * \brief Sets the model data source_
+ */
+ void
+ setDataSource(std::shared_ptr<Source<PointInT>>& source)
+ {
+ source_ = source;
+ }
+
+ /**
+ * \brief Sets the global feature estimator
+ */
+
+ void
+ setFeatureEstimator(std::shared_ptr<GlobalEstimator<PointInT, FeatureT>>& feat)
+ {
+ estimator_ = feat;
+ }
+
+ void
+ setIndices(std::vector<int>& indices) override
+ {
+ indices_ = indices;
+ }
- protected:
-
- struct index_score
- {
- int idx_models_;
- int idx_input_;
- double score_;
- };
+ /**
+ * \brief Sets the input cloud to be classified
+ */
+ void
+ setInputCloud(const PointInTPtr& cloud) override
+ {
+ input_ = cloud;
+ }
- struct sortIndexScores
- {
- bool
- operator() (const index_score& d1, const index_score& d2)
- {
- return d1.score_ < d2.score_;
- }
- } sortIndexScoresOp;
+ void
+ setDescriptorName(std::string& name)
+ {
+ descr_name_ = name;
+ }
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using DistT = Distance<float>;
- using ModelT = Model<PointInT>;
-
- /** \brief Directory where the trained structure will be saved */
- std::string training_dir_;
-
- /** \brief Point cloud to be classified */
- PointInTPtr input_;
-
- /** \brief Model data source */
- std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
-
- /** \brief Computes a feature */
- std::shared_ptr<GlobalEstimator<PointInT, FeatureT>> estimator_;
-
- /** \brief Descriptor name */
- std::string descr_name_;
-
- using flann_model = std::pair<ModelT, std::vector<float> >;
- flann::Matrix<float> flann_data_;
- flann::Index<DistT> * flann_index_;
- std::vector<flann_model> flann_models_;
-
- std::vector<int> indices_;
-
- //load features from disk and create flann structure
- void
- loadFeaturesAndCreateFLANN ();
-
- inline void
- convertToFLANN (const std::vector<flann_model> &models, flann::Matrix<float> &data)
- {
- data.rows = models.size ();
- data.cols = models[0].second.size (); // number of histogram bins
-
- flann::Matrix<float> flann_data (new float[models.size () * models[0].second.size ()], models.size (), models[0].second.size ());
-
- for (std::size_t i = 0; i < data.rows; ++i)
- for (std::size_t j = 0; j < data.cols; ++j)
- {
- flann_data.ptr ()[i * data.cols + j] = models[i].second[j];
- }
-
- data = flann_data;
- }
-
- void
- nearestKSearch (flann::Index<DistT> * index, const flann_model &model, int k, flann::Matrix<int> &indices, flann::Matrix<float> &distances);
-
- int NN_;
- std::vector<std::string> categories_;
- std::vector<float> confidences_;
-
- std::string first_nn_category_;
-
- public:
-
- GlobalNNPipeline ()
- {
- NN_ = 1;
- }
-
- ~GlobalNNPipeline ()
- {
- }
-
- void
- setNN (int nn) override
- {
- NN_ = nn;
- }
-
- void
- getCategory (std::vector<std::string> & categories) override
- {
- categories = categories_;
- }
-
- void
- getConfidence (std::vector<float> & conf) override
- {
- conf = confidences_;
- }
-
- /**
- * \brief Initializes the FLANN structure from the provided source
- */
-
- void
- initialize (bool force_retrain = false);
-
- /**
- * \brief Performs classification
- */
-
- void
- classify () override;
-
- /**
- * \brief Sets the model data source_
- */
- void
- setDataSource (std::shared_ptr<Source<PointInT>>& source)
- {
- source_ = source;
- }
-
- /**
- * \brief Sets the model data source_
- */
-
- void
- setFeatureEstimator (std::shared_ptr<GlobalEstimator<PointInT, FeatureT>>& feat)
- {
- estimator_ = feat;
- }
-
- void
- setIndices (std::vector<int> & indices) override
- {
- indices_ = indices;
- }
-
- /**
- * \brief Sets the input cloud to be classified
- */
- void
- setInputCloud (const PointInTPtr & cloud) override
- {
- input_ = cloud;
- }
-
- void
- setDescriptorName (std::string & name)
- {
- descr_name_ = name;
- }
-
- void
- setTrainingDir (std::string & dir)
- {
- training_dir_ = dir;
- }
- };
+ void
+ setTrainingDir(std::string& dir)
+ {
+ training_dir_ = dir;
}
-}
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#pragma once
-#include <flann/flann.h>
-#include <pcl/common/common.h>
-#include <pcl/apps/3d_rec_framework/pc_source/source.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/crh_estimator.h>
+#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
+#include <pcl/apps/3d_rec_framework/pc_source/source.h>
+#include <pcl/common/common.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
-namespace pcl
-{
- namespace rec_3d_framework
- {
-
- /**
- * \brief Nearest neighbor search based classification of PCL point type features.
- * FLANN is used to identify a neighborhood, based on which different scoring schemes
- * can be employed to obtain likelihood values for a specified list of classes.
- * Available features: ESF, VFH, CVFH
- * \author Aitor Aldoma
- */
-
- template<template<class > class Distance, typename PointInT, typename FeatureT>
- class PCL_EXPORTS GlobalNNCRHRecognizer
- {
+#include <flann/flann.h>
- protected:
+namespace pcl {
+namespace rec_3d_framework {
- struct index_score
- {
- int idx_models_;
- int idx_input_;
- double score_;
- };
+/**
+ * \brief Nearest neighbor search based classification of PCL point type features.
+ * FLANN is used to identify a neighborhood, based on which different scoring schemes
+ * can be employed to obtain likelihood values for a specified list of classes.
+ * Available features: ESF, VFH, CVFH
+ * \author Aitor Aldoma
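+ *
+ * Example (a minimal sketch; the recognizer is configured like GlobalNNPipeline,
+ * but recognize() yields candidate models and transforms instead of categories;
+ * `recognizer`, `source`, `crh_estimator` and `cloud` are assumed to exist):
+ * \code
+ * recognizer.setDataSource(source);
+ * recognizer.setFeatureEstimator(crh_estimator); // std::shared_ptr<CRHEstimation<...>>
+ * recognizer.setTrainingDir(training_dir);
+ * recognizer.setDescriptorName(descr_name);
+ * recognizer.setNN(5);
+ * recognizer.initialize(false);
+ * recognizer.setInputCloud(cloud);
+ * recognizer.recognize();
+ * auto models = recognizer.getModels();         // candidate models
+ * auto transforms = recognizer.getTransforms(); // one pose per candidate
+ * \endcode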
+ */
- struct sortIndexScores
- {
- bool
- operator() (const index_score& d1, const index_score& d2)
- {
- return d1.score_ < d2.score_;
- }
- } sortIndexScoresOp;
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+class PCL_EXPORTS GlobalNNCRHRecognizer {
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
+protected:
+ struct index_score {
+ int idx_models_;
+ int idx_input_;
+ double score_;
+ };
- using DistT = Distance<float>;
- using ModelT = Model<PointInT>;
- using CRHPointCloud = pcl::PointCloud<pcl::Histogram<90> >;
+ struct sortIndexScores {
+ bool
+ operator()(const index_score& d1, const index_score& d2)
+ {
+ return d1.score_ < d2.score_;
+ }
+ } sortIndexScoresOp;
- /** \brief Directory where the trained structure will be saved */
- std::string training_dir_;
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
- /** \brief Point cloud to be classified */
- PointInTPtr input_;
+ using DistT = Distance<float>;
+ using ModelT = Model<PointInT>;
+ using CRHPointCloud = pcl::PointCloud<pcl::Histogram<90>>;
- /** \brief Model data source */
- std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
+ /** \brief Directory where the trained structure will be saved */
+ std::string training_dir_;
- /** \brief Computes a feature */
- std::shared_ptr<CRHEstimation<PointInT, FeatureT>> crh_estimator_;
+ /** \brief Point cloud to be classified */
+ PointInTPtr input_;
- /** \brief Hypotheses verification algorithm */
- std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
+ /** \brief Model data source */
+ std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
- /** \brief Descriptor name */
- std::string descr_name_;
+ /** \brief Computes a feature */
+ std::shared_ptr<CRHEstimation<PointInT, FeatureT>> crh_estimator_;
- int ICP_iterations_;
+ /** \brief Hypotheses verification algorithm */
+ std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
- bool noisify_;
- float noise_;
+ /** \brief Descriptor name */
+ std::string descr_name_;
- class flann_model
- {
- public:
- ModelT model;
- int view_id;
- int descriptor_id;
- std::vector<float> descr;
- };
+ int ICP_iterations_;
- flann::Matrix<float> flann_data_;
- flann::Index<DistT> * flann_index_;
- std::vector<flann_model> flann_models_;
+ bool noisify_;
+ float noise_;
+ class flann_model {
+ public:
+ ModelT model;
+ int view_id;
+ int descriptor_id;
+ std::vector<float> descr;
+ };
- bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
- std::map<std::pair<std::string, int>, Eigen::Vector3f > centroids_cache_;
+ flann::Matrix<float> flann_data_;
+ flann::Index<DistT>* flann_index_;
+ std::vector<flann_model> flann_models_;
- std::vector<int> indices_;
+ bool use_cache_;
+ std::map<std::pair<std::string, int>,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<
+ std::pair<const std::pair<std::string, int>, Eigen::Matrix4f>>>
+ poses_cache_;
+ std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
- //load features from disk and create flann structure
- void
- loadFeaturesAndCreateFLANN ();
+ std::vector<int> indices_;
- inline void
- convertToFLANN (const std::vector<flann_model> &models, flann::Matrix<float> &data)
- {
- data.rows = models.size ();
- data.cols = models[0].descr.size (); // number of histogram bins
+ // load features from disk and create flann structure
+ void
+ loadFeaturesAndCreateFLANN();
- flann::Matrix<float> flann_data (new float[models.size () * models[0].descr.size ()], models.size (), models[0].descr.size ());
+ inline void
+ convertToFLANN(const std::vector<flann_model>& models, flann::Matrix<float>& data)
+ {
+ data.rows = models.size();
+ data.cols = models[0].descr.size(); // number of histogram bins
- for (std::size_t i = 0; i < data.rows; ++i)
- for (std::size_t j = 0; j < data.cols; ++j)
- {
- flann_data.ptr ()[i * data.cols + j] = models[i].descr[j];
- }
+ flann::Matrix<float> flann_data(new float[models.size() * models[0].descr.size()],
+ models.size(),
+ models[0].descr.size());
- data = flann_data;
+ for (std::size_t i = 0; i < data.rows; ++i)
+ for (std::size_t j = 0; j < data.cols; ++j) {
+ flann_data.ptr()[i * data.cols + j] = models[i].descr[j];
}
- void
- nearestKSearch (flann::Index<DistT> * index, const flann_model &model, int k, flann::Matrix<int> &indices, flann::Matrix<float> &distances);
+ data = flann_data;
+ }
- void
- getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix);
+ void
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances);
- void
- getCRH (ModelT & model, int view_id, int d_id, CRHPointCloud::Ptr & hist);
+ void
+ getPose(ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix);
- void
- getCentroid (ModelT & model, int view_id, int d_id, Eigen::Vector3f & centroid);
+ void
+ getCRH(ModelT& model, int view_id, int d_id, CRHPointCloud::Ptr& hist);
- void
- getView (ModelT & model, int view_id, PointInTPtr & view);
+ void
+ getCentroid(ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid);
- int NN_;
+ void
+ getView(ModelT& model, int view_id, PointInTPtr& view);
- std::shared_ptr<std::vector<ModelT>> models_;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_;
+ int NN_;
- public:
+ std::shared_ptr<std::vector<ModelT>> models_;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_;
- GlobalNNCRHRecognizer ()
- {
- ICP_iterations_ = 0;
- noisify_ = false;
- do_CRH_ = true;
- }
+public:
+ GlobalNNCRHRecognizer()
+ {
+ ICP_iterations_ = 0;
+ noisify_ = false;
+ do_CRH_ = true;
+ }
- ~GlobalNNCRHRecognizer ()
- {
- }
+ ~GlobalNNCRHRecognizer() {}
- void setNoise(float n) {
- noisify_ = true;
- noise_ = n;
- }
+ void
+ setNoise(float n)
+ {
+ noisify_ = true;
+ noise_ = n;
+ }
- void setDOCRH(bool b) {
- do_CRH_ = b;
- }
+ void
+ setDOCRH(bool b)
+ {
+ do_CRH_ = b;
+ }
- void
- setNN (int nn)
- {
- NN_ = nn;
- }
+ void
+ setNN(int nn)
+ {
+ NN_ = nn;
+ }
- void
- setICPIterations (int it)
- {
- ICP_iterations_ = it;
- }
+ void
+ setICPIterations(int it)
+ {
+ ICP_iterations_ = it;
+ }
- /**
- * \brief Initializes the FLANN structure from the provided source
- */
+ /**
+ * \brief Initializes the FLANN structure from the provided source
+ */
- void
- initialize (bool force_retrain = false);
+ void
+ initialize(bool force_retrain = false);
- /**
- * \brief Sets the model data source_
- */
- void
- setDataSource (std::shared_ptr<Source<PointInT>>& source)
- {
- source_ = source;
- }
+ /**
+ * \brief Sets the model data source_
+ */
+ void
+ setDataSource(std::shared_ptr<Source<PointInT>>& source)
+ {
+ source_ = source;
+ }
- /**
- * \brief Sets the model data source_
- */
+ /**
+ * \brief Sets the CRH feature estimator
+ */
- void
- setFeatureEstimator (std::shared_ptr<CRHEstimation<PointInT, FeatureT>>& feat)
- {
- crh_estimator_ = feat;
- }
+ void
+ setFeatureEstimator(std::shared_ptr<CRHEstimation<PointInT, FeatureT>>& feat)
+ {
+ crh_estimator_ = feat;
+ }
- /**
- * \brief Sets the HV algorithm
- */
- void
- setHVAlgorithm (std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
- {
- hv_algorithm_ = alg;
- }
+ /**
+ * \brief Sets the HV algorithm
+ */
+ void
+ setHVAlgorithm(std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
+ {
+ hv_algorithm_ = alg;
+ }
- void
- setIndices (std::vector<int> & indices)
- {
- indices_ = indices;
- }
+ void
+ setIndices(std::vector<int>& indices)
+ {
+ indices_ = indices;
+ }
- /**
- * \brief Sets the input cloud to be classified
- */
- void
- setInputCloud (const PointInTPtr & cloud)
- {
- input_ = cloud;
- }
+ /**
+ * \brief Sets the input cloud to be classified
+ */
+ void
+ setInputCloud(const PointInTPtr& cloud)
+ {
+ input_ = cloud;
+ }
- void
- setDescriptorName (std::string & name)
- {
- descr_name_ = name;
- }
+ void
+ setDescriptorName(std::string& name)
+ {
+ descr_name_ = name;
+ }
- void
- setTrainingDir (std::string & dir)
- {
- training_dir_ = dir;
- }
+ void
+ setTrainingDir(std::string& dir)
+ {
+ training_dir_ = dir;
+ }
- /**
- * \brief Performs recognition on the input cloud
- */
+ /**
+ * \brief Performs recognition on the input cloud
+ */
- void
- recognize ();
+ void
+ recognize();
- std::shared_ptr<std::vector<ModelT>>
- getModels ()
- {
- return models_;
- }
+ std::shared_ptr<std::vector<ModelT>>
+ getModels()
+ {
+ return models_;
+ }
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
- getTransforms ()
- {
- return transforms_;
- }
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ getTransforms()
+ {
+ return transforms_;
+ }
- void
- setUseCache (bool u)
- {
- use_cache_ = u;
- }
+ void
+ setUseCache(bool u)
+ {
+ use_cache_ = u;
+ }
- bool do_CRH_;
+ bool do_CRH_;
+};
- };
- }
-}
+} // namespace rec_3d_framework
+} // namespace pcl
#pragma once
-#include <flann/util/matrix.h>
-
-#include <pcl/common/common.h>
-#include <pcl/apps/3d_rec_framework/pc_source/source.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/global_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/ourcvfh_estimator.h>
+#include <pcl/apps/3d_rec_framework/pc_source/source.h>
+#include <pcl/common/common.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
-namespace pcl
-{
- namespace rec_3d_framework
- {
-
- /**
- * \brief Nearest neighbor search based classification of PCL point type features.
- * Available features: CVFH
- * \author Aitor Aldoma
- */
-
- template<template<class > class Distance, typename PointInT, typename FeatureT = pcl::VFHSignature308>
- class PCL_EXPORTS GlobalNNCVFHRecognizer
- {
-
- protected:
-
- struct index_score
- {
- int idx_models_;
- int idx_input_;
- double score_;
- };
-
- struct sortIndexScores
- {
- bool
- operator() (const index_score& d1, const index_score& d2)
- {
- return d1.score_ < d2.score_;
- }
- } sortIndexScoresOp;
-
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
-
- using DistT = Distance<float>;
- using ModelT = Model<PointInT>;
-
- /** \brief Directory where the trained structure will be saved */
- std::string training_dir_;
-
- /** \brief Point cloud to be classified */
- PointInTPtr input_;
-
- /** \brief Model data source */
- std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
-
- /** \brief Computes a feature */
- std::shared_ptr<OURCVFHEstimator<PointInT, FeatureT>> micvfh_estimator_;
-
- /** \brief Hypotheses verification algorithm */
- std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
-
- /** \brief Descriptor name */
- std::string descr_name_;
-
- int ICP_iterations_;
-
- bool noisify_;
- float noise_;
-
- class flann_model
- {
- public:
- ModelT model;
- int view_id;
- int descriptor_id;
- std::vector<float> descr;
-
- bool
- operator< (const flann_model &other) const
- {
- if ((this->model.id_.compare (other.model.id_) < 0))
- {
- return true;
- }
-
- if (this->model.id_ == other.model.id_)
- {
- //check view id
- if ((this->view_id < other.view_id))
- {
- return true;
- }
- if (this->view_id == other.view_id)
- {
- if (this->descriptor_id < other.descriptor_id)
- {
- return true;
- }
- }
- }
-
- return false;
- }
-
- bool
- operator== (const flann_model &other) const
- {
- return (model.id_ == other.model.id_) && (view_id == other.view_id) && (descriptor_id == other.descriptor_id);
- }
-
- };
-
- flann::Matrix<float> flann_data_;
- flann::Index<DistT> * flann_index_;
- std::vector<flann_model> flann_models_;
-
- std::vector<flann::Matrix<float> > single_categories_data_;
- std::vector<flann::Index<DistT> *> single_categories_index_;
- std::vector<std::shared_ptr<std::vector<int>>> single_categories_pointers_to_models_;
- std::map<std::string, int> category_to_vectors_indices_;
- std::vector<std::string> categories_to_be_searched_;
- bool use_single_categories_;
-
- bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
- std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
-
- std::vector<int> indices_;
-
- bool compute_scale_;
-
- //load features from disk and create flann structure
- void
- loadFeaturesAndCreateFLANN ();
-
- inline void
- convertToFLANN (const std::vector<flann_model> &models, flann::Matrix<float> &data)
- {
- data.rows = models.size ();
- data.cols = models[0].descr.size (); // number of histogram bins
+#include <flann/util/matrix.h>
- flann::Matrix<float> flann_data (new float[models.size () * models[0].descr.size ()], models.size (), models[0].descr.size ());
+namespace pcl {
+namespace rec_3d_framework {
- for (std::size_t i = 0; i < data.rows; ++i)
- for (std::size_t j = 0; j < data.cols; ++j)
- {
- flann_data.ptr ()[i * data.cols + j] = models[i].descr[j];
- }
+/**
+ * \brief Nearest neighbor search based classification of PCL point type features.
+ * Available features: CVFH
+ * \author Aitor Aldoma
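+ *
+ * Example (a minimal sketch of the CVFH-specific options; the overall flow mirrors
+ * GlobalNNCRHRecognizer, and `recognizer` is assumed to be configured already):
+ * \code
+ * std::vector<std::string> cats = {"mug", "bottle"}; // illustrative category names
+ * recognizer.setCategoriesToUseForRecognition(cats); // restrict matching to these categories
+ * recognizer.setUseSingleCategories(true);           // search the per-category FLANN indices
+ * recognizer.setComputeScale(false);
+ * recognizer.recognize();
+ * \endcode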
+ */
- data = flann_data;
+template <template <class> class Distance,
+ typename PointInT,
+ typename FeatureT = pcl::VFHSignature308>
+class PCL_EXPORTS GlobalNNCVFHRecognizer {
+
+protected:
+ struct index_score {
+ int idx_models_;
+ int idx_input_;
+ double score_;
+ };
+
+ struct sortIndexScores {
+ bool
+ operator()(const index_score& d1, const index_score& d2)
+ {
+ return d1.score_ < d2.score_;
+ }
+ } sortIndexScoresOp;
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
+
+ using DistT = Distance<float>;
+ using ModelT = Model<PointInT>;
+
+ /** \brief Directory where the trained structure will be saved */
+ std::string training_dir_;
+
+ /** \brief Point cloud to be classified */
+ PointInTPtr input_;
+
+ /** \brief Model data source */
+ std::shared_ptr<pcl::rec_3d_framework::Source<PointInT>> source_;
+
+ /** \brief Computes a feature */
+ std::shared_ptr<OURCVFHEstimator<PointInT, FeatureT>> micvfh_estimator_;
+
+ /** \brief Hypotheses verification algorithm */
+ std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
+
+ /** \brief Descriptor name */
+ std::string descr_name_;
+
+ int ICP_iterations_;
+
+ bool noisify_;
+ float noise_;
+
+ class flann_model {
+ public:
+ ModelT model;
+ int view_id;
+ int descriptor_id;
+ std::vector<float> descr;
+
+ bool
+ operator<(const flann_model& other) const
+ {
+ if ((this->model.id_.compare(other.model.id_) < 0)) {
+ return true;
+ }
+
+ if (this->model.id_ == other.model.id_) {
+ // check view id
+ if ((this->view_id < other.view_id)) {
+ return true;
}
+ if (this->view_id == other.view_id) {
+ if (this->descriptor_id < other.descriptor_id) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ bool
+ operator==(const flann_model& other) const
+ {
+ return (model.id_ == other.model.id_) && (view_id == other.view_id) &&
+ (descriptor_id == other.descriptor_id);
+ }
+ };
+
+ flann::Matrix<float> flann_data_;
+ flann::Index<DistT>* flann_index_;
+ std::vector<flann_model> flann_models_;
+
+ std::vector<flann::Matrix<float>> single_categories_data_;
+ std::vector<flann::Index<DistT>*> single_categories_index_;
+ std::vector<std::shared_ptr<std::vector<int>>> single_categories_pointers_to_models_;
+ std::map<std::string, int> category_to_vectors_indices_;
+ std::vector<std::string> categories_to_be_searched_;
+ bool use_single_categories_;
+
+ bool use_cache_;
+ std::map<std::pair<std::string, int>,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<
+ std::pair<const std::pair<std::string, int>, Eigen::Matrix4f>>>
+ poses_cache_;
+ std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
+
+ std::vector<int> indices_;
+
+ bool compute_scale_;
+
+ // load features from disk and create flann structure
+ void
+ loadFeaturesAndCreateFLANN();
+
+ inline void
+ convertToFLANN(const std::vector<flann_model>& models, flann::Matrix<float>& data)
+ {
+ data.rows = models.size();
+ data.cols = models[0].descr.size(); // number of histogram bins
- inline void
- convertToFLANN (const std::vector<flann_model> &models, const std::shared_ptr<std::vector<int>>& indices, flann::Matrix<float> &data)
- {
- data.rows = indices->size ();
- data.cols = models[0].descr.size (); // number of histogram bins
+ flann::Matrix<float> flann_data(new float[models.size() * models[0].descr.size()],
+ models.size(),
+ models[0].descr.size());
- flann::Matrix<float> flann_data(new float[indices->size () * models[0].descr.size ()],indices->size(),models[0].descr.size ());
+ for (std::size_t i = 0; i < data.rows; ++i)
+ for (std::size_t j = 0; j < data.cols; ++j) {
+ flann_data.ptr()[i * data.cols + j] = models[i].descr[j];
+ }
- for (std::size_t i = 0; i < data.rows; ++i)
- for (std::size_t j = 0; j < data.cols; ++j)
- {
- flann_data.ptr()[i * data.cols + j] = models[indices->at(i)].descr[j];
- }
+ data = flann_data;
+ }
- data = flann_data;
- }
+ inline void
+ convertToFLANN(const std::vector<flann_model>& models,
+ const std::shared_ptr<std::vector<int>>& indices,
+ flann::Matrix<float>& data)
+ {
+ data.rows = indices->size();
+ data.cols = models[0].descr.size(); // number of histogram bins
- void
- nearestKSearch (flann::Index<DistT> * index, const flann_model &model, int k, flann::Matrix<int> &indices, flann::Matrix<float> &distances);
+ flann::Matrix<float> flann_data(new float[indices->size() * models[0].descr.size()],
+ indices->size(),
+ models[0].descr.size());
- void
- getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix);
+ for (std::size_t i = 0; i < data.rows; ++i)
+ for (std::size_t j = 0; j < data.cols; ++j) {
+ flann_data.ptr()[i * data.cols + j] = models[indices->at(i)].descr[j];
+ }
- bool
- getRollPose (ModelT & model, int view_id, int d_id, Eigen::Matrix4f & pose_matrix);
+ data = flann_data;
+ }
- void
- getCentroid (ModelT & model, int view_id, int d_id, Eigen::Vector3f & centroid);
+ void
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances);
- void
- getView (ModelT & model, int view_id, PointInTPtr & view);
+ void
+ getPose(ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix);
- int NN_;
+ bool
+ getRollPose(ModelT& model, int view_id, int d_id, Eigen::Matrix4f& pose_matrix);
- std::shared_ptr<std::vector<ModelT>> models_;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_;
+ void
+ getCentroid(ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid);
- std::vector<float> descriptor_distances_;
+ void
+ getView(ModelT& model, int view_id, PointInTPtr& view);
- public:
+ int NN_;
- GlobalNNCVFHRecognizer ()
- {
- ICP_iterations_ = 0;
- noisify_ = false;
- compute_scale_ = false;
- use_single_categories_ = false;
- }
+ std::shared_ptr<std::vector<ModelT>> models_;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_;
- ~GlobalNNCVFHRecognizer ()
- {
- }
+ std::vector<float> descriptor_distances_;
- void
- getDescriptorDistances (std::vector<float> & ds)
- {
- ds = descriptor_distances_;
- }
+public:
+ GlobalNNCVFHRecognizer()
+ {
+ ICP_iterations_ = 0;
+ noisify_ = false;
+ compute_scale_ = false;
+ use_single_categories_ = false;
+ }
- void
- setComputeScale (bool d)
- {
- compute_scale_ = d;
- }
+ ~GlobalNNCVFHRecognizer() {}
- void
- setCategoriesToUseForRecognition (std::vector<std::string> & cats_to_use)
- {
- categories_to_be_searched_.clear ();
- categories_to_be_searched_ = cats_to_use;
- }
+ void
+ getDescriptorDistances(std::vector<float>& ds)
+ {
+ ds = descriptor_distances_;
+ }
- void setUseSingleCategories(bool b) {
- use_single_categories_ = b;
- }
+ void
+ setComputeScale(bool d)
+ {
+ compute_scale_ = d;
+ }
- void
- setNoise (float n)
- {
- noisify_ = true;
- noise_ = n;
- }
+ void
+ setCategoriesToUseForRecognition(std::vector<std::string>& cats_to_use)
+ {
+ categories_to_be_searched_.clear();
+ categories_to_be_searched_ = cats_to_use;
+ }
- void
- setNN (int nn)
- {
- NN_ = nn;
- }
+ void
+ setUseSingleCategories(bool b)
+ {
+ use_single_categories_ = b;
+ }
- void
- setICPIterations (int it)
- {
- ICP_iterations_ = it;
- }
+ void
+ setNoise(float n)
+ {
+ noisify_ = true;
+ noise_ = n;
+ }
- /**
- * \brief Initializes the FLANN structure from the provided source
- */
+ void
+ setNN(int nn)
+ {
+ NN_ = nn;
+ }
- void
- initialize (bool force_retrain = false);
+ void
+ setICPIterations(int it)
+ {
+ ICP_iterations_ = it;
+ }
- /**
- * \brief Sets the model data source_
- */
- void
- setDataSource (std::shared_ptr<Source<PointInT>>& source)
- {
- source_ = source;
- }
+ /**
+ * \brief Initializes the FLANN structure from the provided source
+ */
- /**
- * \brief Sets the model data source_
- */
+ void
+ initialize(bool force_retrain = false);
- void
- setFeatureEstimator (std::shared_ptr<OURCVFHEstimator<PointInT, FeatureT>>& feat)
- {
- micvfh_estimator_ = feat;
- }
+ /**
+ * \brief Sets the model data source_
+ */
+ void
+ setDataSource(std::shared_ptr<Source<PointInT>>& source)
+ {
+ source_ = source;
+ }
- /**
- * \brief Sets the HV algorithm
- */
- void
- setHVAlgorithm (std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
- {
- hv_algorithm_ = alg;
- }
+ /**
+ * \brief Sets the OUR-CVFH feature estimator
+ */
- void
- setIndices (std::vector<int> & indices)
- {
- indices_ = indices;
- }
+ void
+ setFeatureEstimator(std::shared_ptr<OURCVFHEstimator<PointInT, FeatureT>>& feat)
+ {
+ micvfh_estimator_ = feat;
+ }
- /**
- * \brief Sets the input cloud to be classified
- */
- void
- setInputCloud (const PointInTPtr & cloud)
- {
- input_ = cloud;
- }
+ /**
+ * \brief Sets the HV algorithm
+ */
+ void
+ setHVAlgorithm(std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
+ {
+ hv_algorithm_ = alg;
+ }
- void
- setDescriptorName (std::string & name)
- {
- descr_name_ = name;
- }
+ void
+ setIndices(std::vector<int>& indices)
+ {
+ indices_ = indices;
+ }
- void
- setTrainingDir (std::string & dir)
- {
- training_dir_ = dir;
- }
+ /**
+ * \brief Sets the input cloud to be classified
+ */
+ void
+ setInputCloud(const PointInTPtr& cloud)
+ {
+ input_ = cloud;
+ }
- /**
- * \brief Performs recognition on the input cloud
- */
+ void
+ setDescriptorName(std::string& name)
+ {
+ descr_name_ = name;
+ }
- void
- recognize ();
+ void
+ setTrainingDir(std::string& dir)
+ {
+ training_dir_ = dir;
+ }
- std::shared_ptr<std::vector<ModelT>>
- getModels ()
- {
- return models_;
- }
+ /**
+ * \brief Performs recognition on the input cloud
+ */
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
- getTransforms ()
- {
- return transforms_;
- }
+ void
+ recognize();
- void
- setUseCache (bool u)
- {
- use_cache_ = u;
- }
+ std::shared_ptr<std::vector<ModelT>>
+ getModels()
+ {
+ return models_;
+ }
- };
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ getTransforms()
+ {
+ return transforms_;
}
-}
+
+ void
+ setUseCache(bool u)
+ {
+ use_cache_ = u;
+ }
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <pcl/apps/3d_rec_framework/pipeline/global_nn_classifier.h>
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::loadFeaturesAndCreateFLANN ()
- {
- auto models = source_->getModels ();
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- for (const auto& dir_entry : bf::directory_iterator(path))
- {
- std::string file_name = (dir_entry.path ().filename ()).string();
-
- std::vector < std::string > strs;
- boost::split (strs, file_name, boost::is_any_of ("_"));
-
- if (strs[0] == "descriptor")
- {
- std::string full_file_name = dir_entry.path ().string ();
- std::vector < std::string > strs;
- boost::split (strs, full_file_name, boost::is_any_of ("/"));
-
- typename pcl::PointCloud<FeatureT>::Ptr signature (new pcl::PointCloud<FeatureT>);
- pcl::io::loadPCDFile (full_file_name, *signature);
-
- flann_model descr_model;
- descr_model.first = models->at (i);
- int size_feat = sizeof(signature->points[0].histogram) / sizeof(float);
- descr_model.second.resize (size_feat);
- memcpy (&descr_model.second[0], &signature->points[0].histogram[0], size_feat * sizeof(float));
-
- flann_models_.push_back (descr_model);
- }
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::
+ loadFeaturesAndCreateFLANN()
+{
+ auto models = source_->getModels();
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ for (const auto& dir_entry : bf::directory_iterator(path)) {
+ std::string file_name = (dir_entry.path().filename()).string();
+
+ std::vector<std::string> strs;
+ boost::split(strs, file_name, boost::is_any_of("_"));
+
+ if (strs[0] == "descriptor") {
+ std::string full_file_name = dir_entry.path().string();
+ std::vector<std::string> strs;
+ boost::split(strs, full_file_name, boost::is_any_of("/"));
+
+ typename pcl::PointCloud<FeatureT>::Ptr signature(
+ new pcl::PointCloud<FeatureT>);
+ pcl::io::loadPCDFile(full_file_name, *signature);
+
+ flann_model descr_model;
+ descr_model.first = models->at(i);
+ int size_feat = sizeof((*signature)[0].histogram) / sizeof(float);
+ descr_model.second.resize(size_feat);
+ memcpy(&descr_model.second[0],
+ &(*signature)[0].histogram[0],
+ size_feat * sizeof(float));
+
+ flann_models_.push_back(descr_model);
}
}
-
- convertToFLANN (flann_models_, flann_data_);
- flann_index_ = new flann::Index<DistT> (flann_data_, flann::LinearIndexParams ());
- flann_index_->buildIndex ();
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::nearestKSearch (flann::Index<DistT> * index, const flann_model &model,
- int k, flann::Matrix<int> &indices,
- flann::Matrix<float> &distances)
- {
- flann::Matrix<float> p = flann::Matrix<float> (new float[model.second.size ()], 1, model.second.size ());
- memcpy (&p.ptr ()[0], &model.second[0], p.cols * p.rows * sizeof(float));
-
- indices = flann::Matrix<int> (new int[k], 1, k);
- distances = flann::Matrix<float> (new float[k], 1, k);
- index->knnSearch (p, indices, distances, k, flann::SearchParams (512));
- delete[] p.ptr ();
+ convertToFLANN(flann_models_, flann_data_);
+ flann_index_ = new flann::Index<DistT>(flann_data_, flann::LinearIndexParams());
+ flann_index_->buildIndex();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::nearestKSearch(
+ flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances)
+{
+ flann::Matrix<float> p =
+ flann::Matrix<float>(new float[model.second.size()], 1, model.second.size());
+ memcpy(&p.ptr()[0], &model.second[0], p.cols * p.rows * sizeof(float));
+
+ indices = flann::Matrix<int>(new int[k], 1, k);
+ distances = flann::Matrix<float>(new float[k], 1, k);
+ index->knnSearch(p, indices, distances, k, flann::SearchParams(512));
+ delete[] p.ptr();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::classify()
+{
+
+ categories_.clear();
+ confidences_.clear();
+
+ first_nn_category_ = std::string("");
+
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ PointInTPtr in(new pcl::PointCloud<PointInT>);
+
+ typename pcl::PointCloud<FeatureT>::CloudVectorType signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> centroids;
+
+ if (!indices_.empty()) {
+ pcl::copyPointCloud(*input_, indices_, *in);
+ }
+ else {
+ in = input_;
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::classify ()
- {
-
- categories_.clear ();
- confidences_.clear ();
-
- first_nn_category_ = std::string ("");
-
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- PointInTPtr in (new pcl::PointCloud<PointInT>);
-
- typename pcl::PointCloud<FeatureT>::CloudVectorType signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
-
- if (!indices_.empty ())
- {
- pcl::copyPointCloud (*input_, indices_, *in);
- }
- else
- {
- in = input_;
- }
-
- estimator_->estimate (in, processed, signatures, centroids);
- std::vector<index_score> indices_scores;
-
- if (!signatures.empty ())
- {
- for (std::size_t idx = 0; idx < signatures.size (); idx++)
- {
- float* hist = signatures[idx].points[0].histogram;
- int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(float);
- std::vector<float> std_hist (hist, hist + size_feat);
- ModelT empty;
-
- flann_model histogram (empty, std_hist);
- flann::Matrix<int> indices;
- flann::Matrix<float> distances;
- nearestKSearch (flann_index_, histogram, NN_, indices, distances);
-
- //gather NN-search results
- for (int i = 0; i < NN_; ++i)
- {
- index_score is;
- is.idx_models_ = indices[0][i];
- is.idx_input_ = static_cast<int> (idx);
- is.score_ = distances[0][i];
- indices_scores.push_back (is);
- }
+ estimator_->estimate(in, processed, signatures, centroids);
+ std::vector<index_score> indices_scores;
+
+ if (!signatures.empty()) {
+ for (std::size_t idx = 0; idx < signatures.size(); idx++) {
+ float* hist = signatures[idx][0].histogram;
+ int size_feat = sizeof(signatures[idx][0].histogram) / sizeof(float);
+ std::vector<float> std_hist(hist, hist + size_feat);
+ ModelT empty;
+
+ flann_model histogram(empty, std_hist);
+ flann::Matrix<int> indices;
+ flann::Matrix<float> distances;
+ nearestKSearch(flann_index_, histogram, NN_, indices, distances);
+
+ // gather NN-search results
+ for (int i = 0; i < NN_; ++i) {
+ index_score is;
+ is.idx_models_ = indices[0][i];
+ is.idx_input_ = static_cast<int>(idx);
+ is.score_ = distances[0][i];
+ indices_scores.push_back(is);
}
+ }
- std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
- first_nn_category_ = flann_models_[indices_scores[0].idx_models_].first.class_;
+ std::sort(indices_scores.begin(), indices_scores.end(), sortIndexScoresOp);
+ first_nn_category_ = flann_models_[indices_scores[0].idx_models_].first.class_;
- std::map<std::string, int> category_map;
- int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));
+ std::map<std::string, int> category_map;
+ int num_n = std::min(NN_, static_cast<int>(indices_scores.size()));
- for (int i = 0; i < num_n; ++i)
- {
- std::string cat = flann_models_[indices_scores[i].idx_models_].first.class_;
- auto it = category_map.find (cat);
- if (it == category_map.end ())
- {
- category_map[cat] = 1;
- }
- else
- {
- it->second++;
- }
+ for (int i = 0; i < num_n; ++i) {
+ std::string cat = flann_models_[indices_scores[i].idx_models_].first.class_;
+ auto it = category_map.find(cat);
+ if (it == category_map.end()) {
+ category_map[cat] = 1;
}
-
- for (const auto &category : category_map)
- {
- float prob = static_cast<float> (category.second) / static_cast<float> (num_n);
- categories_.push_back (category.first);
- confidences_.push_back (prob);
+ else {
+ it->second++;
}
-
}
- else
- {
- first_nn_category_ = std::string ("error");
- categories_.push_back (first_nn_category_);
+
+ for (const auto& category : category_map) {
+ float prob = static_cast<float>(category.second) / static_cast<float>(num_n);
+ categories_.push_back(category.first);
+ confidences_.push_back(prob);
}
}
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::initialize (bool force_retrain)
- {
-
- //use the source to know what has to be trained and what not, checking if the descr_name directory exists
- //unless force_retrain is true, then train everything
- auto models = source_->getModels ();
- std::cout << "Models size:" << models->size () << std::endl;
-
- if (force_retrain)
- {
- for (std::size_t i = 0; i < models->size (); i++)
- {
- source_->removeDescDirectory (models->at (i), training_dir_, descr_name_);
- }
+ else {
+ first_nn_category_ = std::string("error");
+ categories_.push_back(first_nn_category_);
+ }
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNPipeline<Distance, PointInT, FeatureT>::initialize(
+ bool force_retrain)
+{
+
+  // Use the source to determine which models still need training by checking whether
+  // the descr_name directory exists; if force_retrain is true, retrain everything
+ auto models = source_->getModels();
+ std::cout << "Models size:" << models->size() << std::endl;
+
+ if (force_retrain) {
+ for (std::size_t i = 0; i < models->size(); i++) {
+ source_->removeDescDirectory(models->at(i), training_dir_, descr_name_);
}
+ }
- for (std::size_t i = 0; i < models->size (); i++)
- {
- if (!source_->modelAlreadyTrained (models->at (i), training_dir_, descr_name_))
- {
- for (std::size_t v = 0; v < models->at (i).views_->size (); v++)
- {
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- //pro view, compute signatures
- typename pcl::PointCloud<FeatureT>::CloudVectorType signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
- estimator_->estimate (models->at (i).views_->at (v), processed, signatures, centroids);
-
- //source_->makeModelPersistent (models->at (i), training_dir_, descr_name_, static_cast<int> (v));
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- bf::path desc_dir = path;
- if (!bf::exists (desc_dir))
- bf::create_directory (desc_dir);
-
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *processed);
-
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile (path_pose.str (), models->at (i).poses_->at (v));
-
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile (path_entropy.str (), models->at (i).self_occlusions_->at (v));
-
- //save signatures and centroids to disk
- for (std::size_t j = 0; j < signatures.size (); j++)
- {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
- Eigen::Vector3f centroid (centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile (path_centroid.str (), centroid);
-
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary (path_descriptor.str (), signatures[j]);
- }
+ for (std::size_t i = 0; i < models->size(); i++) {
+ if (!source_->modelAlreadyTrained(models->at(i), training_dir_, descr_name_)) {
+ for (std::size_t v = 0; v < models->at(i).views_->size(); v++) {
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+        // per view, compute signatures
+ typename pcl::PointCloud<FeatureT>::CloudVectorType signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>
+ centroids;
+ estimator_->estimate(
+ models->at(i).views_->at(v), processed, signatures, centroids);
+
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ bf::path desc_dir = path;
+ if (!bf::exists(desc_dir))
+ bf::create_directory(desc_dir);
+
+ std::stringstream path_view;
+ path_view << path << "/view_" << v << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *processed);
+
+ std::stringstream path_pose;
+ path_pose << path << "/pose_" << v << ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ models->at(i).poses_->at(v));
+
+ std::stringstream path_entropy;
+ path_entropy << path << "/entropy_" << v << ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ models->at(i).self_occlusions_->at(v));
+
+ // save signatures and centroids to disk
+ for (std::size_t j = 0; j < signatures.size(); j++) {
+ std::stringstream path_centroid;
+ path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
+ PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+
+ std::stringstream path_descriptor;
+ path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
}
-
- }
- else
- {
- //else skip model
- std::cout << "The model has already been trained..." << std::endl;
}
}
-
- //load features from disk
- //initialize FLANN structure
- loadFeaturesAndCreateFLANN ();
+ else {
+ // else skip model
+ std::cout << "The model has already been trained..." << std::endl;
+ }
}
+
+ // load features from disk
+ // initialize FLANN structure
+ loadFeaturesAndCreateFLANN();
+}
* Author: aitor
*/
-#include <random>
-
#include <pcl/apps/3d_rec_framework/pipeline/global_nn_recognizer_crh.h>
+#include <pcl/common/time.h>
#include <pcl/recognition/crh_alignment.h>
#include <pcl/registration/icp.h>
-#include <pcl/common/time.h>
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix)
- {
-
- if (use_cache_)
- {
- using mv_pair = std::pair<std::string, int>;
- mv_pair pair_model_view = std::make_pair (model.id_, view_id);
-
- std::map<mv_pair, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
-
- if (it != poses_cache_.end ())
- {
- pose_matrix = it->second;
- return;
- }
+#include <random>
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getPose(
+ ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix)
+{
+
+ if (use_cache_) {
+ using mv_pair = std::pair<std::string, int>;
+ mv_pair pair_model_view = std::make_pair(model.id_, view_id);
+
+ std::map<mv_pair,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f>>>::
+ iterator it = poses_cache_.find(pair_model_view);
+
+ if (it != poses_cache_.end()) {
+ pose_matrix = it->second;
+ return;
}
-
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
-
- PersistenceUtils::readMatrixFromFile (dir.str (), pose_matrix);
- }
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getCRH (ModelT & model, int view_id, int d_id,
- CRHPointCloud::Ptr & hist)
- {
-
- hist.reset (new CRHPointCloud);
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/crh_" << view_id << "_" << d_id << ".pcd";
-
- pcl::io::loadPCDFile (dir.str (), *hist);
- }
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getCentroid (ModelT & model, int view_id, int d_id,
- Eigen::Vector3f & centroid)
- {
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
-
- PersistenceUtils::getCentroidFromFile (dir.str (), centroid);
- }
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getView (ModelT & model, int view_id, PointInTPtr & view)
- {
- view.reset (new pcl::PointCloud<PointInT>);
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/view_" << view_id << ".pcd";
- pcl::io::loadPCDFile (dir.str (), *view);
-
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::loadFeaturesAndCreateFLANN ()
- {
- auto models = source_->getModels ();
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- for (const auto& dir_entry : bf::directory_iterator(path))
- {
- std::string file_name = (dir_entry.path ().filename ()).string();
-
- std::vector < std::string > strs;
- boost::split (strs, file_name, boost::is_any_of ("_"));
-
- if (strs[0] == "descriptor")
- {
-
- int view_id = atoi (strs[1].c_str ());
- std::vector < std::string > strs1;
- boost::split (strs1, strs[2], boost::is_any_of ("."));
- int descriptor_id = atoi (strs1[0].c_str ());
-
- std::string full_file_name = dir_entry.path ().string ();
- typename pcl::PointCloud<FeatureT>::Ptr signature (new pcl::PointCloud<FeatureT>);
- pcl::io::loadPCDFile (full_file_name, *signature);
-
- flann_model descr_model;
- descr_model.model = models->at (i);
- descr_model.view_id = view_id;
- descr_model.descriptor_id = descriptor_id;
-
- int size_feat = sizeof(signature->points[0].histogram) / sizeof(float);
- descr_model.descr.resize (size_feat);
- memcpy (&descr_model.descr[0], &signature->points[0].histogram[0], size_feat * sizeof(float));
-
- flann_models_.push_back (descr_model);
-
- if (use_cache_)
- {
-
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
-
- Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile (dir_pose.str (), pose_matrix);
-
- std::pair<std::string, int> pair_model_view = std::make_pair (models->at (i).id_, descr_model.view_id);
- poses_cache_[pair_model_view] = pose_matrix;
- }
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/pose_" << view_id << ".txt";
+
+ PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getCRH(
+ ModelT& model, int view_id, int d_id, CRHPointCloud::Ptr& hist)
+{
+
+ hist.reset(new CRHPointCloud);
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/crh_" << view_id << "_" << d_id << ".pcd";
+
+ pcl::io::loadPCDFile(dir.str(), *hist);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getCentroid(
+ ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid)
+{
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
+
+ PersistenceUtils::getCentroidFromFile(dir.str(), centroid);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::getView(
+ ModelT& model, int view_id, PointInTPtr& view)
+{
+ view.reset(new pcl::PointCloud<PointInT>);
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/view_" << view_id << ".pcd";
+ pcl::io::loadPCDFile(dir.str(), *view);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::
+ loadFeaturesAndCreateFLANN()
+{
+ auto models = source_->getModels();
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ for (const auto& dir_entry : bf::directory_iterator(path)) {
+ std::string file_name = (dir_entry.path().filename()).string();
+
+ std::vector<std::string> strs;
+ boost::split(strs, file_name, boost::is_any_of("_"));
+
+ if (strs[0] == "descriptor") {
+
+ int view_id = atoi(strs[1].c_str());
+ std::vector<std::string> strs1;
+ boost::split(strs1, strs[2], boost::is_any_of("."));
+ int descriptor_id = atoi(strs1[0].c_str());
+
+ std::string full_file_name = dir_entry.path().string();
+ typename pcl::PointCloud<FeatureT>::Ptr signature(
+ new pcl::PointCloud<FeatureT>);
+ pcl::io::loadPCDFile(full_file_name, *signature);
+
+ flann_model descr_model;
+ descr_model.model = models->at(i);
+ descr_model.view_id = view_id;
+ descr_model.descriptor_id = descriptor_id;
+
+ int size_feat = sizeof((*signature)[0].histogram) / sizeof(float);
+ descr_model.descr.resize(size_feat);
+ memcpy(&descr_model.descr[0],
+ &(*signature)[0].histogram[0],
+ size_feat * sizeof(float));
+
+ flann_models_.push_back(descr_model);
+
+ if (use_cache_) {
+
+ std::stringstream dir_pose;
+ dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+
+ Eigen::Matrix4f pose_matrix;
+ PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
+
+ std::pair<std::string, int> pair_model_view =
+ std::make_pair(models->at(i).id_, descr_model.view_id);
+ poses_cache_[pair_model_view] = pose_matrix;
}
}
}
-
- convertToFLANN (flann_models_, flann_data_);
- flann_index_ = new flann::Index<DistT> (flann_data_, flann::LinearIndexParams ());
- flann_index_->buildIndex ();
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::nearestKSearch (flann::Index<DistT> * index, const flann_model &model,
- int k, flann::Matrix<int> &indices,
- flann::Matrix<float> &distances)
- {
- flann::Matrix<float> p = flann::Matrix<float> (new float[model.descr.size ()], 1, model.descr.size ());
- memcpy (&p.ptr ()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
-
- indices = flann::Matrix<int> (new int[k], 1, k);
- distances = flann::Matrix<float> (new float[k], 1, k);
- index->knnSearch (p, indices, distances, k, flann::SearchParams (512));
- delete[] p.ptr ();
- }
+ convertToFLANN(flann_models_, flann_data_);
+ flann_index_ = new flann::Index<DistT>(flann_data_, flann::LinearIndexParams());
+ flann_index_->buildIndex();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances)
+{
+ flann::Matrix<float> p =
+ flann::Matrix<float>(new float[model.descr.size()], 1, model.descr.size());
+ memcpy(&p.ptr()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
+
+ indices = flann::Matrix<int>(new int[k], 1, k);
+ distances = flann::Matrix<float>(new float[k], 1, k);
+ index->knnSearch(p, indices, distances, k, flann::SearchParams(512));
+ delete[] p.ptr();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::recognize()
+{
+
+ models_.reset(new std::vector<ModelT>);
+ transforms_.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
+
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ PointInTPtr in(new pcl::PointCloud<PointInT>);
+
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>
+ signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> centroids;
+
+ if (!indices_.empty())
+ pcl::copyPointCloud(*input_, indices_, *in);
+ else
+ in = input_;
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::recognize ()
{
+ pcl::ScopeTime t("Estimate feature");
+ crh_estimator_->estimate(in, processed, signatures, centroids);
+ }
- models_.reset (new std::vector<ModelT>);
- transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
-
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- PointInTPtr in (new pcl::PointCloud<PointInT>);
-
- std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
-
- if (!indices_.empty ())
- pcl::copyPointCloud (*input_, indices_, *in);
- else
- in = input_;
-
- {
- pcl::ScopeTime t ("Estimate feature");
- crh_estimator_->estimate (in, processed, signatures, centroids);
- }
+ std::vector<CRHPointCloud::Ptr> crh_histograms;
+ crh_estimator_->getCRHHistograms(crh_histograms);
- std::vector<CRHPointCloud::Ptr> crh_histograms;
- crh_estimator_->getCRHHistograms (crh_histograms);
+ std::vector<index_score> indices_scores;
+ if (!signatures.empty()) {
- std::vector<index_score> indices_scores;
- if (!signatures.empty ())
{
-
- {
- pcl::ScopeTime t_matching ("Matching and roll...");
- for (std::size_t idx = 0; idx < signatures.size (); idx++)
- {
-
- float* hist = signatures[idx].points[0].histogram;
- int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(float);
- std::vector<float> std_hist (hist, hist + size_feat);
- ModelT empty;
-
- flann_model histogram;
- histogram.descr = std_hist;
-
- flann::Matrix<int> indices;
- flann::Matrix<float> distances;
- nearestKSearch (flann_index_, histogram, NN_, indices, distances);
-
- //gather NN-search results
- for (int i = 0; i < NN_; ++i)
- {
- index_score is;
- is.idx_models_ = indices[0][i];
- is.idx_input_ = static_cast<int> (idx);
- is.score_ = distances[0][i];
- indices_scores.push_back (is);
- }
+ pcl::ScopeTime t_matching("Matching and roll...");
+ for (std::size_t idx = 0; idx < signatures.size(); idx++) {
+
+ float* hist = signatures[idx][0].histogram;
+ int size_feat = sizeof(signatures[idx][0].histogram) / sizeof(float);
+ std::vector<float> std_hist(hist, hist + size_feat);
+ ModelT empty;
+
+ flann_model histogram;
+ histogram.descr = std_hist;
+
+ flann::Matrix<int> indices;
+ flann::Matrix<float> distances;
+ nearestKSearch(flann_index_, histogram, NN_, indices, distances);
+
+ // gather NN-search results
+ for (int i = 0; i < NN_; ++i) {
+ index_score is;
+ is.idx_models_ = indices[0][i];
+ is.idx_input_ = static_cast<int>(idx);
+ is.score_ = distances[0][i];
+ indices_scores.push_back(is);
}
+ }
- std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
+ std::sort(indices_scores.begin(), indices_scores.end(), sortIndexScoresOp);
- int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));
+ int num_n = std::min(NN_, static_cast<int>(indices_scores.size()));
+ if (do_CRH_) {
/*
- * Filter some hypothesis regarding to their distance to the first neighbour
+       * Once we have the models, we need to find a 6DOF pose using the roll histogram:
+       * pass both views, centroids and CRHs to pcl_recognition::CRHAlignment
*/
- /*std::vector<index_score> indices_scores_filtered;
- indices_scores_filtered.resize (num_n);
- indices_scores_filtered[0] = indices_scores[0];
-
- float best_score = indices_scores[0].score_;
- int kept = 1;
- for (int i = 1; i < num_n; ++i)
- {
- std::cout << best_score << indices_scores[i].score_ << (best_score / indices_scores[i].score_) << std::endl;
- if ((best_score / indices_scores[i].score_) > 0.75)
- {
- indices_scores_filtered[i] = indices_scores[i];
- kept++;
- }
+ pcl::CRHAlignment<PointInT, 90> crha;
- //best_score = indices_scores[i].score_;
- }
+ for (int i = 0; i < num_n; ++i) {
+ ModelT m = flann_models_[indices_scores[i].idx_models_].model;
+ int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
+ int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;
- indices_scores_filtered.resize (kept);
- std::cout << indices_scores_filtered.size () << " § " << num_n << std::endl;
+ std::cout << m.id_ << " " << view_id << " " << desc_id << std::endl;
- indices_scores = indices_scores_filtered;
- num_n = indices_scores.size ();*/
+ // get crhs
+ CRHPointCloud::Ptr input_crh = crh_histograms[indices_scores[i].idx_input_];
+ CRHPointCloud::Ptr view_crh;
+ getCRH(m, view_id, desc_id, view_crh);
- if (do_CRH_)
- {
- /*
- * Once we have the models, we need to find a 6DOF pose using the roll histogram
- * pass to pcl_recognition::CRHAlignment both views, centroids and CRH
- */
+ // get centroids
+ Eigen::Vector3f input_centroid = centroids[indices_scores[i].idx_input_];
+ Eigen::Vector3f view_centroid;
+ getCentroid(m, view_id, desc_id, view_centroid);
- pcl::CRHAlignment<PointInT, 90> crha;
+ // crha.setModelAndInputView (view, processed);
+ crha.setInputAndTargetCentroids(view_centroid, input_centroid);
+ crha.align(*view_crh, *input_crh);
- for (int i = 0; i < num_n; ++i)
- {
- ModelT m = flann_models_[indices_scores[i].idx_models_].model;
- int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
- int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;
+ Eigen::Matrix4f model_view_pose;
+ getPose(m, view_id, model_view_pose);
- std::cout << m.id_ << " " << view_id << " " << desc_id << std::endl;
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>
+ roll_transforms;
+ crha.getTransforms(roll_transforms);
- //get crhs
- CRHPointCloud::Ptr input_crh = crh_histograms[indices_scores[i].idx_input_];
- CRHPointCloud::Ptr view_crh;
- getCRH (m, view_id, desc_id, view_crh);
-
- //get centroids
- Eigen::Vector3f input_centroid = centroids[indices_scores[i].idx_input_];
- Eigen::Vector3f view_centroid;
- getCentroid (m, view_id, desc_id, view_centroid);
-
- //crha.setModelAndInputView (view, processed);
- crha.setInputAndTargetCentroids (view_centroid, input_centroid);
- crha.align (*view_crh, *input_crh);
-
- Eigen::Matrix4f model_view_pose;
- getPose (m, view_id, model_view_pose);
-
- std::vector < Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > roll_transforms;
- crha.getTransforms (roll_transforms);
-
- //create object hypothesis
- for (const auto &roll_transform : roll_transforms)
- {
- Eigen::Matrix4f final_roll_trans (roll_transform * model_view_pose);
- models_->push_back (m);
- transforms_->push_back (final_roll_trans);
- }
+        // create object hypotheses
+ for (const auto& roll_transform : roll_transforms) {
+ Eigen::Matrix4f final_roll_trans(roll_transform * model_view_pose);
+ models_->push_back(m);
+ transforms_->push_back(final_roll_trans);
}
}
- else
- {
- for (int i = 0; i < num_n; ++i)
- {
- ModelT m = flann_models_[indices_scores[i].idx_models_].model;
- models_->push_back (m);
- }
+ }
+ else {
+ for (int i = 0; i < num_n; ++i) {
+ ModelT m = flann_models_[indices_scores[i].idx_models_].model;
+ models_->push_back(m);
}
}
+ }
- std::cout << "Number of object hypotheses:" << models_->size () << std::endl;
+ std::cout << "Number of object hypotheses:" << models_->size() << std::endl;
- /**
- * POSE REFINEMENT
- **/
+ /**
+ * POSE REFINEMENT
+ **/
- if (ICP_iterations_ > 0)
- {
- pcl::ScopeTime t ("Pose refinement");
+ if (ICP_iterations_ > 0) {
+ pcl::ScopeTime t("Pose refinement");
- //Prepare scene and model clouds for the pose refinement step
- float VOXEL_SIZE_ICP_ = 0.005f;
- PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());
- pcl::VoxelGrid<PointInT> voxel_grid_icp;
- voxel_grid_icp.setInputCloud (processed);
- voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
- voxel_grid_icp.filter (*cloud_voxelized_icp);
- source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
+ // Prepare scene and model clouds for the pose refinement step
+ float VOXEL_SIZE_ICP_ = 0.005f;
+ PointInTPtr cloud_voxelized_icp(new pcl::PointCloud<PointInT>());
+ pcl::VoxelGrid<PointInT> voxel_grid_icp;
+ voxel_grid_icp.setInputCloud(processed);
+ voxel_grid_icp.setLeafSize(VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
+ voxel_grid_icp.filter(*cloud_voxelized_icp);
+ source_->voxelizeAllModels(VOXEL_SIZE_ICP_);
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(VOXEL_SIZE_ICP_, cloud_voxelized_icp) \
num_threads(omp_get_num_procs())
- for (int i = 0; i < static_cast<int> (models_->size ()); i++)
- {
-
- ConstPointInTPtr model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
-
- pcl::IterativeClosestPoint<PointInT, PointInT> reg;
- reg.setInputSource (model_aligned); //model
- reg.setInputTarget (cloud_voxelized_icp); //scene
- reg.setMaximumIterations (ICP_iterations_);
- reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 3.f);
- reg.setTransformationEpsilon (1e-5);
-
- typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
- reg.align (*output_);
-
- Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
- transforms_->at (i) = icp_trans * transforms_->at (i);
- }
+ // clang-format on
+ for (int i = 0; i < static_cast<int>(models_->size()); i++) {
+
+ ConstPointInTPtr model_cloud = models_->at(i).getAssembled(VOXEL_SIZE_ICP_);
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
+
+ pcl::IterativeClosestPoint<PointInT, PointInT> reg;
+ reg.setInputSource(model_aligned); // model
+ reg.setInputTarget(cloud_voxelized_icp); // scene
+ reg.setMaximumIterations(ICP_iterations_);
+ reg.setMaxCorrespondenceDistance(VOXEL_SIZE_ICP_ * 3.f);
+ reg.setTransformationEpsilon(1e-5);
+
+ typename pcl::PointCloud<PointInT>::Ptr output_(
+ new pcl::PointCloud<PointInT>());
+ reg.align(*output_);
+
+ Eigen::Matrix4f icp_trans = reg.getFinalTransformation();
+ transforms_->at(i) = icp_trans * transforms_->at(i);
}
+ }
- /**
- * HYPOTHESES VERIFICATION
- **/
-
- if (hv_algorithm_)
- {
+ /**
+ * HYPOTHESES VERIFICATION
+ **/
- pcl::ScopeTime t ("HYPOTHESES VERIFICATION");
+ if (hv_algorithm_) {
- std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
- aligned_models.resize (models_->size ());
+ pcl::ScopeTime t("HYPOTHESES VERIFICATION");
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.005f);
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
- aligned_models[i] = model_aligned;
- }
+ std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
+ aligned_models.resize(models_->size());
- std::vector<bool> mask_hv;
- hv_algorithm_->setSceneCloud (input_);
- hv_algorithm_->addModels (aligned_models, true);
- hv_algorithm_->verify ();
- hv_algorithm_->getMask (mask_hv);
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ ConstPointInTPtr model_cloud = models_->at(i).getAssembled(0.005f);
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
+ aligned_models[i] = model_aligned;
+ }
- std::shared_ptr<std::vector<ModelT>> models_temp;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_temp;
+ std::vector<bool> mask_hv;
+ hv_algorithm_->setSceneCloud(input_);
+ hv_algorithm_->addModels(aligned_models, true);
+ hv_algorithm_->verify();
+ hv_algorithm_->getMask(mask_hv);
- models_temp.reset (new std::vector<ModelT>);
- transforms_temp.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
+ std::shared_ptr<std::vector<ModelT>> models_temp;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_temp;
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- if (!mask_hv[i])
- continue;
+ models_temp.reset(new std::vector<ModelT>);
+ transforms_temp.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
- models_temp->push_back (models_->at (i));
- transforms_temp->push_back (transforms_->at (i));
- }
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ if (!mask_hv[i])
+ continue;
- models_ = models_temp;
- transforms_ = transforms_temp;
+ models_temp->push_back(models_->at(i));
+ transforms_temp->push_back(transforms_->at(i));
}
+ models_ = models_temp;
+ transforms_ = transforms_temp;
}
}
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::initialize (bool force_retrain)
- {
-
- //use the source to know what has to be trained and what not, checking if the descr_name directory exists
- //unless force_retrain is true, then train everything
- auto models = source_->getModels ();
- std::cout << "Models size:" << models->size () << std::endl;
-
- if (force_retrain)
- {
- for (std::size_t i = 0; i < models->size (); i++)
- {
- source_->removeDescDirectory (models->at (i), training_dir_, descr_name_);
- }
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCRHRecognizer<Distance, PointInT, FeatureT>::initialize(
+ bool force_retrain)
+{
+
+  // Use the source to determine which models still need training by checking whether
+  // the descr_name directory exists; if force_retrain is true, retrain everything
+ auto models = source_->getModels();
+ std::cout << "Models size:" << models->size() << std::endl;
+
+ if (force_retrain) {
+ for (std::size_t i = 0; i < models->size(); i++) {
+ source_->removeDescDirectory(models->at(i), training_dir_, descr_name_);
}
+ }
- for (std::size_t i = 0; i < models->size (); i++)
- {
- if (!source_->modelAlreadyTrained (models->at (i), training_dir_, descr_name_))
- {
- for (std::size_t v = 0; v < models->at (i).views_->size (); v++)
- {
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- PointInTPtr view = models->at (i).views_->at (v);
-
- if (noisify_)
- {
- std::random_device rd;
- std::mt19937 rng(rd());
- std::normal_distribution<float> nd (0.0f, noise_);
- // Noisify each point in the dataset
- for (std::size_t cp = 0; cp < view->points.size (); ++cp)
- view->points[cp].z += nd (rng);
- }
-
- //pro view, compute signatures and CRH
- std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
- crh_estimator_->estimate (view, processed, signatures, centroids);
-
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- bf::path desc_dir = path;
- if (!bf::exists (desc_dir))
- bf::create_directory (desc_dir);
-
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *processed);
-
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile (path_pose.str (), models->at (i).poses_->at (v));
-
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile (path_entropy.str (), models->at (i).self_occlusions_->at (v));
-
- std::vector<CRHPointCloud::Ptr> crh_histograms;
- crh_estimator_->getCRHHistograms (crh_histograms);
-
- //save signatures and centroids to disk
- for (std::size_t j = 0; j < signatures.size (); j++)
- {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
- Eigen::Vector3f centroid (centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile (path_centroid.str (), centroid);
-
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary (path_descriptor.str (), signatures[j]);
-
- std::stringstream path_roll;
- path_roll << path << "/crh_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary (path_roll.str (), *crh_histograms[j]);
- }
+ for (std::size_t i = 0; i < models->size(); i++) {
+ if (!source_->modelAlreadyTrained(models->at(i), training_dir_, descr_name_)) {
+ for (std::size_t v = 0; v < models->at(i).views_->size(); v++) {
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ PointInTPtr view = models->at(i).views_->at(v);
+
+ if (noisify_) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ std::normal_distribution<float> nd(0.0f, noise_);
+ // Noisify each point in the dataset
+ for (std::size_t cp = 0; cp < view->size(); ++cp)
+ (*view)[cp].z += nd(rng);
}
- }
- else
- {
- //else skip model
- std::cout << "The model has already been trained..." << std::endl;
+        // per view, compute signatures and CRH
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>
+ signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>
+ centroids;
+ crh_estimator_->estimate(view, processed, signatures, centroids);
+
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ bf::path desc_dir = path;
+ if (!bf::exists(desc_dir))
+ bf::create_directory(desc_dir);
+
+ std::stringstream path_view;
+ path_view << path << "/view_" << v << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *processed);
+
+ std::stringstream path_pose;
+ path_pose << path << "/pose_" << v << ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ models->at(i).poses_->at(v));
+
+ std::stringstream path_entropy;
+ path_entropy << path << "/entropy_" << v << ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ models->at(i).self_occlusions_->at(v));
+
+ std::vector<CRHPointCloud::Ptr> crh_histograms;
+ crh_estimator_->getCRHHistograms(crh_histograms);
+
+ // save signatures and centroids to disk
+ for (std::size_t j = 0; j < signatures.size(); j++) {
+ std::stringstream path_centroid;
+ path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
+ PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+
+ std::stringstream path_descriptor;
+ path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
+
+ std::stringstream path_roll;
+ path_roll << path << "/crh_" << v << "_" << j << ".pcd";
+ pcl::io::savePCDFileBinary(path_roll.str(), *crh_histograms[j]);
+ }
}
}
-
- //load features from disk
- //initialize FLANN structure
- loadFeaturesAndCreateFLANN ();
+ else {
+ // else skip model
+ std::cout << "The model has already been trained..." << std::endl;
+ }
}
+
+ // load features from disk
+ // initialize FLANN structure
+ loadFeaturesAndCreateFLANN();
+}
* Author: aitor
*/
-#include <flann/flann.hpp>
#include <pcl/apps/3d_rec_framework/pipeline/global_nn_recognizer_cvfh.h>
-#include <pcl/registration/icp.h>
#include <pcl/common/time.h>
+#include <pcl/registration/icp.h>
#include <pcl/visualization/pcl_visualizer.h>
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix)
- {
-
- if (use_cache_)
- {
- using mv_pair = std::pair<std::string, int>;
- mv_pair pair_model_view = std::make_pair (model.id_, view_id);
-
- std::map<mv_pair, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
-
- if (it != poses_cache_.end ())
- {
- pose_matrix = it->second;
- return;
- }
+#include <flann/flann.hpp>
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getPose(
+ ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix)
+{
+
+ if (use_cache_) {
+ using mv_pair = std::pair<std::string, int>;
+ mv_pair pair_model_view = std::make_pair(model.id_, view_id);
+
+ std::map<mv_pair,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f>>>::
+ iterator it = poses_cache_.find(pair_model_view);
+
+ if (it != poses_cache_.end()) {
+ pose_matrix = it->second;
+ return;
}
-
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
-
- PersistenceUtils::readMatrixFromFile (dir.str (), pose_matrix);
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- bool
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getRollPose (ModelT & model, int view_id, int d_id,
- Eigen::Matrix4f & pose_matrix)
- {
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/pose_" << view_id << ".txt";
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
+ PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+}
- dir << path << "/roll_trans_" << view_id << "_" << d_id << ".txt";
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+bool
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
+ getRollPose(ModelT& model, int view_id, int d_id, Eigen::Matrix4f& pose_matrix)
+{
- bf::path file_path = dir.str ();
- if (bf::exists (file_path))
- {
- PersistenceUtils::readMatrixFromFile (dir.str (), pose_matrix);
- return true;
- }
- return false;
- }
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getCentroid (ModelT & model, int view_id, int d_id,
- Eigen::Vector3f & centroid)
- {
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
-
- PersistenceUtils::getCentroidFromFile (dir.str (), centroid);
- }
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getView (ModelT & model, int view_id, PointInTPtr & view)
- {
- view.reset (new pcl::PointCloud<PointInT>);
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/view_" << view_id << ".pcd";
- pcl::io::loadPCDFile (dir.str (), *view);
+ dir << path << "/roll_trans_" << view_id << "_" << d_id << ".txt";
+ bf::path file_path = dir.str();
+ if (bf::exists(file_path)) {
+ PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+ return true;
}
-
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::loadFeaturesAndCreateFLANN ()
- {
-
- auto models = source_->getModels ();
-
- std::map<std::string, std::shared_ptr<std::vector<int>>> single_categories;
- if (use_single_categories_)
- {
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::map<std::string, std::shared_ptr<std::vector<int>>>::iterator it;
- std::string cat_model = models->at (i).class_;
- it = single_categories.find (cat_model);
- if (it == single_categories.end ())
- {
- std::shared_ptr<std::vector<int>> v (new std::vector<int>);
- single_categories[cat_model] = v;
- }
+ return false;
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
+ getCentroid(ModelT& model, int view_id, int d_id, Eigen::Vector3f& centroid)
+{
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/centroid_" << view_id << "_" << d_id << ".txt";
+
+ PersistenceUtils::getCentroidFromFile(dir.str(), centroid);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::getView(
+ ModelT& model, int view_id, PointInTPtr& view)
+{
+ view.reset(new pcl::PointCloud<PointInT>);
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/view_" << view_id << ".pcd";
+ pcl::io::loadPCDFile(dir.str(), *view);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
+ loadFeaturesAndCreateFLANN()
+{
+
+ auto models = source_->getModels();
+
+ std::map<std::string, std::shared_ptr<std::vector<int>>> single_categories;
+ if (use_single_categories_) {
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::map<std::string, std::shared_ptr<std::vector<int>>>::iterator it;
+ std::string cat_model = models->at(i).class_;
+ it = single_categories.find(cat_model);
+ if (it == single_categories.end()) {
+ std::shared_ptr<std::vector<int>> v(new std::vector<int>);
+ single_categories[cat_model] = v;
}
}
+ }
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- for (const auto& dir_entry : bf::directory_iterator(path))
- {
- std::string file_name = (dir_entry.path ().filename ()).string();
-
- std::vector < std::string > strs;
- boost::split (strs, file_name, boost::is_any_of ("_"));
-
- if (strs[0] == "descriptor")
- {
-
- int view_id = atoi (strs[1].c_str ());
- std::vector < std::string > strs1;
- boost::split (strs1, strs[2], boost::is_any_of ("."));
- int descriptor_id = atoi (strs1[0].c_str ());
-
- std::string full_file_name = dir_entry.path ().string ();
- typename pcl::PointCloud<FeatureT>::Ptr signature (new pcl::PointCloud<FeatureT>);
- pcl::io::loadPCDFile (full_file_name, *signature);
-
- flann_model descr_model;
- descr_model.model = models->at (i);
- descr_model.view_id = view_id;
- descr_model.descriptor_id = descriptor_id;
-
- int size_feat = sizeof(signature->points[0].histogram) / sizeof(float);
- descr_model.descr.resize (size_feat);
- memcpy (&descr_model.descr[0], &signature->points[0].histogram[0], size_feat * sizeof(float));
-
- if (use_single_categories_)
- {
- std::map<std::string, std::shared_ptr<std::vector<int>>>::iterator it;
- std::string cat_model = models->at (i).class_;
- it = single_categories.find (cat_model);
- if (it == single_categories.end ())
- {
- std::cout << cat_model << std::endl;
- std::cout << "Should not happen..." << std::endl;
- }
- else
- {
- it->second->push_back (static_cast<int> (flann_models_.size ()));
- }
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ for (const auto& dir_entry : bf::directory_iterator(path)) {
+ std::string file_name = (dir_entry.path().filename()).string();
+
+ std::vector<std::string> strs;
+ boost::split(strs, file_name, boost::is_any_of("_"));
+
+ if (strs[0] == "descriptor") {
+
+ int view_id = atoi(strs[1].c_str());
+ std::vector<std::string> strs1;
+ boost::split(strs1, strs[2], boost::is_any_of("."));
+ int descriptor_id = atoi(strs1[0].c_str());
+
+ std::string full_file_name = dir_entry.path().string();
+ typename pcl::PointCloud<FeatureT>::Ptr signature(
+ new pcl::PointCloud<FeatureT>);
+ pcl::io::loadPCDFile(full_file_name, *signature);
+
+ flann_model descr_model;
+ descr_model.model = models->at(i);
+ descr_model.view_id = view_id;
+ descr_model.descriptor_id = descriptor_id;
+
+ int size_feat = sizeof((*signature)[0].histogram) / sizeof(float);
+ descr_model.descr.resize(size_feat);
+ memcpy(&descr_model.descr[0],
+ &(*signature)[0].histogram[0],
+ size_feat * sizeof(float));
+
+ if (use_single_categories_) {
+ std::map<std::string, std::shared_ptr<std::vector<int>>>::iterator it;
+ std::string cat_model = models->at(i).class_;
+ it = single_categories.find(cat_model);
+ if (it == single_categories.end()) {
+ std::cout << cat_model << std::endl;
+ std::cout << "Should not happen..." << std::endl;
}
+ else {
+ it->second->push_back(static_cast<int>(flann_models_.size()));
+ }
+ }
- flann_models_.push_back (descr_model);
+ flann_models_.push_back(descr_model);
- if (use_cache_)
- {
+ if (use_cache_) {
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+ std::stringstream dir_pose;
+ dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
- Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile (dir_pose.str (), pose_matrix);
+ Eigen::Matrix4f pose_matrix;
+ PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
- std::pair<std::string, int> pair_model_view = std::make_pair (models->at (i).id_, descr_model.view_id);
- poses_cache_[pair_model_view] = pose_matrix;
- }
+ std::pair<std::string, int> pair_model_view =
+ std::make_pair(models->at(i).id_, descr_model.view_id);
+ poses_cache_[pair_model_view] = pose_matrix;
}
}
}
+ }
- convertToFLANN (flann_models_, flann_data_);
- flann_index_ = new flann::Index<DistT> (flann_data_, flann::LinearIndexParams ());
- flann_index_->buildIndex ();
-
- //single categories...
- if (use_single_categories_)
- {
- single_categories_data_.resize (single_categories.size ());
- single_categories_index_.resize (single_categories.size ());
- single_categories_pointers_to_models_.resize (single_categories.size ());
-
- int kk = 0;
- for (const auto &single_category : single_categories)
- {
- //create index and flann data
- convertToFLANN (flann_models_, single_category.second, single_categories_data_[kk]);
- single_categories_index_[kk] = new flann::Index<DistT> (single_categories_data_[kk], flann::LinearIndexParams ());
- single_categories_pointers_to_models_[kk] = single_category.second;
-
- category_to_vectors_indices_[single_category.first] = kk;
- kk++;
- }
+ convertToFLANN(flann_models_, flann_data_);
+ flann_index_ = new flann::Index<DistT>(flann_data_, flann::LinearIndexParams());
+ flann_index_->buildIndex();
+
+ if (use_single_categories_) {
+ single_categories_data_.resize(single_categories.size());
+ single_categories_index_.resize(single_categories.size());
+ single_categories_pointers_to_models_.resize(single_categories.size());
+
+ int kk = 0;
+ for (const auto& single_category : single_categories) {
+ // create index and flann data
+ convertToFLANN(
+ flann_models_, single_category.second, single_categories_data_[kk]);
+ single_categories_index_[kk] = new flann::Index<DistT>(
+ single_categories_data_[kk], flann::LinearIndexParams());
+ single_categories_pointers_to_models_[kk] = single_category.second;
+
+ category_to_vectors_indices_[single_category.first] = kk;
+ kk++;
}
}
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances)
+{
+ flann::Matrix<float> p =
+ flann::Matrix<float>(new float[model.descr.size()], 1, model.descr.size());
+ memcpy(&p.ptr()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
+
+ indices = flann::Matrix<int>(new int[k], 1, k);
+ distances = flann::Matrix<float>(new float[k], 1, k);
+ index->knnSearch(p, indices, distances, k, flann::SearchParams(512));
+ delete[] p.ptr();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::recognize()
+{
+
+ models_.reset(new std::vector<ModelT>);
+ transforms_.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
+
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ PointInTPtr in(new pcl::PointCloud<PointInT>);
+
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>
+ signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>> centroids;
+
+ if (!indices_.empty())
+ pcl::copyPointCloud(*input_, indices_, *in);
+ else
+ in = input_;
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::nearestKSearch (flann::Index<DistT> * index, const flann_model &model,
- int k, flann::Matrix<int> &indices,
- flann::Matrix<float> &distances)
{
- flann::Matrix<float> p = flann::Matrix<float> (new float[model.descr.size ()], 1, model.descr.size ());
- memcpy (&p.ptr ()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
-
- indices = flann::Matrix<int> (new int[k], 1, k);
- distances = flann::Matrix<float> (new float[k], 1, k);
- index->knnSearch (p, indices, distances, k, flann::SearchParams (512));
- delete[] p.ptr ();
+ pcl::ScopeTime t("Estimate feature");
+ micvfh_estimator_->estimate(in, processed, signatures, centroids);
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::recognize ()
- {
+ std::vector<index_score> indices_scores;
+ descriptor_distances_.clear();
- models_.reset (new std::vector<ModelT>);
- transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
-
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- PointInTPtr in (new pcl::PointCloud<PointInT>);
-
- std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
-
- if (!indices_.empty ())
- pcl::copyPointCloud (*input_, indices_, *in);
- else
- in = input_;
+ if (!signatures.empty()) {
{
- pcl::ScopeTime t ("Estimate feature");
- micvfh_estimator_->estimate (in, processed, signatures, centroids);
- }
+ pcl::ScopeTime t_matching("Matching and roll...");
- std::vector<index_score> indices_scores;
- descriptor_distances_.clear ();
+ if (use_single_categories_ && (!categories_to_be_searched_.empty())) {
- if (!signatures.empty ())
- {
-
- {
- pcl::ScopeTime t_matching ("Matching and roll...");
-
- if (use_single_categories_ && (!categories_to_be_searched_.empty ()))
- {
-
- //perform search of the different signatures in the categories_to_be_searched_
- for (std::size_t c = 0; c < categories_to_be_searched_.size (); c++)
- {
- std::cout << "Using category:" << categories_to_be_searched_[c] << std::endl;
- for (std::size_t idx = 0; idx < signatures.size (); idx++)
- {
- /*float* hist = signatures[idx].points[0].histogram;
- std::vector<float> std_hist (hist, hist + getHistogramLength (dummy));
- flann_model histogram ("", std_hist);
- flann::Matrix<int> indices;
- flann::Matrix<float> distances;
-
- std::map<std::string, int>::iterator it;
- it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
-
- assert (it != category_to_vectors_indices_.end ());
- nearestKSearch (single_categories_index_[it->second], histogram, nmodels_, indices, distances);*/
-
- float* hist = signatures[idx].points[0].histogram;
- int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(float);
- std::vector<float> std_hist (hist, hist + size_feat);
- //ModelT empty;
-
- flann_model histogram;
- histogram.descr = std_hist;
- flann::Matrix<int> indices;
- flann::Matrix<float> distances;
-
- std::map<std::string, int>::iterator it;
- it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
- assert (it != category_to_vectors_indices_.end ());
-
- nearestKSearch (single_categories_index_[it->second], histogram, NN_, indices, distances);
- //gather NN-search results
- double score = 0;
- for (std::size_t i = 0; i < (std::size_t) NN_; ++i)
- {
- score = distances[0][i];
- index_score is;
- is.idx_models_ = single_categories_pointers_to_models_[it->second]->at (indices[0][i]);
- is.idx_input_ = static_cast<int> (idx);
- is.score_ = score;
- indices_scores.push_back (is);
- }
- }
-
- //we cannot add more than nmodels per category, so sort here and remove offending ones...
- std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
- indices_scores.resize ((c + 1) * NN_);
- }
- }
- else
- {
- for (std::size_t idx = 0; idx < signatures.size (); idx++)
- {
-
- float* hist = signatures[idx].points[0].histogram;
- int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(float);
- std::vector<float> std_hist (hist, hist + size_feat);
- //ModelT empty;
+ // search each signature within the categories listed in categories_to_be_searched_
+ for (std::size_t c = 0; c < categories_to_be_searched_.size(); c++) {
+ std::cout << "Using category:" << categories_to_be_searched_[c] << std::endl;
+ for (std::size_t idx = 0; idx < signatures.size(); idx++) {
+ float* hist = signatures[idx][0].histogram;
+ int size_feat = sizeof(signatures[idx][0].histogram) / sizeof(float);
+ std::vector<float> std_hist(hist, hist + size_feat);
flann_model histogram;
histogram.descr = std_hist;
-
flann::Matrix<int> indices;
flann::Matrix<float> distances;
- nearestKSearch (flann_index_, histogram, NN_, indices, distances);
- //gather NN-search results
+ std::map<std::string, int>::iterator it;
+ it = category_to_vectors_indices_.find(categories_to_be_searched_[c]);
+ assert(it != category_to_vectors_indices_.end());
+
+ nearestKSearch(single_categories_index_[it->second],
+ histogram,
+ NN_,
+ indices,
+ distances);
+ // gather NN-search results
double score = 0;
- for (int i = 0; i < NN_; ++i)
- {
+ for (std::size_t i = 0; i < static_cast<std::size_t>(NN_); ++i) {
score = distances[0][i];
index_score is;
- is.idx_models_ = indices[0][i];
- is.idx_input_ = static_cast<int> (idx);
+ is.idx_models_ =
+ single_categories_pointers_to_models_[it->second]->at(indices[0][i]);
+ is.idx_input_ = static_cast<int>(idx);
is.score_ = score;
- indices_scores.push_back (is);
-
- //ModelT m = flann_models_[indices[0][i]].model;
+ indices_scores.push_back(is);
}
}
- }
- std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
-
- /*
- * There might be duplicated candidates, in those cases it makes sense to take
- * the closer one in descriptor space
- */
-
- typename std::map<flann_model, bool> found;
- typename std::map<flann_model, bool>::iterator it_map;
- for (std::size_t i = 0; i < indices_scores.size (); i++)
- {
- flann_model m = flann_models_[indices_scores[i].idx_models_];
- it_map = found.find (m);
- if (it_map == found.end ())
- {
- indices_scores[found.size ()] = indices_scores[i];
- found[m] = true;
+ // we cannot keep more than NN_ hypotheses per category, so sort here and drop
+ // the excess ones
+ std::sort(indices_scores.begin(), indices_scores.end(), sortIndexScoresOp);
+ indices_scores.resize((c + 1) * NN_);
+ }
+ }
+ else {
+ for (std::size_t idx = 0; idx < signatures.size(); idx++) {
+
+ float* hist = signatures[idx][0].histogram;
+ int size_feat = sizeof(signatures[idx][0].histogram) / sizeof(float);
+ std::vector<float> std_hist(hist, hist + size_feat);
+
+ flann_model histogram;
+ histogram.descr = std_hist;
+
+ flann::Matrix<int> indices;
+ flann::Matrix<float> distances;
+ nearestKSearch(flann_index_, histogram, NN_, indices, distances);
+
+ // gather NN-search results
+ double score = 0;
+ for (int i = 0; i < NN_; ++i) {
+ score = distances[0][i];
+ index_score is;
+ is.idx_models_ = indices[0][i];
+ is.idx_input_ = static_cast<int>(idx);
+ is.score_ = score;
+ indices_scores.push_back(is);
}
}
- indices_scores.resize (found.size ());
-
- int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));
-
- /*
- * Filter some hypothesis regarding to their distance to the first neighbour
- */
-
- /*std::vector<index_score> indices_scores_filtered;
- indices_scores_filtered.resize (num_n);
- indices_scores_filtered[0] = indices_scores[0];
-
- float best_score = indices_scores[0].score_;
- int kept = 1;
- for (int i = 1; i < num_n; ++i)
- {
- //std::cout << best_score << indices_scores[i].score_ << (best_score / indices_scores[i].score_) << std::endl;
- if ((best_score / indices_scores[i].score_) > 0.65)
- {
- indices_scores_filtered[i] = indices_scores[i];
- kept++;
- }
-
- //best_score = indices_scores[i].score_;
- }
-
- indices_scores_filtered.resize (kept);
- //std::cout << indices_scores_filtered.size () << " § " << num_n << std::endl;
+ }
- indices_scores = indices_scores_filtered;
- num_n = indices_scores.size ();*/
- std::cout << "Number of object hypotheses... " << num_n << std::endl;
+ std::sort(indices_scores.begin(), indices_scores.end(), sortIndexScoresOp);
+
+ /*
+ * There might be duplicated candidates; in those cases it makes sense to take
+ * the closest one in descriptor space
+ */
+
+ typename std::map<flann_model, bool> found;
+ typename std::map<flann_model, bool>::iterator it_map;
+ for (std::size_t i = 0; i < indices_scores.size(); i++) {
+ flann_model m = flann_models_[indices_scores[i].idx_models_];
+ it_map = found.find(m);
+ if (it_map == found.end()) {
+ indices_scores[found.size()] = indices_scores[i];
+ found[m] = true;
+ }
+ }
+ indices_scores.resize(found.size());
- std::vector<bool> valid_trans;
- std::vector < Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > transformations;
+ int num_n = std::min(NN_, static_cast<int>(indices_scores.size()));
+ std::cout << "Number of object hypotheses... " << num_n << std::endl;
- micvfh_estimator_->getValidTransformsVec (valid_trans);
- micvfh_estimator_->getTransformsVec (transformations);
+ std::vector<bool> valid_trans;
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>
+ transformations;
- for (int i = 0; i < num_n; ++i)
- {
- ModelT m = flann_models_[indices_scores[i].idx_models_].model;
- int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
- int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;
+ micvfh_estimator_->getValidTransformsVec(valid_trans);
+ micvfh_estimator_->getTransformsVec(transformations);
- int idx_input = indices_scores[i].idx_input_;
+ for (int i = 0; i < num_n; ++i) {
+ ModelT m = flann_models_[indices_scores[i].idx_models_].model;
+ int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
+ int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;
- std::cout << m.class_ << "/" << m.id_ << " ==> " << indices_scores[i].score_ << std::endl;
+ int idx_input = indices_scores[i].idx_input_;
- Eigen::Matrix4f roll_view_pose;
- bool roll_pose_found = getRollPose (m, view_id, desc_id, roll_view_pose);
+ std::cout << m.class_ << "/" << m.id_ << " ==> " << indices_scores[i].score_
+ << std::endl;
- if (roll_pose_found && valid_trans[idx_input])
- {
- Eigen::Matrix4f transposed = roll_view_pose.transpose ();
+ Eigen::Matrix4f roll_view_pose;
+ bool roll_pose_found = getRollPose(m, view_id, desc_id, roll_view_pose);
- //std::cout << transposed << std::endl;
+ if (roll_pose_found && valid_trans[idx_input]) {
+ Eigen::Matrix4f transposed = roll_view_pose.transpose();
- PointInTPtr view;
- getView (m, view_id, view);
+ PointInTPtr view;
+ getView(m, view_id, view);
- Eigen::Matrix4f model_view_pose;
- getPose (m, view_id, model_view_pose);
+ Eigen::Matrix4f model_view_pose;
+ getPose(m, view_id, model_view_pose);
- Eigen::Matrix4f scale_mat;
- scale_mat.setIdentity (4, 4);
+ Eigen::Matrix4f scale_mat;
+ scale_mat.setIdentity(4, 4);
- if (compute_scale_)
- {
- //compute scale using the whole view
- PointInTPtr view_transformed (new pcl::PointCloud<PointInT>);
- Eigen::Matrix4f hom_from_OVC_to_CC;
- hom_from_OVC_to_CC = transformations[idx_input].inverse () * transposed;
- pcl::transformPointCloud (*view, *view_transformed, hom_from_OVC_to_CC);
+ if (compute_scale_) {
+ // compute scale using the whole view
+ PointInTPtr view_transformed(new pcl::PointCloud<PointInT>);
+ Eigen::Matrix4f hom_from_OVC_to_CC;
+ hom_from_OVC_to_CC = transformations[idx_input].inverse() * transposed;
+ pcl::transformPointCloud(*view, *view_transformed, hom_from_OVC_to_CC);
- Eigen::Vector3f input_centroid = centroids[indices_scores[i].idx_input_];
- Eigen::Vector3f view_centroid;
- getCentroid (m, view_id, desc_id, view_centroid);
+ Eigen::Vector3f input_centroid = centroids[indices_scores[i].idx_input_];
+ Eigen::Vector3f view_centroid;
+ getCentroid(m, view_id, desc_id, view_centroid);
- Eigen::Vector4f cmatch4f (view_centroid[0], view_centroid[1], view_centroid[2], 0);
- Eigen::Vector4f cinput4f (input_centroid[0], input_centroid[1], input_centroid[2], 0);
+ Eigen::Vector4f cmatch4f(
+ view_centroid[0], view_centroid[1], view_centroid[2], 0);
+ Eigen::Vector4f cinput4f(
+ input_centroid[0], input_centroid[1], input_centroid[2], 0);
- Eigen::Vector4f max_pt_input;
- pcl::getMaxDistance (*processed, cinput4f, max_pt_input);
- max_pt_input[3] = 0;
- float max_dist_input = (cinput4f - max_pt_input).norm ();
+ Eigen::Vector4f max_pt_input;
+ pcl::getMaxDistance(*processed, cinput4f, max_pt_input);
+ max_pt_input[3] = 0;
+ float max_dist_input = (cinput4f - max_pt_input).norm();
- //compute max dist for transformed model_view
- pcl::getMaxDistance (*view, cmatch4f, max_pt_input);
- max_pt_input[3] = 0;
- float max_dist_view = (cmatch4f - max_pt_input).norm ();
+ // compute max dist for transformed model_view
+ pcl::getMaxDistance(*view, cmatch4f, max_pt_input);
+ max_pt_input[3] = 0;
+ float max_dist_view = (cmatch4f - max_pt_input).norm();
- cmatch4f = hom_from_OVC_to_CC * cmatch4f;
- std::cout << max_dist_view << " " << max_dist_input << std::endl;
+ cmatch4f = hom_from_OVC_to_CC * cmatch4f;
+ std::cout << max_dist_view << " " << max_dist_input << std::endl;
- float scale_factor_view = max_dist_input / max_dist_view;
- std::cout << "Scale factor:" << scale_factor_view << std::endl;
+ float scale_factor_view = max_dist_input / max_dist_view;
+ std::cout << "Scale factor:" << scale_factor_view << std::endl;
- Eigen::Matrix4f center, center_inv;
+ Eigen::Matrix4f center, center_inv;
- center.setIdentity (4, 4);
- center (0, 3) = -cinput4f[0];
- center (1, 3) = -cinput4f[1];
- center (2, 3) = -cinput4f[2];
+ center.setIdentity(4, 4);
+ center(0, 3) = -cinput4f[0];
+ center(1, 3) = -cinput4f[1];
+ center(2, 3) = -cinput4f[2];
- center_inv.setIdentity (4, 4);
- center_inv (0, 3) = cinput4f[0];
- center_inv (1, 3) = cinput4f[1];
- center_inv (2, 3) = cinput4f[2];
+ center_inv.setIdentity(4, 4);
+ center_inv(0, 3) = cinput4f[0];
+ center_inv(1, 3) = cinput4f[1];
+ center_inv(2, 3) = cinput4f[2];
- scale_mat (0, 0) = scale_factor_view;
- scale_mat (1, 1) = scale_factor_view;
- scale_mat (2, 2) = scale_factor_view;
+ scale_mat(0, 0) = scale_factor_view;
+ scale_mat(1, 1) = scale_factor_view;
+ scale_mat(2, 2) = scale_factor_view;
- scale_mat = center_inv * scale_mat * center;
- }
+ scale_mat = center_inv * scale_mat * center;
+ }
- Eigen::Matrix4f hom_from_OC_to_CC;
- hom_from_OC_to_CC = scale_mat * transformations[idx_input].inverse () * transposed * model_view_pose;
+ Eigen::Matrix4f hom_from_OC_to_CC;
+ hom_from_OC_to_CC = scale_mat * transformations[idx_input].inverse() *
+ transposed * model_view_pose;
- models_->push_back (m);
- transforms_->push_back (hom_from_OC_to_CC);
- descriptor_distances_.push_back (static_cast<float> (indices_scores[i].score_));
- }
- else
- {
- PCL_WARN("The roll pose was not found, should use CRH here... \n");
- }
+ models_->push_back(m);
+ transforms_->push_back(hom_from_OC_to_CC);
+ descriptor_distances_.push_back(static_cast<float>(indices_scores[i].score_));
+ }
+ else {
+ PCL_WARN("The roll pose was not found, should use CRH here... \n");
}
}
+ }
- std::cout << "Number of object hypotheses:" << models_->size () << std::endl;
+ std::cout << "Number of object hypotheses:" << models_->size() << std::endl;
- /**
- * POSE REFINEMENT
- **/
+ /**
+ * POSE REFINEMENT
+ **/
- if (ICP_iterations_ > 0)
- {
- pcl::ScopeTime t ("Pose refinement");
+ if (ICP_iterations_ > 0) {
+ pcl::ScopeTime t("Pose refinement");
- //Prepare scene and model clouds for the pose refinement step
- float VOXEL_SIZE_ICP_ = 0.005f;
- PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());
+ // Prepare scene and model clouds for the pose refinement step
+ float VOXEL_SIZE_ICP_ = 0.005f;
+ PointInTPtr cloud_voxelized_icp(new pcl::PointCloud<PointInT>());
- {
- pcl::ScopeTime t ("Voxelize stuff...");
- pcl::VoxelGrid<PointInT> voxel_grid_icp;
- voxel_grid_icp.setInputCloud (processed);
- voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
- voxel_grid_icp.filter (*cloud_voxelized_icp);
- source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
- }
+ {
+ pcl::ScopeTime t("Voxelize stuff...");
+ pcl::VoxelGrid<PointInT> voxel_grid_icp;
+ voxel_grid_icp.setInputCloud(processed);
+ voxel_grid_icp.setLeafSize(VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
+ voxel_grid_icp.filter(*cloud_voxelized_icp);
+ source_->voxelizeAllModels(VOXEL_SIZE_ICP_);
+ }
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(cloud_voxelized_icp, VOXEL_SIZE_ICP_) \
num_threads(omp_get_num_procs())
- for (int i = 0; i < static_cast<int> (models_->size ()); i++)
- {
-
- ConstPointInTPtr model_cloud;
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
-
- if (compute_scale_)
- {
- model_cloud = models_->at (i).getAssembled (-1);
- PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
- pcl::VoxelGrid<PointInT> voxel_grid_icp;
- voxel_grid_icp.setInputCloud (model_aligned_m);
- voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
- voxel_grid_icp.filter (*model_aligned);
- }
- else
- {
- model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
- }
+ // clang-format on
+ for (int i = 0; i < static_cast<int>(models_->size()); i++) {
- pcl::IterativeClosestPoint<PointInT, PointInT> reg;
- reg.setInputSource (model_aligned); //model
- reg.setInputTarget (cloud_voxelized_icp); //scene
- reg.setMaximumIterations (ICP_iterations_);
- reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 3.f);
- reg.setTransformationEpsilon (1e-6);
+ ConstPointInTPtr model_cloud;
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
- typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
- reg.align (*output_);
-
- Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
- transforms_->at (i) = icp_trans * transforms_->at (i);
+ if (compute_scale_) {
+ model_cloud = models_->at(i).getAssembled(-1);
+ PointInTPtr model_aligned_m(new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned_m, transforms_->at(i));
+ pcl::VoxelGrid<PointInT> voxel_grid_icp;
+ voxel_grid_icp.setInputCloud(model_aligned_m);
+ voxel_grid_icp.setLeafSize(VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
+ voxel_grid_icp.filter(*model_aligned);
+ }
+ else {
+ model_cloud = models_->at(i).getAssembled(VOXEL_SIZE_ICP_);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
}
- }
- /**
- * HYPOTHESES VERIFICATION
- **/
+ pcl::IterativeClosestPoint<PointInT, PointInT> reg;
+ reg.setInputSource(model_aligned); // model
+ reg.setInputTarget(cloud_voxelized_icp); // scene
+ reg.setMaximumIterations(ICP_iterations_);
+ reg.setMaxCorrespondenceDistance(VOXEL_SIZE_ICP_ * 3.f);
+ reg.setTransformationEpsilon(1e-6);
- if (hv_algorithm_)
- {
+ typename pcl::PointCloud<PointInT>::Ptr output_(
+ new pcl::PointCloud<PointInT>());
+ reg.align(*output_);
- pcl::ScopeTime t ("HYPOTHESES VERIFICATION");
-
- std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
- aligned_models.resize (models_->size ());
-
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- ConstPointInTPtr model_cloud;
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
-
- if (compute_scale_)
- {
- model_cloud = models_->at (i).getAssembled (-1);
- PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
- pcl::VoxelGrid<PointInT> voxel_grid_icp;
- voxel_grid_icp.setInputCloud (model_aligned_m);
- voxel_grid_icp.setLeafSize (0.005f, 0.005f, 0.005f);
- voxel_grid_icp.filter (*model_aligned);
- }
- else
- {
- model_cloud = models_->at (i).getAssembled (0.005f);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
- }
+ Eigen::Matrix4f icp_trans = reg.getFinalTransformation();
+ transforms_->at(i) = icp_trans * transforms_->at(i);
+ }
+ }
- //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.005f);
- //PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
- //pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
- aligned_models[i] = model_aligned;
- }
+ /**
+ * HYPOTHESES VERIFICATION
+ **/
- std::vector<bool> mask_hv;
- hv_algorithm_->setSceneCloud (input_);
- hv_algorithm_->addModels (aligned_models, true);
- hv_algorithm_->verify ();
- hv_algorithm_->getMask (mask_hv);
+ if (hv_algorithm_) {
- std::shared_ptr<std::vector<ModelT>> models_temp;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_temp;
+ pcl::ScopeTime t("HYPOTHESES VERIFICATION");
- models_temp.reset (new std::vector<ModelT>);
- transforms_temp.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
+ std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
+ aligned_models.resize(models_->size());
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- if (!mask_hv[i])
- continue;
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ ConstPointInTPtr model_cloud;
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
- models_temp->push_back (models_->at (i));
- transforms_temp->push_back (transforms_->at (i));
+ if (compute_scale_) {
+ model_cloud = models_->at(i).getAssembled(-1);
+ PointInTPtr model_aligned_m(new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned_m, transforms_->at(i));
+ pcl::VoxelGrid<PointInT> voxel_grid_icp;
+ voxel_grid_icp.setInputCloud(model_aligned_m);
+ voxel_grid_icp.setLeafSize(0.005f, 0.005f, 0.005f);
+ voxel_grid_icp.filter(*model_aligned);
+ }
+ else {
+ model_cloud = models_->at(i).getAssembled(0.005f);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
}
- models_ = models_temp;
- transforms_ = transforms_temp;
+ aligned_models[i] = model_aligned;
}
- }
- }
+ std::vector<bool> mask_hv;
+ hv_algorithm_->setSceneCloud(input_);
+ hv_algorithm_->addModels(aligned_models, true);
+ hv_algorithm_->verify();
+ hv_algorithm_->getMask(mask_hv);
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::initialize (bool force_retrain)
- {
+ std::shared_ptr<std::vector<ModelT>> models_temp;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_temp;
- //use the source to know what has to be trained and what not, checking if the descr_name directory exists
- //unless force_retrain is true, then train everything
- auto models = source_->getModels ();
- std::cout << "Models size:" << models->size () << std::endl;
+ models_temp.reset(new std::vector<ModelT>);
+ transforms_temp.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
- if (force_retrain)
- {
- for (std::size_t i = 0; i < models->size (); i++)
- {
- source_->removeDescDirectory (models->at (i), training_dir_, descr_name_);
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ if (!mask_hv[i])
+ continue;
+
+ models_temp->push_back(models_->at(i));
+ transforms_temp->push_back(transforms_->at(i));
}
+
+ models_ = models_temp;
+ transforms_ = transforms_temp;
+ }
+ }
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::initialize(
+ bool force_retrain)
+{
+
+ // use the source to decide what still has to be trained, by checking whether the
+ // descr_name directory exists; if force_retrain is true, everything is retrained
+ auto models = source_->getModels();
+ std::cout << "Models size:" << models->size() << std::endl;
+
+ if (force_retrain) {
+ for (std::size_t i = 0; i < models->size(); i++) {
+ source_->removeDescDirectory(models->at(i), training_dir_, descr_name_);
}
+ }
- for (std::size_t i = 0; i < models->size (); i++)
- {
- if (!source_->modelAlreadyTrained (models->at (i), training_dir_, descr_name_))
- {
- for (std::size_t v = 0; v < models->at (i).views_->size (); v++)
- {
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- PointInTPtr view = models->at (i).views_->at (v);
-
- if (view->points.empty ())
- PCL_WARN("View has no points!!!\n");
-
- if (noisify_)
- {
- std::random_device rd;
- std::mt19937 rng(rd());
- std::normal_distribution<float> nd (0.0f, noise_);
- // Noisify each point in the dataset
- for (std::size_t cp = 0; cp < view->points.size (); ++cp)
- view->points[cp].z += nd (rng);
- }
+ for (std::size_t i = 0; i < models->size(); i++) {
+ if (!source_->modelAlreadyTrained(models->at(i), training_dir_, descr_name_)) {
+ for (std::size_t v = 0; v < models->at(i).views_->size(); v++) {
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ PointInTPtr view = models->at(i).views_->at(v);
+
+ if (view->points.empty())
+ PCL_WARN("View has no points!!!\n");
+
+ if (noisify_) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ std::normal_distribution<float> nd(0.0f, noise_);
+ // Noisify each point in the dataset
+ for (std::size_t cp = 0; cp < view->size(); ++cp)
+ (*view)[cp].z += nd(rng);
+ }
- //pro view, compute signatures
- std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
- std::vector < Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > centroids;
- micvfh_estimator_->estimate (view, processed, signatures, centroids);
-
- std::vector<bool> valid_trans;
- std::vector < Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > transforms;
-
- micvfh_estimator_->getValidTransformsVec (valid_trans);
- micvfh_estimator_->getTransformsVec (transforms);
-
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- bf::path desc_dir = path;
- if (!bf::exists (desc_dir))
- bf::create_directory (desc_dir);
-
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *processed);
-
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile (path_pose.str (), models->at (i).poses_->at (v));
-
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile (path_entropy.str (), models->at (i).self_occlusions_->at (v));
-
- //save signatures and centroids to disk
- for (std::size_t j = 0; j < signatures.size (); j++)
- {
- if (valid_trans[j])
- {
- std::stringstream path_centroid;
- path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
- Eigen::Vector3f centroid (centroids[j][0], centroids[j][1], centroids[j][2]);
- PersistenceUtils::writeCentroidToFile (path_centroid.str (), centroid);
-
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
- pcl::io::savePCDFileBinary (path_descriptor.str (), signatures[j]);
-
- //save roll transform
- std::stringstream path_pose;
- path_pose << path << "/roll_trans_" << v << "_" << j << ".txt";
- PersistenceUtils::writeMatrixToFile (path_pose.str (), transforms[j]);
- }
+ // per view, compute signatures
+ std::vector<pcl::PointCloud<FeatureT>,
+ Eigen::aligned_allocator<pcl::PointCloud<FeatureT>>>
+ signatures;
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>
+ centroids;
+ micvfh_estimator_->estimate(view, processed, signatures, centroids);
+
+ std::vector<bool> valid_trans;
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>
+ transforms;
+
+ micvfh_estimator_->getValidTransformsVec(valid_trans);
+ micvfh_estimator_->getTransformsVec(transforms);
+
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ bf::path desc_dir = path;
+ if (!bf::exists(desc_dir))
+ bf::create_directory(desc_dir);
+
+ std::stringstream path_view;
+ path_view << path << "/view_" << v << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *processed);
+
+ std::stringstream path_pose;
+ path_pose << path << "/pose_" << v << ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ models->at(i).poses_->at(v));
+
+ std::stringstream path_entropy;
+ path_entropy << path << "/entropy_" << v << ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ models->at(i).self_occlusions_->at(v));
+
+ // save signatures and centroids to disk
+ for (std::size_t j = 0; j < signatures.size(); j++) {
+ if (valid_trans[j]) {
+ std::stringstream path_centroid;
+ path_centroid << path << "/centroid_" << v << "_" << j << ".txt";
+ Eigen::Vector3f centroid(centroids[j][0], centroids[j][1], centroids[j][2]);
+ PersistenceUtils::writeCentroidToFile(path_centroid.str(), centroid);
+
+ std::stringstream path_descriptor;
+ path_descriptor << path << "/descriptor_" << v << "_" << j << ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor.str(), signatures[j]);
+
+ // save roll transform
+ std::stringstream path_pose;
+ path_pose << path << "/roll_trans_" << v << "_" << j << ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose.str(), transforms[j]);
}
}
-
- }
- else
- {
- //else skip model
- std::cout << "The model has already been trained..." << std::endl;
- //there is no need to keep the views in memory once the model has been trained
- models->at (i).views_->clear ();
}
}
-
- //load features from disk
- //initialize FLANN structure
- loadFeaturesAndCreateFLANN ();
+ else {
+ // else skip model
+ std::cout << "The model has already been trained..." << std::endl;
+ // there is no need to keep the views in memory once the model has been trained
+ models->at(i).views_->clear();
+ }
}
+
+ // load features from disk
+ // initialize FLANN structure
+ loadFeaturesAndCreateFLANN();
+}
-#include <flann/flann.hpp>
-
#include <pcl/apps/3d_rec_framework/pipeline/local_recognizer.h>
#include <pcl/apps/3d_rec_framework/utils/vtk_model_sampling.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
-#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/registration/icp.h>
-#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/registration/transformation_estimation_point_to_plane_lls.h>
+#include <pcl/registration/transformation_estimation_svd.h>
+#include <pcl/visualization/pcl_visualizer.h>
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::loadFeaturesAndCreateFLANN ()
- {
- auto models = source_->getModels ();
- std::cout << "Models size:" << models->size () << std::endl;
-
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- for (const auto& dir_entry : bf::directory_iterator(path))
- {
- std::string file_name = (dir_entry.path ().filename ()).string();
-
- std::vector < std::string > strs;
- boost::split (strs, file_name, boost::is_any_of ("_"));
-
- if (strs[0] == "descriptor")
- {
- std::string full_file_name = dir_entry.path ().string ();
- std::string name = file_name.substr (0, file_name.length () - 4);
- std::vector < std::string > strs;
- boost::split (strs, name, boost::is_any_of ("_"));
-
- flann_model descr_model;
- descr_model.model = models->at (i);
- descr_model.view_id = atoi (strs[1].c_str ());
-
- if (use_cache_)
- {
-
- std::stringstream dir_keypoints;
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
- dir_keypoints << path << "/keypoint_indices_" << descr_model.view_id << ".pcd";
-
- std::stringstream dir_pose;
- dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
-
- Eigen::Matrix4f pose_matrix;
- PersistenceUtils::readMatrixFromFile (dir_pose.str (), pose_matrix);
-
- std::pair<std::string, int> pair_model_view = std::make_pair (models->at (i).id_, descr_model.view_id);
- poses_cache_[pair_model_view] = pose_matrix;
+#include <flann/flann.hpp>
- //load keypoints and save them to cache
- typename pcl::PointCloud<PointInT>::Ptr keypoints (new pcl::PointCloud<PointInT> ());
- pcl::io::loadPCDFile (dir_keypoints.str (), *keypoints);
- keypoints_cache_[pair_model_view] = keypoints;
- }
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::
+ loadFeaturesAndCreateFLANN()
+{
+ auto models = source_->getModels();
+ std::cout << "Models size:" << models->size() << std::endl;
+
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ for (const auto& dir_entry : bf::directory_iterator(path)) {
+ std::string file_name = (dir_entry.path().filename()).string();
+
+ std::vector<std::string> strs;
+ boost::split(strs, file_name, boost::is_any_of("_"));
+
+ if (strs[0] == "descriptor") {
+ std::string full_file_name = dir_entry.path().string();
+ std::string name = file_name.substr(0, file_name.length() - 4);
+ std::vector<std::string> strs;
+ boost::split(strs, name, boost::is_any_of("_"));
+
+ flann_model descr_model;
+ descr_model.model = models->at(i);
+ descr_model.view_id = atoi(strs[1].c_str());
+
+ if (use_cache_) {
+
+ std::stringstream dir_keypoints;
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+ dir_keypoints << path << "/keypoint_indices_" << descr_model.view_id
+ << ".pcd";
+
+ std::stringstream dir_pose;
+ dir_pose << path << "/pose_" << descr_model.view_id << ".txt";
+
+ Eigen::Matrix4f pose_matrix;
+ PersistenceUtils::readMatrixFromFile(dir_pose.str(), pose_matrix);
+
+ std::pair<std::string, int> pair_model_view =
+ std::make_pair(models->at(i).id_, descr_model.view_id);
+ poses_cache_[pair_model_view] = pose_matrix;
+
+ // load keypoints and save them to cache
+ typename pcl::PointCloud<PointInT>::Ptr keypoints(
+ new pcl::PointCloud<PointInT>());
+ pcl::io::loadPCDFile(dir_keypoints.str(), *keypoints);
+ keypoints_cache_[pair_model_view] = keypoints;
+ }
- typename pcl::PointCloud<FeatureT>::Ptr signature (new pcl::PointCloud<FeatureT> ());
- pcl::io::loadPCDFile (full_file_name, *signature);
+ typename pcl::PointCloud<FeatureT>::Ptr signature(
+ new pcl::PointCloud<FeatureT>());
+ pcl::io::loadPCDFile(full_file_name, *signature);
- int size_feat = sizeof(signature->points[0].histogram) / sizeof(float);
+ int size_feat = sizeof((*signature)[0].histogram) / sizeof(float);
- for (std::size_t dd = 0; dd < signature->points.size (); dd++)
- {
- descr_model.keypoint_id = static_cast<int> (dd);
- descr_model.descr.resize (size_feat);
+ for (std::size_t dd = 0; dd < signature->size(); dd++) {
+ descr_model.keypoint_id = static_cast<int>(dd);
+ descr_model.descr.resize(size_feat);
- memcpy (&descr_model.descr[0], &signature->points[dd].histogram[0], size_feat * sizeof(float));
+ memcpy(&descr_model.descr[0],
+ &(*signature)[dd].histogram[0],
+ size_feat * sizeof(float));
- flann_models_.push_back (descr_model);
- }
+ flann_models_.push_back(descr_model);
}
}
}
+ }
- convertToFLANN (flann_models_, flann_data_);
-
- flann_index_ = new flann::Index<DistT> (flann_data_, flann::KDTreeIndexParams (4));
- flann_index_->buildIndex ();
+ convertToFLANN(flann_models_, flann_data_);
+
+ flann_index_ = new flann::Index<DistT>(flann_data_, flann::KDTreeIndexParams(4));
+ flann_index_->buildIndex();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances)
+{
+ flann::Matrix<float> p =
+ flann::Matrix<float>(new float[model.descr.size()], 1, model.descr.size());
+ memcpy(&p.ptr()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
+
+ indices = flann::Matrix<int>(new int[k], 1, k);
+ distances = flann::Matrix<float>(new float[k], 1, k);
+ index->knnSearch(p, indices, distances, k, flann::SearchParams(kdtree_splits_));
+ delete[] p.ptr();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::
+ initialize(bool force_retrain)
+{
+ std::shared_ptr<std::vector<ModelT>> models;
+
+ if (search_model_.empty()) {
+ models = source_->getModels();
+ }
+ else {
+ models = source_->getModels(search_model_);
+ // reset cache and flann structures
+ delete flann_index_;
+
+ flann_models_.clear();
+ poses_cache_.clear();
+ keypoints_cache_.clear();
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::nearestKSearch (flann::Index<DistT> * index,
- const flann_model &model, int k,
- flann::Matrix<int> &indices,
- flann::Matrix<float> &distances)
- {
- flann::Matrix<float> p = flann::Matrix<float> (new float[model.descr.size ()], 1, model.descr.size ());
- memcpy (&p.ptr ()[0], &model.descr[0], p.cols * p.rows * sizeof(float));
+ std::cout << "Models size:" << models->size() << std::endl;
- indices = flann::Matrix<int> (new int[k], 1, k);
- distances = flann::Matrix<float> (new float[k], 1, k);
- index->knnSearch (p, indices, distances, k, flann::SearchParams (kdtree_splits_));
- delete[] p.ptr ();
+ if (force_retrain) {
+ for (std::size_t i = 0; i < models->size(); i++) {
+ source_->removeDescDirectory(models->at(i), training_dir_, descr_name_);
+ }
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::initialize (bool force_retrain)
- {
- std::shared_ptr<std::vector<ModelT>> models;
-
- if(search_model_.empty()) {
- models = source_->getModels ();
- } else {
- models = source_->getModels (search_model_);
- //reset cache and flann structures
- delete flann_index_;
-
- flann_models_.clear();
- poses_cache_.clear();
- keypoints_cache_.clear();
- }
+ for (std::size_t i = 0; i < models->size(); i++) {
+ std::cout << models->at(i).class_ << " " << models->at(i).id_ << std::endl;
+
+ if (!source_->modelAlreadyTrained(models->at(i), training_dir_, descr_name_)) {
+ for (std::size_t v = 0; v < models->at(i).views_->size(); v++) {
+ PointInTPtr processed(new pcl::PointCloud<PointInT>);
+ typename pcl::PointCloud<FeatureT>::Ptr signatures(
+ new pcl::PointCloud<FeatureT>());
+ PointInTPtr keypoints_pointcloud;
+
+ bool success = estimator_->estimate(
+ models->at(i).views_->at(v), processed, keypoints_pointcloud, signatures);
+
+ if (success) {
+ std::string path =
+ source_->getModelDescriptorDir(models->at(i), training_dir_, descr_name_);
+
+ bf::path desc_dir = path;
+ if (!bf::exists(desc_dir))
+ bf::create_directory(desc_dir);
+
+ std::stringstream path_view;
+ path_view << path << "/view_" << v << ".pcd";
+ pcl::io::savePCDFileBinary(path_view.str(), *processed);
+
+ std::stringstream path_pose;
+ path_pose << path << "/pose_" << v << ".txt";
+ PersistenceUtils::writeMatrixToFile(path_pose.str(),
+ models->at(i).poses_->at(v));
+
+ if (v < models->at(i).self_occlusions_->size()) {
+ std::stringstream path_entropy;
+ path_entropy << path << "/entropy_" << v << ".txt";
+ PersistenceUtils::writeFloatToFile(path_entropy.str(),
+ models->at(i).self_occlusions_->at(v));
+ }
- std::cout << "Models size:" << models->size () << std::endl;
+ // save keypoints and signatures to disk
+ std::stringstream keypoints_sstr;
+ keypoints_sstr << path << "/keypoint_indices_" << v << ".pcd";
- if (force_retrain)
- {
- for (std::size_t i = 0; i < models->size (); i++)
- {
- source_->removeDescDirectory (models->at (i), training_dir_, descr_name_);
- }
- }
+ pcl::io::savePCDFileBinary(keypoints_sstr.str(), *keypoints_pointcloud);
- for (std::size_t i = 0; i < models->size (); i++)
- {
- std::cout << models->at (i).class_ << " " << models->at (i).id_ << std::endl;
-
- if (!source_->modelAlreadyTrained (models->at (i), training_dir_, descr_name_))
- {
- for (std::size_t v = 0; v < models->at (i).views_->size (); v++)
- {
- PointInTPtr processed (new pcl::PointCloud<PointInT>);
- typename pcl::PointCloud<FeatureT>::Ptr signatures (new pcl::PointCloud<FeatureT> ());
- PointInTPtr keypoints_pointcloud;
-
- bool success = estimator_->estimate (models->at (i).views_->at (v), processed, keypoints_pointcloud, signatures);
-
- if (success)
- {
- std::string path = source_->getModelDescriptorDir (models->at (i), training_dir_, descr_name_);
-
- bf::path desc_dir = path;
- if (!bf::exists (desc_dir))
- bf::create_directory (desc_dir);
-
- std::stringstream path_view;
- path_view << path << "/view_" << v << ".pcd";
- pcl::io::savePCDFileBinary (path_view.str (), *processed);
-
- std::stringstream path_pose;
- path_pose << path << "/pose_" << v << ".txt";
- PersistenceUtils::writeMatrixToFile (path_pose.str (), models->at (i).poses_->at (v));
-
- if(v < models->at (i).self_occlusions_->size()) {
- std::stringstream path_entropy;
- path_entropy << path << "/entropy_" << v << ".txt";
- PersistenceUtils::writeFloatToFile (path_entropy.str (), models->at (i).self_occlusions_->at (v));
- }
-
- //save keypoints and signatures to disk
- std::stringstream keypoints_sstr;
- keypoints_sstr << path << "/keypoint_indices_" << v << ".pcd";
-
- /*std::shared_ptr<std::vector<int>> indices (new std::vector<int> ());
- indices->resize (keypoints.points.size ());
- for (std::size_t kk = 0; kk < indices->size (); kk++)
- (*indices)[kk] = keypoints.points[kk];
- typename pcl::PointCloud<PointInT> keypoints_pointcloud;
- pcl::copyPointCloud (*processed, *indices, keypoints_pointcloud);*/
- pcl::io::savePCDFileBinary (keypoints_sstr.str (), *keypoints_pointcloud);
-
- std::stringstream path_descriptor;
- path_descriptor << path << "/descriptor_" << v << ".pcd";
- pcl::io::savePCDFileBinary (path_descriptor.str (), *signatures);
- }
+ std::stringstream path_descriptor;
+ path_descriptor << path << "/descriptor_" << v << ".pcd";
+ pcl::io::savePCDFileBinary(path_descriptor.str(), *signatures);
}
- } else {
- std::cout << "Model already trained..." << std::endl;
- //there is no need to keep the views in memory once the model has been trained
- models->at (i).views_->clear();
}
}
-
- loadFeaturesAndCreateFLANN ();
+ else {
+ std::cout << "Model already trained..." << std::endl;
+ // there is no need to keep the views in memory once the model has been trained
+ models->at(i).views_->clear();
+ }
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::recognize ()
- {
-
- models_.reset (new std::vector<ModelT>);
- transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
-
- PointInTPtr processed;
- typename pcl::PointCloud<FeatureT>::Ptr signatures (new pcl::PointCloud<FeatureT> ());
- //pcl::PointCloud<int> keypoints_input;
- PointInTPtr keypoints_pointcloud;
-
- if (signatures_ != nullptr && processed_ != nullptr && (signatures_->size () == keypoints_pointcloud->points.size ()))
- {
- keypoints_pointcloud = keypoints_input_;
- signatures = signatures_;
- processed = processed_;
- std::cout << "Using the ISPK ..." << std::endl;
+ loadFeaturesAndCreateFLANN();
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::
+ recognize()
+{
+
+ models_.reset(new std::vector<ModelT>);
+ transforms_.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
+
+ PointInTPtr processed;
+ typename pcl::PointCloud<FeatureT>::Ptr signatures(new pcl::PointCloud<FeatureT>());
+ PointInTPtr keypoints_pointcloud;
+
+ if (signatures_ != nullptr && processed_ != nullptr && keypoints_input_ != nullptr &&
+ (signatures_->size() == keypoints_input_->size())) {
+ keypoints_pointcloud = keypoints_input_;
+ signatures = signatures_;
+ processed = processed_;
+ std::cout << "Using the ISPK ..." << std::endl;
+ }
+ else {
+ processed.reset(new pcl::PointCloud<PointInT>);
+ if (!indices_.empty()) {
+ PointInTPtr sub_input(new pcl::PointCloud<PointInT>);
+ pcl::copyPointCloud(*input_, indices_, *sub_input);
+ estimator_->estimate(sub_input, processed, keypoints_pointcloud, signatures);
+ }
+ else {
+ estimator_->estimate(input_, processed, keypoints_pointcloud, signatures);
}
- else
- {
- processed.reset( (new pcl::PointCloud<PointInT>));
- if (!indices_.empty ())
- {
- PointInTPtr sub_input (new pcl::PointCloud<PointInT>);
- pcl::copyPointCloud (*input_, indices_, *sub_input);
- estimator_->estimate (sub_input, processed, keypoints_pointcloud, signatures);
- }
- else
- {
- estimator_->estimate (input_, processed, keypoints_pointcloud, signatures);
- }
- processed_ = processed;
+ processed_ = processed;
+ }
- }
+ std::cout << "Number of keypoints:" << keypoints_pointcloud->size() << std::endl;
- std::cout << "Number of keypoints:" << keypoints_pointcloud->points.size () << std::endl;
-
- int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);
-
- //feature matching and object hypotheses
- std::map<std::string, ObjectHypothesis> object_hypotheses;
- {
- for (std::size_t idx = 0; idx < signatures->points.size (); idx++)
- {
- float* hist = signatures->points[idx].histogram;
- std::vector<float> std_hist (hist, hist + size_feat);
- flann_model histogram;
- histogram.descr = std_hist;
- flann::Matrix<int> indices;
- flann::Matrix<float> distances;
- nearestKSearch (flann_index_, histogram, 1, indices, distances);
-
- //read view pose and keypoint coordinates, transform keypoint coordinates to model coordinates
- Eigen::Matrix4f homMatrixPose;
- getPose (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, homMatrixPose);
-
- typename pcl::PointCloud<PointInT>::Ptr keypoints (new pcl::PointCloud<PointInT> ());
- getKeypoints (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, keypoints);
-
- PointInT view_keypoint = keypoints->points[flann_models_.at (indices[0][0]).keypoint_id];
- PointInT model_keypoint;
- model_keypoint.getVector4fMap () = homMatrixPose.inverse () * view_keypoint.getVector4fMap ();
-
- typename std::map<std::string, ObjectHypothesis>::iterator it_map;
- if ((it_map = object_hypotheses.find (flann_models_.at (indices[0][0]).model.id_)) != object_hypotheses.end ())
- {
- //if the object hypothesis already exists, then add information
- ObjectHypothesis oh = (*it_map).second;
- oh.correspondences_pointcloud->points.push_back (model_keypoint);
- oh.correspondences_to_inputcloud->push_back (
- pcl::Correspondence (static_cast<int> (oh.correspondences_pointcloud->points.size () - 1),
- static_cast<int> (idx), distances[0][0]));
- oh.feature_distances_->push_back (distances[0][0]);
+ int size_feat = sizeof((*signatures)[0].histogram) / sizeof(float);
- }
- else
- {
- //create object hypothesis
- ObjectHypothesis oh;
+ // feature matching and object hypotheses
+ std::map<std::string, ObjectHypothesis> object_hypotheses;
+ {
+ for (std::size_t idx = 0; idx < signatures->size(); idx++) {
+ float* hist = (*signatures)[idx].histogram;
+ std::vector<float> std_hist(hist, hist + size_feat);
+ flann_model histogram;
+ histogram.descr = std_hist;
+ flann::Matrix<int> indices;
+ flann::Matrix<float> distances;
+ nearestKSearch(flann_index_, histogram, 1, indices, distances);
+
+ // read view pose and keypoint coordinates, transform keypoint coordinates to
+ // model coordinates
+ Eigen::Matrix4f homMatrixPose;
+ getPose(flann_models_.at(indices[0][0]).model,
+ flann_models_.at(indices[0][0]).view_id,
+ homMatrixPose);
+
+ typename pcl::PointCloud<PointInT>::Ptr keypoints(
+ new pcl::PointCloud<PointInT>());
+ getKeypoints(flann_models_.at(indices[0][0]).model,
+ flann_models_.at(indices[0][0]).view_id,
+ keypoints);
+
+ PointInT view_keypoint =
+ (*keypoints)[flann_models_.at(indices[0][0]).keypoint_id];
+ PointInT model_keypoint;
+ model_keypoint.getVector4fMap() =
+ homMatrixPose.inverse() * view_keypoint.getVector4fMap();
+
+ typename std::map<std::string, ObjectHypothesis>::iterator it_map;
+ if ((it_map = object_hypotheses.find(
+ flann_models_.at(indices[0][0]).model.id_)) != object_hypotheses.end()) {
+ // if the object hypothesis already exists, then add information
+ ObjectHypothesis oh = (*it_map).second;
+ oh.correspondences_pointcloud->points.push_back(model_keypoint);
+ oh.correspondences_to_inputcloud->push_back(pcl::Correspondence(
+ static_cast<int>(oh.correspondences_pointcloud->size() - 1),
+ static_cast<int>(idx),
+ distances[0][0]));
+ oh.feature_distances_->push_back(distances[0][0]);
+ }
+ else {
+ // create object hypothesis
+ ObjectHypothesis oh;
- typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud (new pcl::PointCloud<PointInT> ());
- correspondences_pointcloud->points.push_back (model_keypoint);
+ typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud(
+ new pcl::PointCloud<PointInT>());
+ correspondences_pointcloud->points.push_back(model_keypoint);
- oh.model_ = flann_models_.at (indices[0][0]).model;
- oh.correspondences_pointcloud = correspondences_pointcloud;
- //last keypoint for this model is a correspondence the current scene keypoint
+ oh.model_ = flann_models_.at(indices[0][0]).model;
+ oh.correspondences_pointcloud = correspondences_pointcloud;
+ // the last keypoint for this model is a correspondence to the current scene keypoint
- pcl::CorrespondencesPtr corr (new pcl::Correspondences ());
- oh.correspondences_to_inputcloud = corr;
- oh.correspondences_to_inputcloud->push_back (pcl::Correspondence (0, static_cast<int> (idx), distances[0][0]));
+ pcl::CorrespondencesPtr corr(new pcl::Correspondences());
+ oh.correspondences_to_inputcloud = corr;
+ oh.correspondences_to_inputcloud->push_back(
+ pcl::Correspondence(0, static_cast<int>(idx), distances[0][0]));
- std::shared_ptr<std::vector<float>> feat_dist (new std::vector<float>);
- feat_dist->push_back (distances[0][0]);
+ std::shared_ptr<std::vector<float>> feat_dist(new std::vector<float>);
+ feat_dist->push_back(distances[0][0]);
- oh.feature_distances_ = feat_dist;
- object_hypotheses[oh.model_.id_] = oh;
- }
+ oh.feature_distances_ = feat_dist;
+ object_hypotheses[oh.model_.id_] = oh;
}
}
+ }
- {
- //pcl::ScopeTime t("Geometric verification, RANSAC and transform estimation");
- for (auto it_map = object_hypotheses.cbegin (); it_map != object_hypotheses.cend (); it_map++)
- {
- std::vector < pcl::Correspondences > corresp_clusters;
- cg_algorithm_->setSceneCloud (keypoints_pointcloud);
- cg_algorithm_->setInputCloud ((*it_map).second.correspondences_pointcloud);
- cg_algorithm_->setModelSceneCorrespondences ((*it_map).second.correspondences_to_inputcloud);
- cg_algorithm_->cluster (corresp_clusters);
-
- std::cout << "Instances:" << corresp_clusters.size () << " Total correspondences:" << (*it_map).second.correspondences_to_inputcloud->size () << " " << it_map->first << std::endl;
- std::vector<bool> good_indices_for_hypothesis (corresp_clusters.size (), true);
-
- if (threshold_accept_model_hypothesis_ < 1.f)
- {
- //sort the hypotheses for each model according to their correspondences and take those that are threshold_accept_model_hypothesis_ over the max cardinality
- int max_cardinality = -1;
- for (const auto &corresp_cluster : corresp_clusters)
- {
- //std::cout << (corresp_clusters[i]).size() << " -- " << (*(*it_map).second.correspondences_to_inputcloud).size() << std::endl;
- if (max_cardinality < static_cast<int> (corresp_cluster.size ()))
- {
- max_cardinality = static_cast<int> (corresp_cluster.size ());
- }
+ {
+ for (auto it_map = object_hypotheses.cbegin(); it_map != object_hypotheses.cend();
+ it_map++) {
+ std::vector<pcl::Correspondences> corresp_clusters;
+ cg_algorithm_->setSceneCloud(keypoints_pointcloud);
+ cg_algorithm_->setInputCloud((*it_map).second.correspondences_pointcloud);
+ cg_algorithm_->setModelSceneCorrespondences(
+ (*it_map).second.correspondences_to_inputcloud);
+ cg_algorithm_->cluster(corresp_clusters);
+
+ std::cout << "Instances:" << corresp_clusters.size() << " Total correspondences:"
+ << (*it_map).second.correspondences_to_inputcloud->size() << " "
+ << it_map->first << std::endl;
+ std::vector<bool> good_indices_for_hypothesis(corresp_clusters.size(), true);
+
+ if (threshold_accept_model_hypothesis_ < 1.f) {
+ // for each model, keep only the hypotheses whose number of correspondences is at
+ // least threshold_accept_model_hypothesis_ times the maximum cluster cardinality
+ int max_cardinality = -1;
+ for (const auto& corresp_cluster : corresp_clusters) {
+ if (max_cardinality < static_cast<int>(corresp_cluster.size())) {
+ max_cardinality = static_cast<int>(corresp_cluster.size());
}
+ }
- for (std::size_t i = 0; i < corresp_clusters.size (); i++)
- {
- if (static_cast<float> ((corresp_clusters[i]).size ()) < (threshold_accept_model_hypothesis_ * static_cast<float> (max_cardinality)))
- {
- good_indices_for_hypothesis[i] = false;
- }
+ for (std::size_t i = 0; i < corresp_clusters.size(); i++) {
+ if (static_cast<float>((corresp_clusters[i]).size()) <
+ (threshold_accept_model_hypothesis_ *
+ static_cast<float>(max_cardinality))) {
+ good_indices_for_hypothesis[i] = false;
}
}
+ }
- for (std::size_t i = 0; i < corresp_clusters.size (); i++)
- {
-
- if (!good_indices_for_hypothesis[i])
- continue;
-
- //drawCorrespondences (processed, it_map->second, keypoints_pointcloud, corresp_clusters[i]);
+ for (std::size_t i = 0; i < corresp_clusters.size(); i++) {
- Eigen::Matrix4f best_trans;
- typename pcl::registration::TransformationEstimationSVD < PointInT, PointInT > t_est;
- t_est.estimateRigidTransformation (*(*it_map).second.correspondences_pointcloud, *keypoints_pointcloud, corresp_clusters[i], best_trans);
+ if (!good_indices_for_hypothesis[i])
+ continue;
- models_->push_back ((*it_map).second.model_);
- transforms_->push_back (best_trans);
+ Eigen::Matrix4f best_trans;
+ typename pcl::registration::TransformationEstimationSVD<PointInT, PointInT>
+ t_est;
+ t_est.estimateRigidTransformation(*(*it_map).second.correspondences_pointcloud,
+ *keypoints_pointcloud,
+ corresp_clusters[i],
+ best_trans);
- }
+ models_->push_back((*it_map).second.model_);
+ transforms_->push_back(best_trans);
}
}
+ }
- std::cout << "Number of hypotheses:" << models_->size() << std::endl;
+ std::cout << "Number of hypotheses:" << models_->size() << std::endl;
- /**
- * POSE REFINEMENT
- **/
+ /**
+ * POSE REFINEMENT
+ **/
- if (ICP_iterations_ > 0)
- {
- pcl::ScopeTime ticp ("ICP ");
+ if (ICP_iterations_ > 0) {
+ pcl::ScopeTime ticp("ICP ");
- //Prepare scene and model clouds for the pose refinement step
- PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());
- pcl::VoxelGrid<PointInT> voxel_grid_icp;
- voxel_grid_icp.setInputCloud (processed);
- voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
- voxel_grid_icp.filter (*cloud_voxelized_icp);
- source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
+ // Prepare scene and model clouds for the pose refinement step
+ PointInTPtr cloud_voxelized_icp(new pcl::PointCloud<PointInT>());
+ pcl::VoxelGrid<PointInT> voxel_grid_icp;
+ voxel_grid_icp.setInputCloud(processed);
+ voxel_grid_icp.setLeafSize(VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
+ voxel_grid_icp.filter(*cloud_voxelized_icp);
+ source_->voxelizeAllModels(VOXEL_SIZE_ICP_);
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(cloud_voxelized_icp) \
schedule(dynamic,1) \
num_threads(omp_get_num_procs())
- for (int i = 0; i < static_cast<int>(models_->size ()); i++)
- {
-
- ConstPointInTPtr model_cloud;
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
- model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
-
- typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT>::Ptr rej (
- new pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT> ());
-
- rej->setInputTarget (cloud_voxelized_icp);
- rej->setMaximumIterations (1000);
- rej->setInlierThreshold (0.005f);
- rej->setInputSource (model_aligned);
-
- pcl::IterativeClosestPoint<PointInT, PointInT> reg;
- reg.addCorrespondenceRejector (rej);
- reg.setInputTarget (cloud_voxelized_icp); //scene
- reg.setInputSource (model_aligned); //model
- reg.setMaximumIterations (ICP_iterations_);
- reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 4.f);
-
- typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
- reg.align (*output_);
-
- Eigen::Matrix4f icp_trans = reg.getFinalTransformation ();
- transforms_->at (i) = icp_trans * transforms_->at (i);
- }
+ // clang-format on
+ for (int i = 0; i < static_cast<int>(models_->size()); i++) {
+
+ ConstPointInTPtr model_cloud;
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
+ model_cloud = models_->at(i).getAssembled(VOXEL_SIZE_ICP_);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
+
+ typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT>::Ptr
+ rej(new pcl::registration::CorrespondenceRejectorSampleConsensus<PointInT>());
+
+ rej->setInputTarget(cloud_voxelized_icp);
+ rej->setMaximumIterations(1000);
+ rej->setInlierThreshold(0.005f);
+ rej->setInputSource(model_aligned);
+
+ pcl::IterativeClosestPoint<PointInT, PointInT> reg;
+ reg.addCorrespondenceRejector(rej);
+ reg.setInputTarget(cloud_voxelized_icp); // scene
+ reg.setInputSource(model_aligned); // model
+ reg.setMaximumIterations(ICP_iterations_);
+ reg.setMaxCorrespondenceDistance(VOXEL_SIZE_ICP_ * 4.f);
+
+ typename pcl::PointCloud<PointInT>::Ptr output_(new pcl::PointCloud<PointInT>());
+ reg.align(*output_);
+
+ Eigen::Matrix4f icp_trans = reg.getFinalTransformation();
+ transforms_->at(i) = icp_trans * transforms_->at(i);
}
+ }
- /**
- * HYPOTHESES VERIFICATION
- **/
-
- if (hv_algorithm_)
- {
-
- pcl::ScopeTime thv ("HV verification");
-
- std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
- aligned_models.resize (models_->size ());
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.0025f);
- //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
- PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
- aligned_models[i] = model_aligned;
- }
-
- std::vector<bool> mask_hv;
- hv_algorithm_->setSceneCloud (input_);
- hv_algorithm_->addModels (aligned_models, true);
- hv_algorithm_->verify ();
- hv_algorithm_->getMask (mask_hv);
-
- std::shared_ptr<std::vector<ModelT>> models_temp;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_temp;
-
- models_temp.reset (new std::vector<ModelT>);
- transforms_temp.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
-
- for (std::size_t i = 0; i < models_->size (); i++)
- {
- if (!mask_hv[i])
- continue;
+ /**
+ * HYPOTHESES VERIFICATION
+ **/
- models_temp->push_back (models_->at (i));
- transforms_temp->push_back (transforms_->at (i));
- }
+ if (hv_algorithm_) {
- models_ = models_temp;
- transforms_ = transforms_temp;
+ pcl::ScopeTime thv("HV verification");
+ std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
+ aligned_models.resize(models_->size());
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ ConstPointInTPtr model_cloud = models_->at(i).getAssembled(0.0025f);
+ PointInTPtr model_aligned(new pcl::PointCloud<PointInT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms_->at(i));
+ aligned_models[i] = model_aligned;
}
- }
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix)
- {
+ std::vector<bool> mask_hv;
+ hv_algorithm_->setSceneCloud(input_);
+ hv_algorithm_->addModels(aligned_models, true);
+ hv_algorithm_->verify();
+ hv_algorithm_->getMask(mask_hv);
- if (use_cache_)
- {
- using mv_pair = std::pair<std::string, int>;
- mv_pair pair_model_view = std::make_pair (model.id_, view_id);
+ std::shared_ptr<std::vector<ModelT>> models_temp;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_temp;
- std::map<mv_pair, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
+ models_temp.reset(new std::vector<ModelT>);
+ transforms_temp.reset(
+ new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>);
- if (it != poses_cache_.end ())
- {
- pose_matrix = it->second;
- return;
- }
+ for (std::size_t i = 0; i < models_->size(); i++) {
+ if (!mask_hv[i])
+ continue;
+ models_temp->push_back(models_->at(i));
+ transforms_temp->push_back(transforms_->at(i));
}
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/pose_" << view_id << ".txt";
-
- PersistenceUtils::readMatrixFromFile (dir.str (), pose_matrix);
+ models_ = models_temp;
+ transforms_ = transforms_temp;
+ }
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::getPose(
+ ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix)
+{
+
+ if (use_cache_) {
+ using mv_pair = std::pair<std::string, int>;
+ mv_pair pair_model_view = std::make_pair(model.id_, view_id);
+
+ std::map<mv_pair,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f>>>::
+ iterator it = poses_cache_.find(pair_model_view);
+
+ if (it != poses_cache_.end()) {
+ pose_matrix = it->second;
+ return;
+ }
}
-template<template<class > class Distance, typename PointInT, typename FeatureT>
- void
- pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::getKeypoints (
- ModelT & model,
- int view_id,
- typename pcl::PointCloud<PointInT>::Ptr & keypoints_cloud)
- {
-
- if (use_cache_)
- {
- std::pair<std::string, int> pair_model_view = std::make_pair (model.id_, view_id);
- typename std::map<std::pair<std::string, int>, PointInTPtr>::iterator it = keypoints_cache_.find (pair_model_view);
-
- if (it != keypoints_cache_.end ())
- {
- keypoints_cloud = it->second;
- return;
- }
-
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/pose_" << view_id << ".txt";
+
+ PersistenceUtils::readMatrixFromFile(dir.str(), pose_matrix);
+}
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+void
+pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::
+ getKeypoints(ModelT& model,
+ int view_id,
+ typename pcl::PointCloud<PointInT>::Ptr& keypoints_cloud)
+{
+
+ if (use_cache_) {
+ std::pair<std::string, int> pair_model_view = std::make_pair(model.id_, view_id);
+ typename std::map<std::pair<std::string, int>, PointInTPtr>::iterator it =
+ keypoints_cache_.find(pair_model_view);
+
+ if (it != keypoints_cache_.end()) {
+ keypoints_cloud = it->second;
+ return;
}
+ }
- std::stringstream dir;
- std::string path = source_->getModelDescriptorDir (model, training_dir_, descr_name_);
- dir << path << "/keypoint_indices_" << view_id << ".pcd";
+ std::stringstream dir;
+ std::string path = source_->getModelDescriptorDir(model, training_dir_, descr_name_);
+ dir << path << "/keypoint_indices_" << view_id << ".pcd";
- pcl::io::loadPCDFile (dir.str (), *keypoints_cloud);
- }
+ pcl::io::loadPCDFile(dir.str(), *keypoints_cloud);
+}
#pragma once
-#include <flann/util/matrix.h>
-#include <pcl/common/common.h>
-#include <pcl/apps/3d_rec_framework/pc_source/source.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/local_estimator.h>
+#include <pcl/apps/3d_rec_framework/pc_source/source.h>
+#include <pcl/common/common.h>
#include <pcl/recognition/cg/correspondence_grouping.h>
#include <pcl/recognition/hv/hypotheses_verification.h>
#include <pcl/visualization/pcl_visualizer.h>
+#include <flann/util/matrix.h>
+
inline bool
-correspSorter (const pcl::Correspondence & i, const pcl::Correspondence & j)
+correspSorter(const pcl::Correspondence& i, const pcl::Correspondence& j)
{
return (i.distance < j.distance);
}
-namespace pcl
-{
- namespace rec_3d_framework
+namespace pcl {
+namespace rec_3d_framework {
+
+/**
+ * \brief Object recognition + 6DOF pose based on local features, GC and HV
+ * Contains keypoints/local features computation, matching using FLANN,
+ * point-to-point correspondence grouping, pose refinement and hypotheses verification
+ * Available features: SHOT, FPFH
+ * See apps/3d_rec_framework/tools/apps for usage
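+ * Minimal usage sketch (illustrative only; source, estimator, cg and hv are assumed
+ * to be configured elsewhere):
+ * \code
+ * pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1,
+ *                                                 pcl::PointXYZ,
+ *                                                 pcl::Histogram<352>> local;
+ * local.setDataSource(source);
+ * local.setFeatureEstimator(estimator);
+ * local.setCGAlgorithm(cg);
+ * local.setHVAlgorithm(hv);
+ * local.setDescriptorName(descr_name);
+ * local.setTrainingDir(training_dir);
+ * local.initialize();     // trains untrained models, builds the FLANN index
+ * local.setInputCloud(scene);
+ * local.recognize();
+ * auto models = local.getModels();
+ * auto poses = local.getTransforms();
+ * \endcode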
+ * \author Aitor Aldoma, Federico Tombari
+ */
+
+template <template <class> class Distance, typename PointInT, typename FeatureT>
+class PCL_EXPORTS LocalRecognitionPipeline {
+
+ using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
+ using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
+
+ using DistT = Distance<float>;
+ using ModelT = Model<PointInT>;
+
+ /** \brief Directory where the trained structure will be saved */
+ std::string training_dir_;
+
+ /** \brief Point cloud to be classified */
+ PointInTPtr input_;
+
+ /** \brief Model data source */
+ std::shared_ptr<Source<PointInT>> source_;
+
+ /** \brief Computes a feature */
+ std::shared_ptr<LocalEstimator<PointInT, FeatureT>> estimator_;
+
+ /** \brief Point-to-point correspondence grouping algorithm */
+ std::shared_ptr<CorrespondenceGrouping<PointInT, PointInT>> cg_algorithm_;
+
+ /** \brief Hypotheses verification algorithm */
+ std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
+
+ /** \brief Descriptor name */
+ std::string descr_name_;
+
+ /** \brief Id of the model to be used */
+ std::string search_model_;
+
+ bool compute_table_plane_;
+
+ class flann_model {
+ public:
+ ModelT model;
+ int view_id;
+ int keypoint_id;
+ std::vector<float> descr;
+ };
+
+ flann::Matrix<float> flann_data_;
+ flann::Index<DistT>* flann_index_;
+ std::vector<flann_model> flann_models_;
+
+ std::vector<int> indices_;
+
+ bool use_cache_;
+ std::map<std::pair<std::string, int>,
+ Eigen::Matrix4f,
+ std::less<>,
+ Eigen::aligned_allocator<
+ std::pair<const std::pair<std::string, int>, Eigen::Matrix4f>>>
+ poses_cache_;
+ std::map<std::pair<std::string, int>, typename pcl::PointCloud<PointInT>::Ptr>
+ keypoints_cache_;
+
+ float threshold_accept_model_hypothesis_;
+ int ICP_iterations_;
+
+ std::shared_ptr<std::vector<ModelT>> models_;
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ transforms_;
+
+ int kdtree_splits_;
+ float VOXEL_SIZE_ICP_;
+
+ PointInTPtr keypoints_input_;
+ PointInTPtr processed_;
+ typename pcl::PointCloud<FeatureT>::Ptr signatures_;
+
+ // load features from disk and create flann structure
+ void
+ loadFeaturesAndCreateFLANN();
+
+ inline void
+ convertToFLANN(const std::vector<flann_model>& models, flann::Matrix<float>& data)
{
- /**
- * \brief Object recognition + 6DOF pose based on local features, GC and HV
- * Contains keypoints/local features computation, matching using FLANN,
- * point-to-point correspondence grouping, pose refinement and hypotheses verification
- * Available features: SHOT, FPFH
- * See apps/3d_rec_framework/tools/apps for usage
- * \author Aitor Aldoma, Federico Tombari
- */
+ data.rows = models.size();
+ data.cols = models[0].descr.size(); // number of histogram bins
- template<template<class > class Distance, typename PointInT, typename FeatureT>
- class PCL_EXPORTS LocalRecognitionPipeline
- {
+ flann::Matrix<float> flann_data(new float[models.size() * models[0].descr.size()],
+ models.size(),
+ models[0].descr.size());
- using PointInTPtr = typename pcl::PointCloud<PointInT>::Ptr;
- using ConstPointInTPtr = typename pcl::PointCloud<PointInT>::ConstPtr;
+ for (std::size_t i = 0; i < data.rows; ++i)
+ for (std::size_t j = 0; j < data.cols; ++j) {
+ flann_data.ptr()[i * data.cols + j] = models[i].descr[j];
+ }
- using DistT = Distance<float>;
- using ModelT = Model<PointInT>;
+ data = flann_data;
+ }
- /** \brief Directory where the trained structure will be saved */
- std::string training_dir_;
+ void
+ nearestKSearch(flann::Index<DistT>* index,
+ const flann_model& model,
+ int k,
+ flann::Matrix<int>& indices,
+ flann::Matrix<float>& distances);
+
+ class ObjectHypothesis {
+ public:
+ ModelT model_;
+ typename pcl::PointCloud<PointInT>::Ptr
+ correspondences_pointcloud; // points in model coordinates
+ std::shared_ptr<std::vector<float>> feature_distances_;
+ pcl::CorrespondencesPtr
+ correspondences_to_inputcloud; // indices between correspondences_pointcloud and
+ // scene cloud
+ };
+
+ void
+ getPose(ModelT& model, int view_id, Eigen::Matrix4f& pose_matrix);
+
+ void
+ getKeypoints(ModelT& model,
+ int view_id,
+ typename pcl::PointCloud<PointInT>::Ptr& keypoints_cloud);
+
+ void
+ drawCorrespondences(PointInTPtr& cloud,
+ ObjectHypothesis& oh,
+ PointInTPtr& keypoints_pointcloud,
+ pcl::Correspondences& correspondences)
+ {
+ pcl::visualization::PCLVisualizer vis_corresp_;
+ vis_corresp_.setWindowName("correspondences...");
+ pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler(
+ cloud, 255, 0, 0);
+ vis_corresp_.addPointCloud<PointInT>(cloud, random_handler, "points");
+
+ typename pcl::PointCloud<PointInT>::ConstPtr cloud_sampled;
+ cloud_sampled = oh.model_.getAssembled(0.0025f);
+
+ pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler_sampled(
+ cloud_sampled, 0, 0, 255);
+ vis_corresp_.addPointCloud<PointInT>(
+ cloud_sampled, random_handler_sampled, "sampled");
+
+ for (std::size_t kk = 0; kk < correspondences.size(); kk++) {
+ pcl::PointXYZ p;
+ p.getVector4fMap() =
+ (*oh.correspondences_pointcloud)[correspondences[kk].index_query]
+ .getVector4fMap();
+ pcl::PointXYZ p_scene;
+ p_scene.getVector4fMap() =
+ (*keypoints_pointcloud)[correspondences[kk].index_match].getVector4fMap();
+
+ std::stringstream line_name;
+ line_name << "line_" << kk;
+
+ vis_corresp_.addLine<pcl::PointXYZ, pcl::PointXYZ>(p_scene, p, line_name.str());
+ }
+
+ vis_corresp_.spin();
+ vis_corresp_.removeAllPointClouds();
+ vis_corresp_.removeAllShapes();
+ vis_corresp_.close();
+ }
- /** \brief Point cloud to be classified */
- PointInTPtr input_;
-
- /** \brief Model data source */
- std::shared_ptr<Source<PointInT>> source_;
-
- /** \brief Computes a feature */
- std::shared_ptr<LocalEstimator<PointInT, FeatureT>> estimator_;
+public:
+ LocalRecognitionPipeline()
+ {
+ use_cache_ = false;
+ threshold_accept_model_hypothesis_ = 0.2f;
+ ICP_iterations_ = 30;
+ kdtree_splits_ = 512;
+ search_model_ = "";
+ VOXEL_SIZE_ICP_ = 0.0025f;
+ compute_table_plane_ = false;
+ }
- /** \brief Point-to-point correspondence grouping algorithm */
- std::shared_ptr<CorrespondenceGrouping<PointInT, PointInT>> cg_algorithm_;
+ void
+ setISPK(typename pcl::PointCloud<FeatureT>::Ptr& signatures,
+ PointInTPtr& p,
+ PointInTPtr& keypoints)
+ {
+ keypoints_input_ = keypoints;
+ signatures_ = signatures;
+ processed_ = p;
+ }
- /** \brief Hypotheses verification algorithm */
- std::shared_ptr<HypothesisVerification<PointInT, PointInT>> hv_algorithm_;
-
- /** \brief Descriptor name */
- std::string descr_name_;
-
- /** \brief Id of the model to be used */
- std::string search_model_;
+ void
+ setVoxelSizeICP(float s)
+ {
+ VOXEL_SIZE_ICP_ = s;
+ }
+ void
+ setSearchModel(std::string& id)
+ {
+ search_model_ = id;
+ }
- bool compute_table_plane_;
+ void
+ setThresholdAcceptHyp(float t)
+ {
+ threshold_accept_model_hypothesis_ = t;
+ }
- class flann_model
- {
- public:
- ModelT model;
- int view_id;
- int keypoint_id;
- std::vector<float> descr;
- };
+ ~LocalRecognitionPipeline() {}
- flann::Matrix<float> flann_data_;
- flann::Index<DistT> * flann_index_;
- std::vector<flann_model> flann_models_;
-
- std::vector<int> indices_;
-
- bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f,
- std::less<>,
- Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
- std::map<std::pair<std::string, int>, typename pcl::PointCloud<PointInT>::Ptr> keypoints_cache_;
-
- float threshold_accept_model_hypothesis_;
- int ICP_iterations_;
-
- std::shared_ptr<std::vector<ModelT>> models_;
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>> transforms_;
-
- int kdtree_splits_;
- float VOXEL_SIZE_ICP_;
-
- PointInTPtr keypoints_input_;
- PointInTPtr processed_;
- typename pcl::PointCloud<FeatureT>::Ptr signatures_;
-
- //load features from disk and create flann structure
- void
- loadFeaturesAndCreateFLANN ();
-
- inline void
- convertToFLANN (const std::vector<flann_model> &models, flann::Matrix<float> &data)
- {
- data.rows = models.size ();
- data.cols = models[0].descr.size (); // number of histogram bins
-
- flann::Matrix<float> flann_data (new float[models.size () * models[0].descr.size ()], models.size (), models[0].descr.size ());
-
- for (std::size_t i = 0; i < data.rows; ++i)
- for (std::size_t j = 0; j < data.cols; ++j)
- {
- flann_data.ptr ()[i * data.cols + j] = models[i].descr[j];
- }
-
- data = flann_data;
- }
-
- void
- nearestKSearch (flann::Index<DistT> * index, const flann_model &model, int k, flann::Matrix<int> &indices, flann::Matrix<float> &distances);
-
- class ObjectHypothesis
- {
- public:
- ModelT model_;
- typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud; //points in model coordinates
- std::shared_ptr<std::vector<float> > feature_distances_;
- pcl::CorrespondencesPtr correspondences_to_inputcloud; //indices between correspondences_pointcloud and scene cloud
- };
-
- void
- getPose (ModelT & model, int view_id, Eigen::Matrix4f & pose_matrix);
-
- void
- getKeypoints (ModelT & model, int view_id, typename pcl::PointCloud<PointInT>::Ptr & keypoints_cloud);
-
- void
- drawCorrespondences (PointInTPtr & cloud, ObjectHypothesis & oh, PointInTPtr & keypoints_pointcloud, pcl::Correspondences & correspondences)
- {
- pcl::visualization::PCLVisualizer vis_corresp_;
- vis_corresp_.setWindowName("correspondences...");
- pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler (cloud, 255, 0, 0);
- vis_corresp_.addPointCloud<PointInT> (cloud, random_handler, "points");
-
- typename pcl::PointCloud<PointInT>::ConstPtr cloud_sampled;
- cloud_sampled = oh.model_.getAssembled (0.0025f);
-
- pcl::visualization::PointCloudColorHandlerCustom<PointInT> random_handler_sampled (cloud_sampled, 0, 0, 255);
- vis_corresp_.addPointCloud<PointInT> (cloud_sampled, random_handler_sampled, "sampled");
-
- for (std::size_t kk = 0; kk < correspondences.size (); kk++)
- {
- pcl::PointXYZ p;
- p.getVector4fMap () = oh.correspondences_pointcloud->points[correspondences[kk].index_query].getVector4fMap ();
- pcl::PointXYZ p_scene;
- p_scene.getVector4fMap () = keypoints_pointcloud->points[correspondences[kk].index_match].getVector4fMap ();
-
- std::stringstream line_name;
- line_name << "line_" << kk;
-
- vis_corresp_.addLine<pcl::PointXYZ, pcl::PointXYZ> (p_scene, p, line_name.str ());
- }
-
- vis_corresp_.spin ();
- vis_corresp_.removeAllPointClouds();
- vis_corresp_.removeAllShapes();
- vis_corresp_.close();
- }
-
- public:
-
- LocalRecognitionPipeline ()
- {
- use_cache_ = false;
- threshold_accept_model_hypothesis_ = 0.2f;
- ICP_iterations_ = 30;
- kdtree_splits_ = 512;
- search_model_ = "";
- VOXEL_SIZE_ICP_ = 0.0025f;
- compute_table_plane_ = false;
- }
-
- void setISPK(typename pcl::PointCloud<FeatureT>::Ptr & signatures, PointInTPtr & p, PointInTPtr & keypoints)
- {
- keypoints_input_ = keypoints;
- signatures_ = signatures;
- processed_ = p;
- }
-
- void setVoxelSizeICP(float s) {
- VOXEL_SIZE_ICP_ = s;
- }
- void
- setSearchModel (std::string & id)
- {
- search_model_ = id;
- }
-
- void
- setThresholdAcceptHyp (float t)
- {
- threshold_accept_model_hypothesis_ = t;
- }
-
- ~LocalRecognitionPipeline ()
- {
-
- }
-
- void
- setKdtreeSplits (int n)
- {
- kdtree_splits_ = n;
- }
-
- void
- setIndices (std::vector<int> & indices)
- {
- indices_ = indices;
- }
-
- void
- setICPIterations (int it)
- {
- ICP_iterations_ = it;
- }
-
- void
- setUseCache (bool u)
- {
- use_cache_ = u;
- }
-
- std::shared_ptr<std::vector<ModelT>>
- getModels ()
- {
- return models_;
- }
-
- std::shared_ptr<std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
- getTransforms ()
- {
- return transforms_;
- }
-
- /**
- * \brief Sets the model data source_
- */
- void
- setDataSource (std::shared_ptr<Source<PointInT>>& source)
- {
- source_ = source;
- }
-
- std::shared_ptr<Source<PointInT>>
- getDataSource ()
- {
- return source_;
- }
-
- /**
- * \brief Sets the local feature estimator
- */
- void
- setFeatureEstimator (std::shared_ptr<LocalEstimator<PointInT, FeatureT>>& feat)
- {
- estimator_ = feat;
- }
-
- /**
- * \brief Sets the CG algorithm
- */
- void
- setCGAlgorithm (std::shared_ptr<CorrespondenceGrouping<PointInT, PointInT>>& alg)
- {
- cg_algorithm_ = alg;
- }
-
- /**
- * \brief Sets the HV algorithm
- */
- void
- setHVAlgorithm (std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
- {
- hv_algorithm_ = alg;
- }
-
- /**
- * \brief Sets the input cloud to be classified
- */
- void
- setInputCloud (const PointInTPtr & cloud)
- {
- input_ = cloud;
- }
-
- /**
- * \brief Sets the descriptor name
- */
- void
- setDescriptorName (std::string & name)
- {
- descr_name_ = name;
- }
-
- /**
- * \brief Filesystem dir where to keep the generated training data
- */
- void
- setTrainingDir (std::string & dir)
- {
- training_dir_ = dir;
- }
-
- void
- setComputeTablePlane(bool b) {
- compute_table_plane_ = b;
- }
-
- void
- getProcessed(PointInTPtr & cloud) {
- cloud = processed_;
- }
-
- /**
- * \brief Initializes the FLANN structure from the provided source
- * It does training for the models that haven't been trained yet
- */
-
- void
- initialize (bool force_retrain = false);
-
- /**
- * \brief Performs recognition and pose estimation on the input cloud
- */
-
- void
- recognize ();
- };
+ void
+ setKdtreeSplits(int n)
+ {
+ kdtree_splits_ = n;
}
-}
+
+ void
+ setIndices(std::vector<int>& indices)
+ {
+ indices_ = indices;
+ }
+
+ void
+ setICPIterations(int it)
+ {
+ ICP_iterations_ = it;
+ }
+
+ void
+ setUseCache(bool u)
+ {
+ use_cache_ = u;
+ }
+
+ std::shared_ptr<std::vector<ModelT>>
+ getModels()
+ {
+ return models_;
+ }
+
+ std::shared_ptr<
+ std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f>>>
+ getTransforms()
+ {
+ return transforms_;
+ }
+
+ /**
+ * \brief Sets the model data source_
+ */
+ void
+ setDataSource(std::shared_ptr<Source<PointInT>>& source)
+ {
+ source_ = source;
+ }
+
+ std::shared_ptr<Source<PointInT>>
+ getDataSource()
+ {
+ return source_;
+ }
+
+ /**
+ * \brief Sets the local feature estimator
+ */
+ void
+ setFeatureEstimator(std::shared_ptr<LocalEstimator<PointInT, FeatureT>>& feat)
+ {
+ estimator_ = feat;
+ }
+
+ /**
+ * \brief Sets the CG algorithm
+ */
+ void
+ setCGAlgorithm(std::shared_ptr<CorrespondenceGrouping<PointInT, PointInT>>& alg)
+ {
+ cg_algorithm_ = alg;
+ }
+
+ /**
+ * \brief Sets the HV algorithm
+ */
+ void
+ setHVAlgorithm(std::shared_ptr<HypothesisVerification<PointInT, PointInT>>& alg)
+ {
+ hv_algorithm_ = alg;
+ }
+
+ /**
+ * \brief Sets the input cloud to be classified
+ */
+ void
+ setInputCloud(const PointInTPtr& cloud)
+ {
+ input_ = cloud;
+ }
+
+ /**
+ * \brief Sets the descriptor name
+ */
+ void
+ setDescriptorName(std::string& name)
+ {
+ descr_name_ = name;
+ }
+
+ /**
+ * \brief Filesystem dir where to keep the generated training data
+ */
+ void
+ setTrainingDir(std::string& dir)
+ {
+ training_dir_ = dir;
+ }
+
+ void
+ setComputeTablePlane(bool b)
+ {
+ compute_table_plane_ = b;
+ }
+
+ void
+ getProcessed(PointInTPtr& cloud)
+ {
+ cloud = processed_;
+ }
+
+ /**
+ * \brief Initializes the FLANN structure from the provided source
+ * It does training for the models that haven't been trained yet
+ */
+
+ void
+ initialize(bool force_retrain = false);
+
+ /**
+ * \brief Performs recognition and pose estimation on the input cloud
+ */
+
+ void
+ recognize();
+};
+
+} // namespace rec_3d_framework
+} // namespace pcl
#include <mutex>
-namespace OpenNIFrameSource
-{
-
- using PointT = pcl::PointXYZRGBA;
- using PointCloud = pcl::PointCloud<PointT>;
- using PointCloudPtr = pcl::PointCloud<PointT>::Ptr;
- using PointCloudConstPtr = pcl::PointCloud<PointT>::ConstPtr;
-
- /* A simple class for capturing data from an OpenNI camera */
- class PCL_EXPORTS OpenNIFrameSource
- {
- public:
- OpenNIFrameSource (const std::string& device_id = "");
- ~OpenNIFrameSource ();
-
- const PointCloudPtr
- snap ();
- bool
- isActive () const;
- void
- onKeyboardEvent (const pcl::visualization::KeyboardEvent & event);
-
- protected:
- void
- onNewFrame (const PointCloudConstPtr &cloud);
-
- pcl::OpenNIGrabber grabber_;
- PointCloudPtr most_recent_frame_;
- int frame_counter_;
- std::mutex mutex_;
- bool active_;
- };
-
-}
+namespace OpenNIFrameSource {
+
+using PointT = pcl::PointXYZRGBA;
+using PointCloud = pcl::PointCloud<PointT>;
+using PointCloudPtr = pcl::PointCloud<PointT>::Ptr;
+using PointCloudConstPtr = pcl::PointCloud<PointT>::ConstPtr;
+
+/* A simple class for capturing data from an OpenNI camera */
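+/* Typical capture loop (sketch; assumes an OpenNI device is connected):
+ *   OpenNIFrameSource::OpenNIFrameSource camera;
+ *   while (camera.isActive()) {
+ *     OpenNIFrameSource::PointCloudPtr frame = camera.snap();
+ *     // ... process frame ...
+ *   }
+ */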
+class PCL_EXPORTS OpenNIFrameSource {
+public:
+ OpenNIFrameSource(const std::string& device_id = "");
+ ~OpenNIFrameSource();
+
+ const PointCloudPtr
+ snap();
+ bool
+ isActive() const;
+ void
+ onKeyboardEvent(const pcl::visualization::KeyboardEvent& event);
+
+protected:
+ void
+ onNewFrame(const PointCloudConstPtr& cloud);
+
+ pcl::OpenNIGrabber grabber_;
+ PointCloudPtr most_recent_frame_;
+ int frame_counter_;
+ std::mutex mutex_;
+ bool active_;
+};
+
+} // namespace OpenNIFrameSource
#pragma once
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
#include <cmath>
#include <cstdlib>
-namespace Metrics
-{
- using ::std::abs;
-
- template<typename T>
- struct Accumulator
- {
- using Type = T;
- };
-
- template<>
- struct Accumulator<unsigned char>
- {
- using Type = float;
- };
- template<>
- struct Accumulator<unsigned short>
- {
- using Type = float;
- };
- template<>
- struct Accumulator<unsigned int>
- {
- using Type = float;
- };
- template<>
- struct Accumulator<char>
- {
- using Type = float;
- };
- template<>
- struct Accumulator<short>
- {
- using Type = float;
- };
- template<>
- struct Accumulator<int>
- {
- using Type = float;
- };
-
- template<class T>
- struct HistIntersectionUnionDistance
- {
- using ElementType = T;
- using ResultType = typename Accumulator<T>::Type;
-
- /**
- * Compute a distance between two vectors using (1 - (1 + sum(min(a_i,b_i))) / (1 + sum(max(a_i, b_i))) )
- *
- * This distance is not a valid kdtree distance, it's not dimensionwise additive
- * and ignores worst_dist parameter.
- */
-
- template<typename Iterator1, typename Iterator2>
- ResultType
- operator() (Iterator1 a, Iterator2 b, std::size_t size, ResultType worst_dist = -1) const
- {
- (void)worst_dist;
- ResultType result = ResultType ();
- ResultType min0, min1, min2, min3;
- ResultType max0, max1, max2, max3;
- Iterator1 last = a + size;
- Iterator1 lastgroup = last - 3;
-
- ResultType sum_min, sum_max;
- sum_min = 0;
- sum_max = 0;
-
- while (a < lastgroup)
- {
- min0 = (a[0] < b[0] ? a[0] : b[0]);
- max0 = (a[0] > b[0] ? a[0] : b[0]);
- min1 = (a[1] < b[1] ? a[1] : b[1]);
- max1 = (a[1] > b[1] ? a[1] : b[1]);
- min2 = (a[2] < b[2] ? a[2] : b[2]);
- max2 = (a[2] > b[2] ? a[2] : b[2]);
- min3 = (a[3] < b[3] ? a[3] : b[3]);
- max3 = (a[3] > b[3] ? a[3] : b[3]);
- sum_min += min0 + min1 + min2 + min3;
- sum_max += max0 + max1 + max2 + max3;
- a += 4;
- b += 4;
- /*if (worst_dist>0 && result>worst_dist) {
- return result;
- }*/
- }
-
- while (a < last)
- {
- min0 = *a < *b ? *a : *b;
- max0 = *a > *b ? *a : *b;
- sum_min += min0;
- sum_max += max0;
- a++;
- b++;
- //std::cout << a << " " << last << std::endl;
- }
-
- result = static_cast<ResultType> (1.0 - ((1 + sum_min) / (1 + sum_max)));
- return result;
- }
-
- /* This distance functor is not dimension-wise additive, which
- * makes it an invalid kd-tree distance, not implementing the accum_dist method */
- /**
- * Partial distance, used by the kd-tree.
- */
- template<typename U, typename V>
- inline ResultType
- accum_dist (const U& a, const V& b, int) const
- {
- //printf("New code being used, accum_dist\n");
- ResultType min0;
- ResultType max0;
- min0 = (a < b ? a : b) + 1.0;
- max0 = (a > b ? a : b) + 1.0;
- return (1 - (min0 / max0));
- //return (a+b-2*(a<b?a:b));
- }
- };
-}
+namespace Metrics {
+
+using ::std::abs;
+
+template <typename T>
+struct Accumulator {
+ using Type = T;
+};
+
+template <>
+struct Accumulator<unsigned char> {
+ using Type = float;
+};
+template <>
+struct Accumulator<unsigned short> {
+ using Type = float;
+};
+template <>
+struct Accumulator<unsigned int> {
+ using Type = float;
+};
+template <>
+struct Accumulator<char> {
+ using Type = float;
+};
+template <>
+struct Accumulator<short> {
+ using Type = float;
+};
+template <>
+struct Accumulator<int> {
+ using Type = float;
+};
+
+template <class T>
+struct HistIntersectionUnionDistance {
+ using ElementType = T;
+ using ResultType = typename Accumulator<T>::Type;
+
+ /**
+ * Compute a distance between two vectors using (1 - (1 + sum(min(a_i,b_i))) / (1 +
+ * sum(max(a_i, b_i))) )
+ *
+ * This distance is not a valid kdtree distance, it's not dimensionwise additive
+ * and ignores worst_dist parameter.
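+   *
+   * Worked example (arbitrary values): for a = {1, 2, 3} and b = {2, 1, 3},
+   * sum(min) = 5 and sum(max) = 7, so the distance is 1 - 6/8 = 0.25.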
+ */
+
+ template <typename Iterator1, typename Iterator2>
+ ResultType
+ operator()(Iterator1 a,
+ Iterator2 b,
+ std::size_t size,
+ ResultType worst_dist = -1) const
+ {
+ pcl::utils::ignore(worst_dist);
+ ResultType result = ResultType();
+ ResultType min0, min1, min2, min3;
+ ResultType max0, max1, max2, max3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ ResultType sum_min, sum_max;
+ sum_min = 0;
+ sum_max = 0;
+
+ while (a < lastgroup) {
+ min0 = (a[0] < b[0] ? a[0] : b[0]);
+ max0 = (a[0] > b[0] ? a[0] : b[0]);
+ min1 = (a[1] < b[1] ? a[1] : b[1]);
+ max1 = (a[1] > b[1] ? a[1] : b[1]);
+ min2 = (a[2] < b[2] ? a[2] : b[2]);
+ max2 = (a[2] > b[2] ? a[2] : b[2]);
+ min3 = (a[3] < b[3] ? a[3] : b[3]);
+ max3 = (a[3] > b[3] ? a[3] : b[3]);
+ sum_min += min0 + min1 + min2 + min3;
+ sum_max += max0 + max1 + max2 + max3;
+ a += 4;
+ b += 4;
+ }
+
+ while (a < last) {
+ min0 = *a < *b ? *a : *b;
+ max0 = *a > *b ? *a : *b;
+ sum_min += min0;
+ sum_max += max0;
+ a++;
+ b++;
+ }
+
+ result = static_cast<ResultType>(1.0 - ((1 + sum_min) / (1 + sum_max)));
+ return result;
+ }
+
+ /* This distance functor is not dimension-wise additive, which
+ * makes it an invalid kd-tree distance, not implementing the accum_dist method */
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType
+ accum_dist(const U& a, const V& b, int) const
+ {
+ ResultType min0;
+ ResultType max0;
+ min0 = (a < b ? a : b) + 1.0;
+ max0 = (a > b ? a : b) + 1.0;
+ return (1 - (min0 / max0));
+ }
+};
+
+} // namespace Metrics
-#include <fstream>
-#include <boost/filesystem.hpp>
-#include <boost/algorithm/string.hpp>
#include <pcl/io/pcd_io.h>
-namespace pcl
+#include <boost/algorithm/string.hpp>
+#include <boost/filesystem.hpp>
+
+#include <fstream>
+
+namespace pcl {
+namespace rec_3d_framework {
+namespace PersistenceUtils {
+
+inline bool
+writeCentroidToFile(const std::string& file, Eigen::Vector3f& centroid)
+{
+ std::ofstream out(file.c_str());
+ if (!out) {
+ std::cout << "Cannot open file.\n";
+ return false;
+ }
+
+ out << centroid[0] << " " << centroid[1] << " " << centroid[2] << std::endl;
+ out.close();
+
+ return true;
+}
+
+inline bool
+getCentroidFromFile(const std::string& file, Eigen::Vector3f& centroid)
+{
+ std::ifstream in;
+ in.open(file.c_str(), std::ifstream::in);
+
+ char linebuf[256];
+ in.getline(linebuf, 256);
+ std::string line(linebuf);
+ std::vector<std::string> strs;
+ boost::split(strs, line, boost::is_any_of(" "));
+ centroid[0] = static_cast<float>(atof(strs[0].c_str()));
+ centroid[1] = static_cast<float>(atof(strs[1].c_str()));
+ centroid[2] = static_cast<float>(atof(strs[2].c_str()));
+
+ return true;
+}
+
+inline bool
+writeMatrixToFile(const std::string& file, Eigen::Matrix4f& matrix)
+{
+ std::ofstream out(file.c_str());
+ if (!out) {
+ std::cout << "Cannot open file.\n";
+ return false;
+ }
+
+ for (std::size_t i = 0; i < 4; i++) {
+ for (std::size_t j = 0; j < 4; j++) {
+ out << matrix(i, j);
+ if (!(i == 3 && j == 3))
+ out << " ";
+ }
+ }
+ out.close();
+
+ return true;
+}
+
+inline bool
+writeFloatToFile(const std::string& file, float value)
+{
+ std::ofstream out(file.c_str());
+ if (!out) {
+ std::cout << "Cannot open file.\n";
+ return false;
+ }
+
+ out << value;
+ out.close();
+
+ return true;
+}
+
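+// Extracts the view id from a file name of the form descriptor_xxx_xx.pcd,
+// i.e. the second-to-last "_"-separated token ("xxx").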
+inline std::string
+getViewId(std::string id)
+{
+ std::vector<std::string> strs;
+ boost::split(strs, id, boost::is_any_of("_"));
+
+ return strs[strs.size() - 2];
+}
+
+inline bool
+readFloatFromFile(const std::string& dir, std::string file, float& value)
+{
+
+ std::vector<std::string> strs;
+ boost::split(strs, file, boost::is_any_of("/"));
+
+ std::string str;
+ for (std::size_t i = 0; i < (strs.size() - 1); i++) {
+ str += strs[i] + "/";
+ }
+
+ std::stringstream matrix_file;
+ matrix_file << dir << "/" << str << "entropy_" << getViewId(file) << ".txt";
+
+ std::ifstream in;
+ in.open(matrix_file.str().c_str(), std::ifstream::in);
+
+ char linebuf[1024];
+ in.getline(linebuf, 1024);
+ value = static_cast<float>(atof(linebuf));
+
+ return true;
+}
+
+inline bool
+readFloatFromFile(const std::string& file, float& value)
{
- namespace rec_3d_framework
- {
- namespace PersistenceUtils
- {
-
- inline bool
- writeCentroidToFile (const std::string& file, Eigen::Vector3f & centroid)
- {
- std::ofstream out (file.c_str ());
- if (!out)
- {
- std::cout << "Cannot open file.\n";
- return false;
- }
-
- out << centroid[0] << " " << centroid[1] << " " << centroid[2] << std::endl;
- out.close ();
-
- return true;
- }
-
- inline bool
- getCentroidFromFile (const std::string& file, Eigen::Vector3f & centroid)
- {
- std::ifstream in;
- in.open (file.c_str (), std::ifstream::in);
-
- char linebuf[256];
- in.getline (linebuf, 256);
- std::string line (linebuf);
- std::vector < std::string > strs;
- boost::split (strs, line, boost::is_any_of (" "));
- centroid[0] = static_cast<float> (atof (strs[0].c_str ()));
- centroid[1] = static_cast<float> (atof (strs[1].c_str ()));
- centroid[2] = static_cast<float> (atof (strs[2].c_str ()));
-
- return true;
- }
-
- inline bool
- writeMatrixToFile (const std::string& file, Eigen::Matrix4f & matrix)
- {
- std::ofstream out (file.c_str ());
- if (!out)
- {
- std::cout << "Cannot open file.\n";
- return false;
- }
-
- for (std::size_t i = 0; i < 4; i++)
- {
- for (std::size_t j = 0; j < 4; j++)
- {
- out << matrix (i, j);
- if (!(i == 3 && j == 3))
- out << " ";
- }
- }
- out.close ();
-
- return true;
- }
-
- inline bool
- writeFloatToFile (const std::string& file, float value)
- {
- std::ofstream out (file.c_str ());
- if (!out)
- {
- std::cout << "Cannot open file.\n";
- return false;
- }
-
- out << value;
- out.close ();
-
- return true;
- }
-
- inline std::string
- getViewId (std::string id)
- {
- //descriptor_xxx_xx.pcd
- //and we want xxx
-
- std::vector < std::string > strs;
- boost::split (strs, id, boost::is_any_of ("_"));
-
- //std::cout << "id:" << id << std::endl;
- //std::cout << "returned:" << strs[strs.size() - 2] << std::endl;
-
- return strs[strs.size () - 2];
- }
-
- inline bool
- readFloatFromFile (const std::string& dir, std::string file, float& value)
- {
-
- std::vector < std::string > strs;
- boost::split (strs, file, boost::is_any_of ("/"));
-
- std::string str;
- for (std::size_t i = 0; i < (strs.size () - 1); i++)
- {
- str += strs[i] + "/";
- }
-
- std::stringstream matrix_file;
- matrix_file << dir << "/" << str << "entropy_" << getViewId (file) << ".txt";
-
- std::ifstream in;
- in.open (matrix_file.str ().c_str (), std::ifstream::in);
-
- char linebuf[1024];
- in.getline (linebuf, 1024);
- value = static_cast<float> (atof (linebuf));
-
- return true;
- }
-
- inline bool
- readFloatFromFile (const std::string& file, float& value)
- {
-
- std::ifstream in;
- in.open (file.c_str (), std::ifstream::in);
-
- char linebuf[1024];
- in.getline (linebuf, 1024);
- value = static_cast<float> (atof (linebuf));
-
- return true;
- }
-
- inline bool
- readMatrixFromFile (std::string dir, std::string file, Eigen::Matrix4f & matrix)
- {
-
- //get the descriptor name from dir
- std::vector < std::string > path;
- boost::split (path, dir, boost::is_any_of ("/"));
-
- std::string dname = path[path.size () - 1];
- std::string file_replaced;
- for (std::size_t i = 0; i < (path.size () - 1); i++)
- {
- file_replaced += path[i] + "/";
- }
-
- boost::split (path, file, boost::is_any_of ("/"));
- std::string id;
-
- for (std::size_t i = 0; i < (path.size () - 1); i++)
- {
- id += path[i];
- if (i < (path.size () - 1))
- {
- id += "/";
- }
- }
-
- boost::split (path, file, boost::is_any_of ("/"));
- std::string filename = path[path.size () - 1];
-
- std::stringstream matrix_file;
- matrix_file << file_replaced << id << "/" << dname << "/pose_" << getViewId (file) << ".txt";
- //std::cout << matrix_file.str() << std::endl;
-
- std::ifstream in;
- in.open (matrix_file.str ().c_str (), std::ifstream::in);
-
- char linebuf[1024];
- in.getline (linebuf, 1024);
- std::string line (linebuf);
- std::vector < std::string > strs_2;
- boost::split (strs_2, line, boost::is_any_of (" "));
-
- for (int i = 0; i < 16; i++)
- {
- matrix (i % 4, i / 4) = static_cast<float> (atof (strs_2[i].c_str ()));
- }
-
- return true;
- }
-
- inline bool
- readMatrixFromFile (const std::string& file, Eigen::Matrix4f & matrix)
- {
-
- std::ifstream in;
- in.open (file.c_str (), std::ifstream::in);
-
- char linebuf[1024];
- in.getline (linebuf, 1024);
- std::string line (linebuf);
- std::vector < std::string > strs_2;
- boost::split (strs_2, line, boost::is_any_of (" "));
-
- for (int i = 0; i < 16; i++)
- {
- matrix (i % 4, i / 4) = static_cast<float> (atof (strs_2[i].c_str ()));
- }
-
- return true;
- }
-
- inline bool
- readMatrixFromFile2 (const std::string& file, Eigen::Matrix4f & matrix)
- {
-
- std::ifstream in;
- in.open (file.c_str (), std::ifstream::in);
-
- char linebuf[1024];
- in.getline (linebuf, 1024);
- std::string line (linebuf);
- std::vector < std::string > strs_2;
- boost::split (strs_2, line, boost::is_any_of (" "));
-
- for (int i = 0; i < 16; i++)
- {
- matrix (i / 4, i % 4) = static_cast<float> (atof (strs_2[i].c_str ()));
- }
-
- return true;
- }
-
- template<typename PointInT>
- inline
- void
- getPointCloudFromFile (std::string dir, std::string file, typename pcl::PointCloud<PointInT>::Ptr & cloud)
- {
-
- //get the descriptor name from dir
- std::vector < std::string > path;
- boost::split (path, dir, boost::is_any_of ("/"));
-
- std::string dname = path[path.size () - 1];
- std::string file_replaced;
- for (std::size_t i = 0; i < (path.size () - 1); i++)
- {
- file_replaced += path[i] + "/";
- }
-
- boost::split (path, file, boost::is_any_of ("/"));
- std::string id;
-
- for (std::size_t i = 0; i < (path.size () - 1); i++)
- {
- id += path[i];
- if (i < (path.size () - 1))
- {
- id += "/";
- }
- }
-
- std::stringstream view_file;
- view_file << file_replaced << id << "/" << dname << "/view_" << getViewId (file) << ".pcd";
-
- pcl::io::loadPCDFile (view_file.str (), *cloud);
- }
+
+ std::ifstream in;
+ in.open(file.c_str(), std::ifstream::in);
+
+ char linebuf[1024];
+ in.getline(linebuf, 1024);
+ value = static_cast<float>(atof(linebuf));
+
+ return true;
+}
+
+inline bool
+readMatrixFromFile(std::string dir, std::string file, Eigen::Matrix4f& matrix)
+{
+
+ // get the descriptor name from dir
+ std::vector<std::string> path;
+ boost::split(path, dir, boost::is_any_of("/"));
+
+ std::string dname = path[path.size() - 1];
+ std::string file_replaced;
+ for (std::size_t i = 0; i < (path.size() - 1); i++) {
+ file_replaced += path[i] + "/";
+ }
+
+ boost::split(path, file, boost::is_any_of("/"));
+ std::string id;
+
+ for (std::size_t i = 0; i < (path.size() - 1); i++) {
+ id += path[i];
+ if (i < (path.size() - 1)) {
+ id += "/";
+ }
+ }
+
+ boost::split(path, file, boost::is_any_of("/"));
+ std::string filename = path[path.size() - 1];
+
+ std::stringstream matrix_file;
+ matrix_file << file_replaced << id << "/" << dname << "/pose_" << getViewId(file)
+ << ".txt";
+
+ std::ifstream in;
+ in.open(matrix_file.str().c_str(), std::ifstream::in);
+
+ char linebuf[1024];
+ in.getline(linebuf, 1024);
+ std::string line(linebuf);
+ std::vector<std::string> strs_2;
+ boost::split(strs_2, line, boost::is_any_of(" "));
+
+ for (int i = 0; i < 16; i++) {
+ matrix(i % 4, i / 4) = static_cast<float>(atof(strs_2[i].c_str()));
+ }
+
+ return true;
+}
+
+inline bool
+readMatrixFromFile(const std::string& file, Eigen::Matrix4f& matrix)
+{
+
+ std::ifstream in;
+ in.open(file.c_str(), std::ifstream::in);
+
+ char linebuf[1024];
+ in.getline(linebuf, 1024);
+ std::string line(linebuf);
+ std::vector<std::string> strs_2;
+ boost::split(strs_2, line, boost::is_any_of(" "));
+
+ for (int i = 0; i < 16; i++) {
+ matrix(i % 4, i / 4) = static_cast<float>(atof(strs_2[i].c_str()));
+ }
+
+ return true;
+}
+
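+// Same as readMatrixFromFile(file, matrix) above, but fills the matrix in row-major
+// order (readMatrixFromFile interprets the 16 values column by column).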
+inline bool
+readMatrixFromFile2(const std::string& file, Eigen::Matrix4f& matrix)
+{
+
+ std::ifstream in;
+ in.open(file.c_str(), std::ifstream::in);
+
+ char linebuf[1024];
+ in.getline(linebuf, 1024);
+ std::string line(linebuf);
+ std::vector<std::string> strs_2;
+ boost::split(strs_2, line, boost::is_any_of(" "));
+
+ for (int i = 0; i < 16; i++) {
+ matrix(i / 4, i % 4) = static_cast<float>(atof(strs_2[i].c_str()));
+ }
+
+ return true;
+}
+
+template <typename PointInT>
+inline void
+getPointCloudFromFile(std::string dir,
+ std::string file,
+ typename pcl::PointCloud<PointInT>::Ptr& cloud)
+{
+
+ // get the descriptor name from dir
+ std::vector<std::string> path;
+ boost::split(path, dir, boost::is_any_of("/"));
+
+ std::string dname = path[path.size() - 1];
+ std::string file_replaced;
+ for (std::size_t i = 0; i < (path.size() - 1); i++) {
+ file_replaced += path[i] + "/";
+ }
+
+ boost::split(path, file, boost::is_any_of("/"));
+ std::string id;
+
+ for (std::size_t i = 0; i < (path.size() - 1); i++) {
+ id += path[i];
+ if (i < (path.size() - 1)) {
+ id += "/";
}
}
+
+ std::stringstream view_file;
+ view_file << file_replaced << id << "/" << dname << "/view_" << getViewId(file)
+ << ".pcd";
+
+ pcl::io::loadPCDFile(view_file.str(), *cloud);
}
+} // namespace PersistenceUtils
+} // namespace rec_3d_framework
+} // namespace pcl
#pragma once
-#include <vtkPolyData.h>
-#include <vtkTriangle.h>
-#include <vtkSmartPointer.h>
+#include <pcl/common/common.h>
+
#include <vtkCellArray.h>
#include <vtkPLYReader.h>
+#include <vtkPolyData.h>
#include <vtkPolyDataMapper.h>
+#include <vtkSmartPointer.h>
#include <vtkTransform.h>
#include <vtkTransformFilter.h>
-#include <pcl/common/common.h>
+#include <vtkTriangle.h>
-namespace pcl
+namespace pcl {
+namespace rec_3d_framework {
+
+inline double
+uniform_deviate(int seed)
{
- namespace rec_3d_framework
- {
-
- inline double
- uniform_deviate (int seed)
- {
- double ran = seed * (1.0 / (RAND_MAX + 1.0));
- return ran;
- }
-
- inline void
- randomPointTriangle (double a1, double a2, double a3, double b1, double b2, double b3, double c1, double c2, double c3, Eigen::Vector4f& p)
- {
- float r1 = static_cast<float> (uniform_deviate (rand ()));
- float r2 = static_cast<float> (uniform_deviate (rand ()));
- float r1sqr = std::sqrt (r1);
- float OneMinR1Sqr = (1 - r1sqr);
- float OneMinR2 = (1 - r2);
- a1 *= OneMinR1Sqr;
- a2 *= OneMinR1Sqr;
- a3 *= OneMinR1Sqr;
- b1 *= OneMinR2;
- b2 *= OneMinR2;
- b3 *= OneMinR2;
- c1 = r1sqr * (r2 * c1 + b1) + a1;
- c2 = r1sqr * (r2 * c2 + b2) + a2;
- c3 = r1sqr * (r2 * c3 + b3) + a3;
- p[0] = static_cast<float> (c1);
- p[1] = static_cast<float> (c2);
- p[2] = static_cast<float> (c3);
- p[3] = 0.f;
- }
-
- inline void
- randPSurface (vtkPolyData * polydata, std::vector<double> * cumulativeAreas, double totalArea, Eigen::Vector4f& p)
- {
- float r = static_cast<float> (uniform_deviate (rand ()) * totalArea);
-
- std::vector<double>::iterator low = std::lower_bound (cumulativeAreas->begin (), cumulativeAreas->end (), r);
- vtkIdType el = static_cast<vtkIdType> (low - cumulativeAreas->begin ());
-
- double A[3], B[3], C[3];
- vtkIdType npts = 0;
- vtkIdType *ptIds = nullptr;
- polydata->GetCellPoints (el, npts, ptIds);
-
- if (ptIds == nullptr)
- return;
-
- polydata->GetPoint (ptIds[0], A);
- polydata->GetPoint (ptIds[1], B);
- polydata->GetPoint (ptIds[2], C);
- randomPointTriangle (A[0], A[1], A[2], B[0], B[1], B[2], C[0], C[1], C[2], p);
- }
-
- template<typename PointT>
- inline void
- uniform_sampling (const vtkSmartPointer<vtkPolyData>& polydata, std::size_t n_samples, typename pcl::PointCloud<PointT> & cloud_out)
- {
- polydata->BuildCells ();
- vtkSmartPointer < vtkCellArray > cells = polydata->GetPolys ();
-
- double p1[3], p2[3], p3[3], totalArea = 0;
- std::vector<double> cumulativeAreas (cells->GetNumberOfCells (), 0);
- std::size_t i = 0;
- vtkIdType npts = 0, *ptIds = nullptr;
- for (cells->InitTraversal (); cells->GetNextCell (npts, ptIds); i++)
- {
- polydata->GetPoint (ptIds[0], p1);
- polydata->GetPoint (ptIds[1], p2);
- polydata->GetPoint (ptIds[2], p3);
- totalArea += vtkTriangle::TriangleArea (p1, p2, p3);
- cumulativeAreas[i] = totalArea;
- }
-
- cloud_out.points.resize (n_samples);
- cloud_out.width = static_cast<int> (n_samples);
- cloud_out.height = 1;
-
- for (i = 0; i < n_samples; i++)
- {
- Eigen::Vector4f p (0.f, 0.f, 0.f, 0.f);
- randPSurface (polydata, &cumulativeAreas, totalArea, p);
- cloud_out.points[i].x = static_cast<float> (p[0]);
- cloud_out.points[i].y = static_cast<float> (p[1]);
- cloud_out.points[i].z = static_cast<float> (p[2]);
- }
- }
-
- template<typename PointT>
- inline void
- uniform_sampling (std::string & file, std::size_t n_samples, typename pcl::PointCloud<PointT> & cloud_out, float scale = 1.f)
- {
-
- vtkSmartPointer < vtkPLYReader > reader = vtkSmartPointer<vtkPLYReader>::New ();
- reader->SetFileName (file.c_str ());
-
- vtkSmartPointer < vtkPolyDataMapper > mapper = vtkSmartPointer<vtkPolyDataMapper>::New ();
-
- if (scale == 1.f)
- {
- mapper->SetInputConnection (reader->GetOutputPort ());
- mapper->Update ();
- }
- else
- {
- vtkSmartPointer<vtkTransform> trans = vtkSmartPointer<vtkTransform>::New ();
- trans->Scale (scale, scale, scale);
- vtkSmartPointer < vtkTransformFilter > trans_filter = vtkSmartPointer<vtkTransformFilter>::New ();
- trans_filter->SetTransform (trans);
- trans_filter->SetInputConnection (reader->GetOutputPort ());
- trans_filter->Update ();
- mapper->SetInputConnection (trans_filter->GetOutputPort ());
- mapper->Update ();
- }
-
- vtkSmartPointer<vtkPolyData> poly = mapper->GetInput ();
-
- uniform_sampling (poly, n_samples, cloud_out);
-
- }
-
- inline void
- getVerticesAsPointCloud (const vtkSmartPointer<vtkPolyData>& polydata, pcl::PointCloud<pcl::PointXYZ> & cloud_out)
- {
- vtkPoints *points = polydata->GetPoints ();
- cloud_out.points.resize (points->GetNumberOfPoints ());
- cloud_out.width = static_cast<int> (cloud_out.points.size ());
- cloud_out.height = 1;
- cloud_out.is_dense = false;
-
- for (vtkIdType i = 0; i < points->GetNumberOfPoints (); i++)
- {
- double p[3];
- points->GetPoint (i, p);
- cloud_out.points[i].x = static_cast<float> (p[0]);
- cloud_out.points[i].y = static_cast<float> (p[1]);
- cloud_out.points[i].z = static_cast<float> (p[2]);
- }
- }
+ double ran = seed * (1.0 / (RAND_MAX + 1.0));
+ return ran;
+}
+
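+// Returns (in p) a point sampled uniformly inside the triangle with vertices
+// (a1,a2,a3), (b1,b2,b3) and (c1,c2,c3), via the standard sqrt(r1) barycentric trick.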
+inline void
+randomPointTriangle(double a1,
+ double a2,
+ double a3,
+ double b1,
+ double b2,
+ double b3,
+ double c1,
+ double c2,
+ double c3,
+ Eigen::Vector4f& p)
+{
+ float r1 = static_cast<float>(uniform_deviate(rand()));
+ float r2 = static_cast<float>(uniform_deviate(rand()));
+ float r1sqr = std::sqrt(r1);
+ float OneMinR1Sqr = (1 - r1sqr);
+ float OneMinR2 = (1 - r2);
+ a1 *= OneMinR1Sqr;
+ a2 *= OneMinR1Sqr;
+ a3 *= OneMinR1Sqr;
+ b1 *= OneMinR2;
+ b2 *= OneMinR2;
+ b3 *= OneMinR2;
+ c1 = r1sqr * (r2 * c1 + b1) + a1;
+ c2 = r1sqr * (r2 * c2 + b2) + a2;
+ c3 = r1sqr * (r2 * c3 + b3) + a3;
+ p[0] = static_cast<float>(c1);
+ p[1] = static_cast<float>(c2);
+ p[2] = static_cast<float>(c3);
+ p[3] = 0.f;
+}
+
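+// Selects a triangle with probability proportional to its area (binary search on the
+// cumulative-area table) and samples a uniformly distributed point on it.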
+inline void
+randPSurface(vtkPolyData* polydata,
+ std::vector<double>* cumulativeAreas,
+ double totalArea,
+ Eigen::Vector4f& p)
+{
+ float r = static_cast<float>(uniform_deviate(rand()) * totalArea);
+
+ std::vector<double>::iterator low =
+ std::lower_bound(cumulativeAreas->begin(), cumulativeAreas->end(), r);
+ vtkIdType el = static_cast<vtkIdType>(low - cumulativeAreas->begin());
+
+ double A[3], B[3], C[3];
+ vtkIdType npts = 0;
+ vtkIdType* ptIds = nullptr;
+ polydata->GetCellPoints(el, npts, ptIds);
+
+ if (ptIds == nullptr)
+ return;
+
+ polydata->GetPoint(ptIds[0], A);
+ polydata->GetPoint(ptIds[1], B);
+ polydata->GetPoint(ptIds[2], C);
+ randomPointTriangle(A[0], A[1], A[2], B[0], B[1], B[2], C[0], C[1], C[2], p);
+}
+
+template <typename PointT>
+inline void
+uniform_sampling(const vtkSmartPointer<vtkPolyData>& polydata,
+ std::size_t n_samples,
+ typename pcl::PointCloud<PointT>& cloud_out)
+{
+ polydata->BuildCells();
+ vtkSmartPointer<vtkCellArray> cells = polydata->GetPolys();
+
+ double p1[3], p2[3], p3[3], totalArea = 0;
+ std::vector<double> cumulativeAreas(cells->GetNumberOfCells(), 0);
+ std::size_t i = 0;
+ vtkIdType npts = 0, *ptIds = nullptr;
+ for (cells->InitTraversal(); cells->GetNextCell(npts, ptIds); i++) {
+ polydata->GetPoint(ptIds[0], p1);
+ polydata->GetPoint(ptIds[1], p2);
+ polydata->GetPoint(ptIds[2], p3);
+ totalArea += vtkTriangle::TriangleArea(p1, p2, p3);
+ cumulativeAreas[i] = totalArea;
+ }
+
+ cloud_out.resize(n_samples);
+ cloud_out.width = n_samples;
+ cloud_out.height = 1;
+
+ for (auto& point : cloud_out) {
+ Eigen::Vector4f p(0.f, 0.f, 0.f, 0.f);
+ randPSurface(polydata, &cumulativeAreas, totalArea, p);
+ point.getVector3fMap() = p.head<3>();
+ }
+}
+
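+// Loads a PLY mesh from disk, optionally scales it, and samples n_samples points
+// uniformly on its surface.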
+template <typename PointT>
+inline void
+uniform_sampling(std::string& file,
+ std::size_t n_samples,
+ typename pcl::PointCloud<PointT>& cloud_out,
+ float scale = 1.f)
+{
+
+ vtkSmartPointer<vtkPLYReader> reader = vtkSmartPointer<vtkPLYReader>::New();
+ reader->SetFileName(file.c_str());
+
+ vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
+
+ if (scale == 1.f) {
+ mapper->SetInputConnection(reader->GetOutputPort());
+ mapper->Update();
+ }
+ else {
+ vtkSmartPointer<vtkTransform> trans = vtkSmartPointer<vtkTransform>::New();
+ trans->Scale(scale, scale, scale);
+ vtkSmartPointer<vtkTransformFilter> trans_filter =
+ vtkSmartPointer<vtkTransformFilter>::New();
+ trans_filter->SetTransform(trans);
+ trans_filter->SetInputConnection(reader->GetOutputPort());
+ trans_filter->Update();
+ mapper->SetInputConnection(trans_filter->GetOutputPort());
+ mapper->Update();
+ }
+
+ vtkSmartPointer<vtkPolyData> poly = mapper->GetInput();
+
+ uniform_sampling(poly, n_samples, cloud_out);
+}
+
+inline void
+getVerticesAsPointCloud(const vtkSmartPointer<vtkPolyData>& polydata,
+ pcl::PointCloud<pcl::PointXYZ>& cloud_out)
+{
+ vtkPoints* points = polydata->GetPoints();
+ cloud_out.resize(points->GetNumberOfPoints());
+ cloud_out.width = cloud_out.size();
+ cloud_out.height = 1;
+ cloud_out.is_dense = false;
+
+ for (vtkIdType i = 0; i < points->GetNumberOfPoints(); i++) {
+ double p[3];
+ points->GetPoint(i, p);
+ cloud_out[i].x = static_cast<float>(p[0]);
+ cloud_out[i].y = static_cast<float>(p[1]);
+ cloud_out[i].z = static_cast<float>(p[2]);
}
}
+
+} // namespace rec_3d_framework
+} // namespace pcl
*/
#include <pcl/apps/3d_rec_framework/pipeline/impl/global_nn_classifier.hpp>
-#include "pcl/apps/3d_rec_framework/utils/metrics.h"
+#include <pcl/apps/3d_rec_framework/utils/metrics.h>
-//Instantiation
-template class pcl::rec_3d_framework::GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNPipeline<Metrics::HistIntersectionUnionDistance, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>;
+// Instantiation
+template class pcl::rec_3d_framework::
+ GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::GlobalNNPipeline<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::
+ GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>;
template class pcl::rec_3d_framework::GlobalClassifier<pcl::PointXYZ>;
*/
#include <pcl/apps/3d_rec_framework/pipeline/impl/global_nn_recognizer_crh.hpp>
-#include "pcl/apps/3d_rec_framework/utils/metrics.h"
+#include <pcl/apps/3d_rec_framework/utils/metrics.h>
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<90>,
- (float[90], histogram, histogram90)
-)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<90>,
+ (float[90], histogram, histogram90))
-//Instantiation
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::L2, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::ChiSquareDistance, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>;
-template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::L2, pcl::PointXYZ, pcl::ESFSignature640>;
+// Instantiation
+template class pcl::rec_3d_framework::
+ GlobalNNCRHRecognizer<flann::L2, pcl::PointXYZ, pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::
+ GlobalNNCRHRecognizer<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::GlobalNNCRHRecognizer<flann::ChiSquareDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::
+ GlobalNNCRHRecognizer<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>;
+template class pcl::rec_3d_framework::
+ GlobalNNCRHRecognizer<flann::L2, pcl::PointXYZ, pcl::ESFSignature640>;
*/
#include <pcl/apps/3d_rec_framework/pipeline/impl/global_nn_recognizer_cvfh.hpp>
-#include "pcl/apps/3d_rec_framework/utils/metrics.h"
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<367>,
- (float[367], histogram, histogram367)
-)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<431>,
- (float[431], histogram, histogram431)
-)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<559>,
- (float[559], histogram, histogram559)
-)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<815>,
- (float[815], histogram, histogram815)
-)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<879>,
- (float[879], histogram, histogram879)
-)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<1327>,
- (float[1327], histogram, histogram1327)
-)
-
-
-//Instantiation
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<flann::L2, pcl::PointXYZ, pcl::VFHSignature308>;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZ, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::VFHSignature308>;
-template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::Histogram<431> >;
-template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::Histogram<559> >;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::Histogram<815> >;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::Histogram<879> >;
-template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Metrics::HistIntersectionUnionDistance, pcl::PointXYZRGB, pcl::Histogram<1327> >;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<flann::L2, pcl::PointXYZRGB, pcl::Histogram<431> >;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<flann::L2, pcl::PointXYZRGB, pcl::Histogram<431> >;
-//template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<flann::L1, pcl::PointXYZRGB, pcl::Histogram<431> >;
+#include <pcl/apps/3d_rec_framework/utils/metrics.h>
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<367>,
+ (float[367], histogram, histogram367))
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<431>,
+ (float[431], histogram, histogram431))
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<559>,
+ (float[559], histogram, histogram559))
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<815>,
+ (float[815], histogram, histogram815))
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<879>,
+ (float[879], histogram, histogram879))
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<1327>,
+ (float[1327], histogram, histogram1327))
+
+// Instantiation
+template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZRGB,
+ pcl::VFHSignature308>;
+template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZRGB,
+ pcl::Histogram<431>>;
+template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZRGB,
+ pcl::Histogram<559>>;
+template class pcl::rec_3d_framework::GlobalNNCVFHRecognizer<
+ Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZRGB,
+ pcl::Histogram<1327>>;
#include <pcl/apps/3d_rec_framework/pipeline/impl/local_recognizer.hpp>
-//This stuff is needed to be able to make the SHOT histograms persistent
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<352>,
- (float[352], histogram, histogram352)
-)
+// Registering the histogram point types is needed to make the SHOT descriptors
+// persistent (PCD I/O)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<352>,
+ (float[352], histogram, histogram352))
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Histogram<1344>,
- (float[1344], histogram, histogram1344)
-)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::Histogram<1344>,
+ (float[1344], histogram, histogram1344))
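+// Histogram<352> holds plain SHOT descriptors; Histogram<1344> the color (CSHOT) variant.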
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::Histogram<352> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::Histogram<1344> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33>;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::FPFHSignature33>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::Histogram<352>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::Histogram<1344>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZRGB, pcl::FPFHSignature33>;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L2, pcl::PointXYZ, pcl::Histogram<352> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::Histogram<352> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::Histogram<1344> >;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L2, pcl::PointXYZ, pcl::FPFHSignature33>;
-template class PCL_EXPORTS pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::FPFHSignature33>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L2, pcl::PointXYZ, pcl::Histogram<352>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::Histogram<352>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::Histogram<1344>>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L2, pcl::PointXYZ, pcl::FPFHSignature33>;
+template class PCL_EXPORTS pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L2, pcl::PointXYZRGB, pcl::FPFHSignature33>;
* Author: aitor
*/
-#include <pcl/pcl_macros.h>
-#include <pcl/apps/3d_rec_framework/pipeline/global_nn_classifier.h>
-#include <pcl/apps/3d_rec_framework/pc_source/mesh_source.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/global/vfh_estimator.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/global/esf_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/global/cvfh_estimator.h>
+#include <pcl/apps/3d_rec_framework/feature_wrapper/global/esf_estimator.h>
+#include <pcl/apps/3d_rec_framework/feature_wrapper/global/vfh_estimator.h>
+#include <pcl/apps/3d_rec_framework/pc_source/mesh_source.h>
+#include <pcl/apps/3d_rec_framework/pipeline/global_nn_classifier.h>
#include <pcl/apps/3d_rec_framework/tools/openni_frame_source.h>
#include <pcl/apps/3d_rec_framework/utils/metrics.h>
-#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/apps/dominant_plane_segmentation.h>
#include <pcl/console/parse.h>
+#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/pcl_macros.h>
-template<template<class > class DistT, typename PointT, typename FeatureT>
+template <template <class> class DistT, typename PointT, typename FeatureT>
void
-segmentAndClassify (typename pcl::rec_3d_framework::GlobalNNPipeline<DistT, PointT, FeatureT> & global)
+segmentAndClassify(
+ typename pcl::rec_3d_framework::GlobalNNPipeline<DistT, PointT, FeatureT>& global)
{
- //get point cloud from the kinect, segment it and classify it
+ // grab a point cloud from the Kinect, segment it and classify each cluster
OpenNIFrameSource::OpenNIFrameSource camera;
OpenNIFrameSource::PointCloudPtr frame;
- pcl::visualization::PCLVisualizer vis ("kinect");
+ pcl::visualization::PCLVisualizer vis("kinect");
- //keyboard callback to stop getting frames and finalize application
- std::function<void (const pcl::visualization::KeyboardEvent&)> keyboard_cb = [&] (const pcl::visualization::KeyboardEvent& event)
- {
- camera.onKeyboardEvent (event);
- };
- vis.registerKeyboardCallback (keyboard_cb);
+ // keyboard callback to stop grabbing frames and shut down the application
+ std::function<void(const pcl::visualization::KeyboardEvent&)> keyboard_cb =
+ [&](const pcl::visualization::KeyboardEvent& event) {
+ camera.onKeyboardEvent(event);
+ };
+ vis.registerKeyboardCallback(keyboard_cb);
std::size_t previous_cluster_size = 0;
std::size_t previous_categories_size = 0;
float Z_DIST_ = 1.25f;
float text_scale = 0.015f;
- while (camera.isActive ())
- {
- pcl::ScopeTime frame_process ("Global frame processing ------------- ");
- frame = camera.snap ();
+ while (camera.isActive()) {
+ pcl::ScopeTime frame_process("Global frame processing ------------- ");
+ frame = camera.snap();
- pcl::PointCloud<pcl::PointXYZ>::Ptr xyz_points (new pcl::PointCloud<pcl::PointXYZ>);
- pcl::copyPointCloud (*frame, *xyz_points);
+ pcl::PointCloud<pcl::PointXYZ>::Ptr xyz_points(new pcl::PointCloud<pcl::PointXYZ>);
+ pcl::copyPointCloud(*frame, *xyz_points);
- //Step 1 -> Segment
+ // Step 1 -> Segment
pcl::apps::DominantPlaneSegmentation<pcl::PointXYZ> dps;
- dps.setInputCloud (xyz_points);
- dps.setMaxZBounds (Z_DIST_);
- dps.setObjectMinHeight (0.005);
- dps.setMinClusterSize (1000);
- dps.setWSize (9);
- dps.setDistanceBetweenClusters (0.1f);
+ dps.setInputCloud(xyz_points);
+ dps.setMaxZBounds(Z_DIST_);
+ dps.setObjectMinHeight(0.005);
+ dps.setMinClusterSize(1000);
+ dps.setWSize(9);
+ dps.setDistanceBetweenClusters(0.1f);
std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters;
std::vector<pcl::PointIndices> indices;
- dps.setDownsamplingSize (0.02f);
- dps.compute_fast (clusters);
- dps.getIndicesClusters (indices);
+ dps.setDownsamplingSize(0.02f);
+ dps.compute_fast(clusters);
+ dps.getIndicesClusters(indices);
Eigen::Vector4f table_plane_;
- Eigen::Vector3f normal_plane_ = Eigen::Vector3f (table_plane_[0], table_plane_[1], table_plane_[2]);
- dps.getTableCoefficients (table_plane_);
+ // fetch the table coefficients before building the plane normal, otherwise the
+ // normal would be constructed from an uninitialized vector
+ dps.getTableCoefficients(table_plane_);
+ Eigen::Vector3f normal_plane_ =
+ Eigen::Vector3f(table_plane_[0], table_plane_[1], table_plane_[2]);
- vis.removePointCloud ("frame");
- vis.addPointCloud<OpenNIFrameSource::PointT> (frame, "frame");
+ vis.removePointCloud("frame");
+ vis.addPointCloud<OpenNIFrameSource::PointT>(frame, "frame");
- for (std::size_t i = 0; i < previous_cluster_size; i++)
- {
+ for (std::size_t i = 0; i < previous_cluster_size; i++) {
std::stringstream cluster_name;
cluster_name << "cluster_" << i;
- vis.removePointCloud (cluster_name.str ());
+ vis.removePointCloud(cluster_name.str());
cluster_name << "_ply_model_";
- vis.removeShape (cluster_name.str ());
+ vis.removeShape(cluster_name.str());
}
- for (std::size_t i = 0; i < previous_categories_size; i++)
- {
+ for (std::size_t i = 0; i < previous_categories_size; i++) {
std::stringstream cluster_text;
cluster_text << "cluster_" << i << "_text";
- vis.removeText3D (cluster_text.str ());
+ vis.removeText3D(cluster_text.str());
}
previous_categories_size = 0;
float dist_ = 0.03f;
- for (std::size_t i = 0; i < clusters.size (); i++)
- {
+ for (std::size_t i = 0; i < clusters.size(); i++) {
std::stringstream cluster_name;
cluster_name << "cluster_" << i;
- pcl::visualization::PointCloudColorHandlerRandom<pcl::PointXYZ> random_handler (clusters[i]);
- vis.addPointCloud<pcl::PointXYZ> (clusters[i], random_handler, cluster_name.str ());
+ pcl::visualization::PointCloudColorHandlerRandom<pcl::PointXYZ> random_handler(
+ clusters[i]);
+ vis.addPointCloud<pcl::PointXYZ>(clusters[i], random_handler, cluster_name.str());
- global.setInputCloud (xyz_points);
- global.setIndices (indices[i].indices);
- global.classify ();
+ global.setInputCloud(xyz_points);
+ global.setIndices(indices[i].indices);
+ global.classify();
- std::vector < std::string > categories;
+ std::vector<std::string> categories;
std::vector<float> conf;
- global.getCategory (categories);
- global.getConfidence (conf);
+ global.getCategory(categories);
+ global.getConfidence(conf);
Eigen::Vector4f centroid;
- pcl::compute3DCentroid (*xyz_points, indices[i].indices, centroid);
- for (std::size_t kk = 0; kk < categories.size (); kk++)
- {
+ pcl::compute3DCentroid(*xyz_points, indices[i].indices, centroid);
+ for (std::size_t kk = 0; kk < categories.size(); kk++) {
pcl::PointXYZ pos;
- pos.x = centroid[0] + normal_plane_[0] * static_cast<float> (kk + 1) * dist_;
- pos.y = centroid[1] + normal_plane_[1] * static_cast<float> (kk + 1) * dist_;
- pos.z = centroid[2] + normal_plane_[2] * static_cast<float> (kk + 1) * dist_;
+ pos.x = centroid[0] + normal_plane_[0] * static_cast<float>(kk + 1) * dist_;
+ pos.y = centroid[1] + normal_plane_[1] * static_cast<float>(kk + 1) * dist_;
+ pos.z = centroid[2] + normal_plane_[2] * static_cast<float>(kk + 1) * dist_;
std::ostringstream prob_str;
- prob_str.precision (1);
+ prob_str.precision(1);
prob_str << categories[kk] << " [" << conf[kk] << "]";
std::stringstream cluster_text;
cluster_text << "cluster_" << previous_categories_size << "_text";
- vis.addText3D (prob_str.str (), pos, text_scale, 1, 0, 1, cluster_text.str (), 0);
+ vis.addText3D(prob_str.str(), pos, text_scale, 1, 0, 1, cluster_text.str(), 0);
previous_categories_size++;
}
}
- previous_cluster_size = clusters.size ();
+ previous_cluster_size = clusters.size();
- vis.spinOnce ();
+ vis.spinOnce();
}
}
-//bin/pcl_global_classification -models_dir /home/aitor/data/3d-net_one_class/ -descriptor_name esf -training_dir /home/aitor/data/3d-net_one_class_trained_level_1 -nn 10
-
int
-main (int argc, char ** argv)
+main(int argc, char** argv)
{
std::string path = "models/";
std::string training_dir = "trained_models/";
int NN = 1;
- pcl::console::parse_argument (argc, argv, "-models_dir", path);
- pcl::console::parse_argument (argc, argv, "-training_dir", training_dir);
- pcl::console::parse_argument (argc, argv, "-descriptor_name", desc_name);
- pcl::console::parse_argument (argc, argv, "-nn", NN);
-
- //pcl::console::parse_argument (argc, argv, "-z_dist", chop_at_z_);
- //pcl::console::parse_argument (argc, argv, "-tesselation_level", views_level_);
-
- std::shared_ptr<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> mesh_source (new pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>);
- mesh_source->setPath (path);
- mesh_source->setResolution (150);
- mesh_source->setTesselationLevel (1);
- mesh_source->setViewAngle (57.f);
- mesh_source->setRadiusSphere (1.5f);
- mesh_source->setModelScale (1.f);
- mesh_source->generate (training_dir);
-
- std::shared_ptr<pcl::rec_3d_framework::Source<pcl::PointXYZ>> cast_source (
- std::static_pointer_cast<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> (mesh_source)
- );
-
- std::shared_ptr<pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal>> normal_estimator;
- normal_estimator.reset (new pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal>);
- normal_estimator->setCMR (true);
- normal_estimator->setDoVoxelGrid (true);
- normal_estimator->setRemoveOutliers (true);
- normal_estimator->setFactorsForCMR (3, 7);
-
- if (desc_name == "vfh")
- {
- std::shared_ptr<pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>> vfh_estimator (
- new pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>
- );
- vfh_estimator->setNormalEstimator (normal_estimator);
-
- std::shared_ptr<pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::VFHSignature308>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>> (vfh_estimator)
- );
-
- pcl::rec_3d_framework::GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::VFHSignature308> global;
- global.setDataSource (cast_source);
- global.setTrainingDir (training_dir);
- global.setDescriptorName (desc_name);
- global.setNN (NN);
- global.setFeatureEstimator (cast_estimator);
- global.initialize (true);
-
- segmentAndClassify<flann::L1, pcl::PointXYZ, pcl::VFHSignature308> (global);
+ pcl::console::parse_argument(argc, argv, "-models_dir", path);
+ pcl::console::parse_argument(argc, argv, "-training_dir", training_dir);
+ pcl::console::parse_argument(argc, argv, "-descriptor_name", desc_name);
+ pcl::console::parse_argument(argc, argv, "-nn", NN);
+
+ std::shared_ptr<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> mesh_source(
+ new pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>);
+ mesh_source->setPath(path);
+ mesh_source->setResolution(150);
+ mesh_source->setTesselationLevel(1);
+ mesh_source->setViewAngle(57.f);
+ mesh_source->setRadiusSphere(1.5f);
+ mesh_source->setModelScale(1.f);
+ mesh_source->generate(training_dir);
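+ // generate() renders synthetic views of each model from a tessellated sphere of
+ // viewpoints and caches them in training_dir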
+
+ std::shared_ptr<pcl::rec_3d_framework::Source<pcl::PointXYZ>> cast_source(
+ std::static_pointer_cast<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>>(
+ mesh_source));
+
+ std::shared_ptr<
+ pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal>>
+ normal_estimator;
+ normal_estimator.reset(
+ new pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ,
+ pcl::Normal>);
+ normal_estimator->setCMR(true);
+ normal_estimator->setDoVoxelGrid(true);
+ normal_estimator->setRemoveOutliers(true);
+ normal_estimator->setFactorsForCMR(3, 7);
+
+ if (desc_name == "vfh") {
+ std::shared_ptr<
+ pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>>
+ vfh_estimator(new pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ,
+ pcl::VFHSignature308>);
+ vfh_estimator->setNormalEstimator(normal_estimator);
+
+ std::shared_ptr<
+ pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::VFHSignature308>>
+ cast_estimator(std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::VFHEstimation<pcl::PointXYZ,
+ pcl::VFHSignature308>>(
+ vfh_estimator));
+
+ pcl::rec_3d_framework::
+ GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>
+ global;
+ global.setDataSource(cast_source);
+ global.setTrainingDir(training_dir);
+ global.setDescriptorName(desc_name);
+ global.setNN(NN);
+ global.setFeatureEstimator(cast_estimator);
+ global.initialize(true);
+
+ segmentAndClassify<flann::L1, pcl::PointXYZ, pcl::VFHSignature308>(global);
}
- if (desc_name == "cvfh")
- {
- std::shared_ptr<pcl::rec_3d_framework::CVFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>> vfh_estimator (
- new pcl::rec_3d_framework::CVFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>
- );
- vfh_estimator->setNormalEstimator (normal_estimator);
-
- std::shared_ptr<pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::VFHSignature308>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::CVFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>> (vfh_estimator)
- );
-
- pcl::rec_3d_framework::GlobalNNPipeline<Metrics::HistIntersectionUnionDistance, pcl::PointXYZ, pcl::VFHSignature308> global;
- global.setDataSource (cast_source);
- global.setTrainingDir (training_dir);
- global.setDescriptorName (desc_name);
- global.setFeatureEstimator (cast_estimator);
- global.setNN (NN);
- global.initialize (false);
-
- segmentAndClassify<Metrics::HistIntersectionUnionDistance, pcl::PointXYZ, pcl::VFHSignature308> (global);
+ if (desc_name == "cvfh") {
+ auto vfh_estimator = std::make_shared<
+ pcl::rec_3d_framework::CVFHEstimation<pcl::PointXYZ, pcl::VFHSignature308>>();
+ vfh_estimator->setNormalEstimator(normal_estimator);
+
+ auto cast_estimator = std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::VFHSignature308>>(
+ vfh_estimator);
+
+ pcl::rec_3d_framework::GlobalNNPipeline<Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>
+ global;
+ global.setDataSource(cast_source);
+ global.setTrainingDir(training_dir);
+ global.setDescriptorName(desc_name);
+ global.setFeatureEstimator(cast_estimator);
+ global.setNN(NN);
+ global.initialize(false);
+
+ segmentAndClassify<Metrics::HistIntersectionUnionDistance,
+ pcl::PointXYZ,
+ pcl::VFHSignature308>(global);
}
- if (desc_name == "esf")
- {
- std::shared_ptr<pcl::rec_3d_framework::ESFEstimation<pcl::PointXYZ, pcl::ESFSignature640>> estimator (
- new pcl::rec_3d_framework::ESFEstimation<pcl::PointXYZ, pcl::ESFSignature640>
- );
-
- std::shared_ptr<pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::ESFSignature640>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::ESFEstimation<pcl::PointXYZ, pcl::ESFSignature640>> (estimator)
- );
-
- pcl::rec_3d_framework::GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640> global;
- global.setDataSource (cast_source);
- global.setTrainingDir (training_dir);
- global.setDescriptorName (desc_name);
- global.setFeatureEstimator (cast_estimator);
- global.setNN (NN);
- global.initialize (false);
-
- segmentAndClassify<flann::L1, pcl::PointXYZ, pcl::ESFSignature640> (global);
+ if (desc_name == "esf") {
+ auto estimator = std::make_shared<
+ pcl::rec_3d_framework::ESFEstimation<pcl::PointXYZ, pcl::ESFSignature640>>();
+
+ auto cast_estimator = std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::GlobalEstimator<pcl::PointXYZ, pcl::ESFSignature640>>(
+ estimator);
+
+ pcl::rec_3d_framework::
+ GlobalNNPipeline<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>
+ global;
+ global.setDataSource(cast_source);
+ global.setTrainingDir(training_dir);
+ global.setDescriptorName(desc_name);
+ global.setFeatureEstimator(cast_estimator);
+ global.setNN(NN);
+ global.initialize(false);
+
+ segmentAndClassify<flann::L1, pcl::PointXYZ, pcl::ESFSignature640>(global);
}
-
}
* Author: aitor
*/
-#include <flann/algorithms/dist.h>
-
-#include <pcl/recognition/hv/hv_papazov.h>
-#include <pcl/console/parse.h>
-#include <pcl/apps/3d_rec_framework/pipeline/local_recognizer.h>
-#include <pcl/apps/3d_rec_framework/pc_source/mesh_source.h>
-#include <pcl/recognition/cg/geometric_consistency.h>
+#include <pcl/apps/3d_rec_framework/feature_wrapper/local/fpfh_local_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/shot_local_estimator.h>
#include <pcl/apps/3d_rec_framework/feature_wrapper/local/shot_local_estimator_omp.h>
-#include <pcl/apps/3d_rec_framework/feature_wrapper/local/fpfh_local_estimator.h>
-#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/apps/3d_rec_framework/pc_source/mesh_source.h>
+#include <pcl/apps/3d_rec_framework/pipeline/local_recognizer.h>
+#include <pcl/console/parse.h>
#include <pcl/recognition/cg/correspondence_grouping.h>
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/recognition/cg/hough_3d.h>
-#include <pcl/recognition/hv/hv_papazov.h>
-#include <pcl/recognition/hv/hv_go.h>
#include <pcl/recognition/hv/greedy_verification.h>
+#include <pcl/recognition/hv/hv_go.h>
+#include <pcl/recognition/hv/hv_papazov.h>
+#include <pcl/visualization/pcl_visualizer.h>
+
+#include <flann/algorithms/dist.h>
void
-getScenesInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths)
+getScenesInDirectory(bf::path& dir,
+ std::string& rel_path_so_far,
+ std::vector<std::string>& relative_paths)
{
- //list models in MODEL_FILES_DIR_ and return list
- for (const auto& dir_entry : bf::directory_iterator(dir))
- {
- //check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
- {
- std::string so_far = rel_path_so_far + (dir_entry.path ().filename ()).string() + "/";
- bf::path curr_path = dir_entry.path ();
- getScenesInDirectory (curr_path, so_far, relative_paths);
+ // list models in MODEL_FILES_DIR_ and return list
+ for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ // if it's a directory, recurse and collect the scenes inside it
+ if (bf::is_directory(dir_entry)) {
+ std::string so_far =
+ rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+ bf::path curr_path = dir_entry.path();
+ getScenesInDirectory(curr_path, so_far, relative_paths);
}
- else
- {
- //check that it is a ply file and then add, otherwise ignore..
- std::vector < std::string > strs;
- std::string file = (dir_entry.path ().filename ()).string();
+ else {
+ // check that it is a pcd file and then add it, otherwise ignore
+ std::vector<std::string> strs;
+ std::string file = (dir_entry.path().filename()).string();
- boost::split (strs, file, boost::is_any_of ("."));
- std::string extension = strs[strs.size () - 1];
+ boost::split(strs, file, boost::is_any_of("."));
+ std::string extension = strs[strs.size() - 1];
- if (extension == "pcd")
- {
- std::string path = rel_path_so_far + (dir_entry.path ().filename ()).string();
- relative_paths.push_back (path);
+ if (extension == "pcd") {
+ std::string path = rel_path_so_far + (dir_entry.path().filename()).string();
+ relative_paths.push_back(path);
}
}
}
}
inline bool
-sortFiles (const std::string & file1, const std::string & file2)
+sortFiles(const std::string& file1, const std::string& file2)
{
- std::vector < std::string > strs1;
- boost::split (strs1, file1, boost::is_any_of ("/"));
+ std::vector<std::string> strs1;
+ boost::split(strs1, file1, boost::is_any_of("/"));
- std::vector < std::string > strs2;
- boost::split (strs2, file2, boost::is_any_of ("/"));
+ std::vector<std::string> strs2;
+ boost::split(strs2, file2, boost::is_any_of("/"));
- std::string id_1 = strs1[strs1.size () - 1];
- std::string id_2 = strs2[strs2.size () - 1];
+ std::string id_1 = strs1[strs1.size() - 1];
+ std::string id_2 = strs2[strs2.size() - 1];
- std::size_t pos1 = id_1.find (".ply.pcd");
- std::size_t pos2 = id_2.find (".ply.pcd");
+ std::size_t pos1 = id_1.find(".ply.pcd");
+ std::size_t pos2 = id_2.find(".ply.pcd");
- id_1 = id_1.substr (0, pos1);
- id_2 = id_2.substr (0, pos2);
+ id_1 = id_1.substr(0, pos1);
+ id_2 = id_2.substr(0, pos2);
- id_1 = id_1.substr (2);
- id_2 = id_2.substr (2);
+ id_1 = id_1.substr(2);
+ id_2 = id_2.substr(2);
- return atoi (id_1.c_str ()) < atoi (id_2.c_str ());
+ return atoi(id_1.c_str()) < atoi(id_2.c_str());
}
-template<template<class > class DistT, typename PointT, typename FeatureT>
- void
- recognizeAndVisualize (typename pcl::rec_3d_framework::LocalRecognitionPipeline<DistT, PointT, FeatureT> & local, std::string & scenes_dir,
- int scene = -1, bool single_model = false)
- {
+template <template <class> class DistT, typename PointT, typename FeatureT>
+void
+recognizeAndVisualize(
+ typename pcl::rec_3d_framework::LocalRecognitionPipeline<DistT, PointT, FeatureT>&
+ local,
+ std::string& scenes_dir,
+ int scene = -1,
+ bool single_model = false)
+{
- //read mians scenes
- bf::path ply_files_dir = scenes_dir;
- std::vector < std::string > files;
- std::string start;
- getScenesInDirectory (ply_files_dir, start, files);
+ // read the scenes of the Mian dataset
+ bf::path ply_files_dir = scenes_dir;
+ std::vector<std::string> files;
+ std::string start;
+ getScenesInDirectory(ply_files_dir, start, files);
- std::sort (files.begin (), files.end (), sortFiles);
+ std::sort(files.begin(), files.end(), sortFiles);
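+ // sortFiles orders the scenes by the numeric id embedded in their file names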
- auto model_source_ = local.getDataSource ();
- using ConstPointInTPtr = typename pcl::PointCloud<PointT>::ConstPtr;
+ auto model_source_ = local.getDataSource();
+ using ConstPointInTPtr = typename pcl::PointCloud<PointT>::ConstPtr;
- if (!single_model)
- {
- pcl::visualization::PCLVisualizer vis ("Mians dataset");
+ if (!single_model) {
+ pcl::visualization::PCLVisualizer vis("Mians dataset");
- for (std::size_t i = 0; i < files.size (); i++)
- {
- std::cout << files[i] << std::endl;
- if (scene != -1)
- if ((std::size_t) scene != i)
- continue;
-
- std::stringstream file;
- file << ply_files_dir.string () << files[i];
-
- typename pcl::PointCloud<PointT>::Ptr scene (new pcl::PointCloud<PointT> ());
- pcl::io::loadPCDFile (file.str (), *scene);
-
- local.setVoxelSizeICP (0.005f);
- local.setInputCloud (scene);
- {
- pcl::ScopeTime ttt ("Recognition");
- local.recognize ();
- }
-
- std::stringstream scene_name;
- scene_name << "Scene " << (i + 1);
- vis.addPointCloud<PointT> (scene, "scene_cloud");
- vis.addText (scene_name.str (), 1, 30, 24, 1, 0, 0, "scene_text");
+ for (std::size_t i = 0; i < files.size(); i++) {
+ std::cout << files[i] << std::endl;
+ if (scene != -1)
+ if ((std::size_t)scene != i)
+ continue;
- //visualize results
- auto models = local.getModels ();
- auto transforms = local.getTransforms ();
+ std::stringstream file;
+ file << ply_files_dir.string() << files[i];
- for (std::size_t j = 0; j < models->size (); j++)
- {
- std::stringstream name;
- name << "cloud_" << j;
+ typename pcl::PointCloud<PointT>::Ptr scene(new pcl::PointCloud<PointT>());
+ pcl::io::loadPCDFile(file.str(), *scene);
- ConstPointInTPtr model_cloud = models->at (j).getAssembled (0.0025f);
- typename pcl::PointCloud<PointT>::Ptr model_aligned (new pcl::PointCloud<PointT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms->at (j));
+ local.setVoxelSizeICP(0.005f);
+ local.setInputCloud(scene);
+ {
+ pcl::ScopeTime ttt("Recognition");
+ local.recognize();
+ }
- float r, g, b;
- std::cout << models->at (j).id_ << std::endl;
- r = 255.0f;
- g = 0.0f;
+ std::stringstream scene_name;
+ scene_name << "Scene " << (i + 1);
+ vis.addPointCloud<PointT>(scene, "scene_cloud");
+ vis.addText(scene_name.str(), 1, 30, 24, 1, 0, 0, "scene_text");
+
+ // visualize results
+ auto models = local.getModels();
+ auto transforms = local.getTransforms();
+
+ for (std::size_t j = 0; j < models->size(); j++) {
+ std::stringstream name;
+ name << "cloud_" << j;
+
+ ConstPointInTPtr model_cloud = models->at(j).getAssembled(0.0025f);
+ typename pcl::PointCloud<PointT>::Ptr model_aligned(
+ new pcl::PointCloud<PointT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms->at(j));
+
+ float r, g, b;
+ std::cout << models->at(j).id_ << std::endl;
+ r = 255.0f;
+ g = 0.0f;
+ b = 0.0f;
+
+ if (models->at(j).id_ == "cheff") {
+ r = 0.0f;
+ g = 255.0f;
b = 0.0f;
-
- if (models->at (j).id_ == "cheff")
- {
- r = 0.0f;
- g = 255.0f;
- b = 0.0f;
- }
- else if (models->at (j).id_ == "chicken_high")
- {
- r = 0.0f;
- g = 255.0f;
- b = 255.0f;
- }
- else if (models->at (j).id_ == "parasaurolophus_high")
- {
- r = 255.0f;
- g = 255.0f;
- b = 0.f;
- }
- else
- {
-
- }
-
- pcl::visualization::PointCloudColorHandlerCustom<PointT> random_handler (model_aligned, r, g, b);
- vis.addPointCloud<PointT> (model_aligned, random_handler, name.str ());
+ }
+ else if (models->at(j).id_ == "chicken_high") {
+ r = 0.0f;
+ g = 255.0f;
+ b = 255.0f;
+ }
+ else if (models->at(j).id_ == "parasaurolophus_high") {
+ r = 255.0f;
+ g = 255.0f;
+ b = 0.f;
+ }
+ else {
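+ // unrecognized model id: keep the default red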
}
- vis.spin ();
+ pcl::visualization::PointCloudColorHandlerCustom<PointT> random_handler(
+ model_aligned, r, g, b);
+ vis.addPointCloud<PointT>(model_aligned, random_handler, name.str());
+ }
- vis.removePointCloud ("scene_cloud");
- vis.removeShape ("scene_text");
- for (std::size_t j = 0; j < models->size (); j++)
- {
- std::stringstream name;
- name << "cloud_" << j;
- vis.removePointCloud (name.str ());
- }
+ vis.spin();
+
+ vis.removePointCloud("scene_cloud");
+ vis.removeShape("scene_text");
+ for (std::size_t j = 0; j < models->size(); j++) {
+ std::stringstream name;
+ name << "cloud_" << j;
+ vis.removePointCloud(name.str());
}
}
+ }
- if (single_model)
- {
- //some applications prefer to search for a single object only
- std::string id = "chicken_high";
- pcl::visualization::PCLVisualizer vis ("Single model - chicken");
- local.setSearchModel (id);
- local.initialize ();
+ if (single_model) {
+ // some applications prefer to search for a single object only
+ std::string id = "chicken_high";
+ pcl::visualization::PCLVisualizer vis("Single model - chicken");
+ local.setSearchModel(id);
+ local.initialize();
- for (std::size_t i = 0; i < files.size (); i++)
- {
- std::stringstream file;
- file << ply_files_dir.string () << files[i];
+ for (std::size_t i = 0; i < files.size(); i++) {
+ std::stringstream file;
+ file << ply_files_dir.string() << files[i];
- typename pcl::PointCloud<PointT>::Ptr scene (new pcl::PointCloud<PointT> ());
- pcl::io::loadPCDFile (file.str (), *scene);
+ typename pcl::PointCloud<PointT>::Ptr scene(new pcl::PointCloud<PointT>());
+ pcl::io::loadPCDFile(file.str(), *scene);
- local.setInputCloud (scene);
- local.recognize ();
+ local.setInputCloud(scene);
+ local.recognize();
- std::stringstream scene_name;
- scene_name << "Scene " << (i + 1);
- vis.addPointCloud<PointT> (scene, "scene_cloud");
- vis.addText (scene_name.str (), 1, 30, 24, 1, 0, 0, "scene_text");
+ std::stringstream scene_name;
+ scene_name << "Scene " << (i + 1);
+ vis.addPointCloud<PointT>(scene, "scene_cloud");
+ vis.addText(scene_name.str(), 1, 30, 24, 1, 0, 0, "scene_text");
- //visualize results
- auto models = local.getModels ();
- auto transforms = local.getTransforms ();
+ // visualize results
+ auto models = local.getModels();
+ auto transforms = local.getTransforms();
- for (std::size_t j = 0; j < models->size (); j++)
- {
- std::stringstream name;
- name << "cloud_" << j;
+ for (std::size_t j = 0; j < models->size(); j++) {
+ std::stringstream name;
+ name << "cloud_" << j;
- ConstPointInTPtr model_cloud = models->at (j).getAssembled (0.0025f);
- typename pcl::PointCloud<PointT>::Ptr model_aligned (new pcl::PointCloud<PointT>);
- pcl::transformPointCloud (*model_cloud, *model_aligned, transforms->at (j));
+ ConstPointInTPtr model_cloud = models->at(j).getAssembled(0.0025f);
+ typename pcl::PointCloud<PointT>::Ptr model_aligned(
+ new pcl::PointCloud<PointT>);
+ pcl::transformPointCloud(*model_cloud, *model_aligned, transforms->at(j));
- pcl::visualization::PointCloudColorHandlerRandom<PointT> random_handler (model_aligned);
- vis.addPointCloud<PointT> (model_aligned, random_handler, name.str ());
- }
+ pcl::visualization::PointCloudColorHandlerRandom<PointT> random_handler(
+ model_aligned);
+ vis.addPointCloud<PointT>(model_aligned, random_handler, name.str());
+ }
- vis.spin ();
+ vis.spin();
- vis.removePointCloud ("scene_cloud");
- vis.removeShape ("scene_text");
- for (std::size_t j = 0; j < models->size (); j++)
- {
- std::stringstream name;
- name << "cloud_" << j;
- vis.removePointCloud (name.str ());
- }
+ vis.removePointCloud("scene_cloud");
+ vis.removeShape("scene_text");
+ for (std::size_t j = 0; j < models->size(); j++) {
+ std::stringstream name;
+ name << "cloud_" << j;
+ vis.removePointCloud(name.str());
}
}
}
+}
void
-getModelsInDirectory (bf::path & dir, std::string & rel_path_so_far, std::vector<std::string> & relative_paths, std::string & ext)
+getModelsInDirectory(bf::path& dir,
+ std::string& rel_path_so_far,
+ std::vector<std::string>& relative_paths,
+ std::string& ext)
{
- for (const auto& dir_entry : bf::directory_iterator(dir))
- {
- //check if its a directory, then get models in it
- if (bf::is_directory (dir_entry))
- {
- std::string so_far = rel_path_so_far + (dir_entry.path ().filename ()).string() + "/";
-
- bf::path curr_path = dir_entry.path ();
- getModelsInDirectory (curr_path, so_far, relative_paths, ext);
+ for (const auto& dir_entry : bf::directory_iterator(dir)) {
+ // if it's a directory, recurse and collect the models inside it
+ if (bf::is_directory(dir_entry)) {
+ std::string so_far =
+ rel_path_so_far + (dir_entry.path().filename()).string() + "/";
+
+ bf::path curr_path = dir_entry.path();
+ getModelsInDirectory(curr_path, so_far, relative_paths, ext);
}
- else
- {
- //check that it is a ply file and then add, otherwise ignore..
- std::vector < std::string > strs;
- std::string file = (dir_entry.path ().filename ()).string();
+ else {
+ // check that the file has the requested extension and then add it, otherwise ignore
+ std::vector<std::string> strs;
+ std::string file = (dir_entry.path().filename()).string();
- boost::split (strs, file, boost::is_any_of ("."));
- std::string extension = strs[strs.size () - 1];
+ boost::split(strs, file, boost::is_any_of("."));
+ std::string extension = strs[strs.size() - 1];
- if (extension == ext)
- {
- std::string path = rel_path_so_far + (dir_entry.path ().filename ()).string();
+ if (extension == ext) {
+ std::string path = rel_path_so_far + (dir_entry.path().filename()).string();
- relative_paths.push_back (path);
+ relative_paths.push_back(path);
}
}
}
float CG_THRESHOLD_ = 0.005f;
/** Based on the paper:
- * "A Global Hypotheses Verification Method for 3D Object Recognition",
- * A. Aldoma and F. Tombari and L. Di Stefano and Markus Vincze, ECCV 2012
- *
- * Note: Due to changes PCL experimented between submission and the current status,
- * you might find some inconsistencies between parameter value in code and in the paper.
- * (tested on revision 7453)
- */
+ * "A Global Hypotheses Verification Method for 3D Object Recognition",
+ * A. Aldoma and F. Tombari and L. Di Stefano and Markus Vincze, ECCV 2012
+ *
+ * Note: Due to changes PCL has undergone between submission and the current version,
+ * you might find some inconsistencies between parameter values in the code and in the paper.
+ * (tested on revision 7453)
+ */
int
-main (int argc, char ** argv)
+main(int argc, char** argv)
{
std::string path;
std::string desc_name = "shot_omp";
float thres_hyp_ = 0.2f;
float desc_radius = 0.04f;
- pcl::console::parse_argument (argc, argv, "-models_dir", path);
- pcl::console::parse_argument (argc, argv, "-training_dir", training_dir);
- pcl::console::parse_argument (argc, argv, "-descriptor_name", desc_name);
- pcl::console::parse_argument (argc, argv, "-mians_scenes_dir", mians_scenes);
- pcl::console::parse_argument (argc, argv, "-force_retrain", force_retrain);
- pcl::console::parse_argument (argc, argv, "-icp_iterations", icp_iterations);
- pcl::console::parse_argument (argc, argv, "-use_cache", use_cache);
- pcl::console::parse_argument (argc, argv, "-splits", splits);
- pcl::console::parse_argument (argc, argv, "-gc_size", CG_SIZE_);
- pcl::console::parse_argument (argc, argv, "-gc_threshold", CG_THRESHOLD_);
- pcl::console::parse_argument (argc, argv, "-scene", scene);
- pcl::console::parse_argument (argc, argv, "-detect_clutter", detect_clutter);
- pcl::console::parse_argument (argc, argv, "-hv_method", hv_method);
- pcl::console::parse_argument (argc, argv, "-use_hv", use_hv);
- pcl::console::parse_argument (argc, argv, "-thres_hyp", thres_hyp_);
-
- if (mians_scenes.empty())
- {
- PCL_ERROR("Set the directory containing mians scenes using the -mians_scenes_dir [dir] option\n");
+ pcl::console::parse_argument(argc, argv, "-models_dir", path);
+ pcl::console::parse_argument(argc, argv, "-training_dir", training_dir);
+ pcl::console::parse_argument(argc, argv, "-descriptor_name", desc_name);
+ pcl::console::parse_argument(argc, argv, "-mians_scenes_dir", mians_scenes);
+ pcl::console::parse_argument(argc, argv, "-force_retrain", force_retrain);
+ pcl::console::parse_argument(argc, argv, "-icp_iterations", icp_iterations);
+ pcl::console::parse_argument(argc, argv, "-use_cache", use_cache);
+ pcl::console::parse_argument(argc, argv, "-splits", splits);
+ pcl::console::parse_argument(argc, argv, "-gc_size", CG_SIZE_);
+ pcl::console::parse_argument(argc, argv, "-gc_threshold", CG_THRESHOLD_);
+ pcl::console::parse_argument(argc, argv, "-scene", scene);
+ pcl::console::parse_argument(argc, argv, "-detect_clutter", detect_clutter);
+ pcl::console::parse_argument(argc, argv, "-hv_method", hv_method);
+ pcl::console::parse_argument(argc, argv, "-use_hv", use_hv);
+ pcl::console::parse_argument(argc, argv, "-thres_hyp", thres_hyp_);
+
+ if (mians_scenes.empty()) {
+ PCL_ERROR("Set the directory containing mians scenes using the -mians_scenes_dir "
+ "[dir] option\n");
return -1;
}
- if (path.empty())
- {
- PCL_ERROR("Set the directory containing the models of mian dataset using the -models_dir [dir] option\n");
+ if (path.empty()) {
+ PCL_ERROR("Set the directory containing the models of mian dataset using the "
+ "-models_dir [dir] option\n");
return -1;
}
bf::path models_dir_path = path;
- if (!bf::exists (models_dir_path))
- {
- PCL_ERROR("Models dir path %s does not exist, use -models_dir [dir] option\n", path.c_str());
+ if (!bf::exists(models_dir_path)) {
+ PCL_ERROR("Models dir path %s does not exist, use -models_dir [dir] option\n",
+ path.c_str());
return -1;
}
- std::vector < std::string > files;
+ std::vector<std::string> files;
std::string start;
- std::string ext = std::string ("ply");
+ std::string ext = std::string("ply");
bf::path dir = models_dir_path;
- getModelsInDirectory (dir, start, files, ext);
- assert (files.size () == 4);
-
- //configure mesh source
- std::shared_ptr<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> mesh_source (new pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>);
- mesh_source->setPath (path);
- mesh_source->setResolution (250);
- mesh_source->setTesselationLevel (1);
- mesh_source->setViewAngle (57.f);
- mesh_source->setRadiusSphere (1.5f);
- mesh_source->setModelScale (0.001f);
- mesh_source->generate (training_dir);
-
- std::shared_ptr<pcl::rec_3d_framework::Source<pcl::PointXYZ>> cast_source (
- std::static_pointer_cast<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> (mesh_source)
- );
-
- //configure normal estimator
- std::shared_ptr<pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal> > normal_estimator;
- normal_estimator.reset (new pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal>);
- normal_estimator->setCMR (false);
- normal_estimator->setDoVoxelGrid (true);
- normal_estimator->setRemoveOutliers (true);
- normal_estimator->setValuesForCMRFalse (0.003f, 0.012f);
-
- //configure keypoint extractor
- std::shared_ptr<pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ>> uniform_keypoint_extractor (
- new pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ>
- );
- //uniform_keypoint_extractor->setSamplingDensity (0.01f);
- uniform_keypoint_extractor->setSamplingDensity (0.005f);
- uniform_keypoint_extractor->setFilterPlanar (true);
-
- std::shared_ptr<pcl::rec_3d_framework::KeypointExtractor<pcl::PointXYZ>> keypoint_extractor (
- std::static_pointer_cast<pcl::rec_3d_framework::KeypointExtractor<pcl::PointXYZ>> (uniform_keypoint_extractor)
- );
-
- //configure cg algorithm (geometric consistency grouping)
- std::shared_ptr<pcl::CorrespondenceGrouping<pcl::PointXYZ, pcl::PointXYZ>> cast_cg_alg;
- std::shared_ptr<pcl::GeometricConsistencyGrouping<pcl::PointXYZ, pcl::PointXYZ> > gcg_alg (
- new pcl::GeometricConsistencyGrouping<pcl::PointXYZ,
- pcl::PointXYZ>);
- gcg_alg->setGCThreshold (CG_SIZE_);
- gcg_alg->setGCSize (CG_THRESHOLD_);
- cast_cg_alg = std::static_pointer_cast<pcl::CorrespondenceGrouping<pcl::PointXYZ, pcl::PointXYZ>> (gcg_alg);
-
- //configure hypothesis verificator
- std::shared_ptr<pcl::PapazovHV<pcl::PointXYZ, pcl::PointXYZ>> papazov (new pcl::PapazovHV<pcl::PointXYZ, pcl::PointXYZ>);
- papazov->setResolution (0.005f);
- papazov->setInlierThreshold (0.005f);
- papazov->setSupportThreshold (0.08f);
- papazov->setPenaltyThreshold (0.05f);
- papazov->setConflictThreshold (0.02f);
- papazov->setOcclusionThreshold (0.01f);
-
- std::shared_ptr<pcl::GlobalHypothesesVerification<pcl::PointXYZ, pcl::PointXYZ>> go (
- new pcl::GlobalHypothesesVerification<pcl::PointXYZ, pcl::PointXYZ>
- );
- go->setResolution (0.005f);
- go->setMaxIterations (7000);
- go->setInlierThreshold (0.005f);
- go->setRadiusClutter (0.04f);
- go->setRegularizer (3.f);
- go->setClutterRegularizer (7.5f);
- go->setDetectClutter (detect_clutter);
- go->setOcclusionThreshold (0.01f);
-
- std::shared_ptr<pcl::GreedyVerification<pcl::PointXYZ, pcl::PointXYZ>> greedy (new pcl::GreedyVerification<pcl::PointXYZ, pcl::PointXYZ> (3.f));
- greedy->setResolution (0.005f);
- greedy->setInlierThreshold (0.005f);
- greedy->setOcclusionThreshold (0.01f);
-
- std::shared_ptr<pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>> cast_hv_alg;
-
- switch (hv_method)
- {
- case 1:
- cast_hv_alg = std::static_pointer_cast<pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>> (greedy);
- break;
- case 2:
- cast_hv_alg = std::static_pointer_cast<pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>> (papazov);
- break;
- default:
- cast_hv_alg = std::static_pointer_cast<pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>> (go);
+ getModelsInDirectory(dir, start, files, ext);
+ assert(files.size() == 4);
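+ // the Mian dataset provides exactly four complete models, hence the hard-coded check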
+
+ // configure mesh source
+ std::shared_ptr<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>> mesh_source(
+ new pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>);
+ mesh_source->setPath(path);
+ mesh_source->setResolution(250);
+ mesh_source->setTesselationLevel(1);
+ mesh_source->setViewAngle(57.f);
+ mesh_source->setRadiusSphere(1.5f);
+ mesh_source->setModelScale(0.001f);
+ mesh_source->generate(training_dir);
+
+ std::shared_ptr<pcl::rec_3d_framework::Source<pcl::PointXYZ>> cast_source(
+ std::static_pointer_cast<pcl::rec_3d_framework::MeshSource<pcl::PointXYZ>>(
+ mesh_source));
+
+ // configure normal estimator
+ std::shared_ptr<
+ pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ, pcl::Normal>>
+ normal_estimator;
+ normal_estimator.reset(
+ new pcl::rec_3d_framework::PreProcessorAndNormalEstimator<pcl::PointXYZ,
+ pcl::Normal>);
+ normal_estimator->setCMR(false);
+ normal_estimator->setDoVoxelGrid(true);
+ normal_estimator->setRemoveOutliers(true);
+ normal_estimator->setValuesForCMRFalse(0.003f, 0.012f);
+
+ // configure keypoint extractor
+ std::shared_ptr<pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ>>
+ uniform_keypoint_extractor(
+ new pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ>);
+ // uniform_keypoint_extractor->setSamplingDensity (0.01f);
+ uniform_keypoint_extractor->setSamplingDensity(0.005f);
+ uniform_keypoint_extractor->setFilterPlanar(true);
+
+ std::shared_ptr<pcl::rec_3d_framework::KeypointExtractor<pcl::PointXYZ>>
+ keypoint_extractor(std::static_pointer_cast<
+ pcl::rec_3d_framework::KeypointExtractor<pcl::PointXYZ>>(
+ uniform_keypoint_extractor));
+
+ // configure cg algorithm (geometric consistency grouping)
+ std::shared_ptr<pcl::CorrespondenceGrouping<pcl::PointXYZ, pcl::PointXYZ>>
+ cast_cg_alg;
+ std::shared_ptr<pcl::GeometricConsistencyGrouping<pcl::PointXYZ, pcl::PointXYZ>>
+ gcg_alg(new pcl::GeometricConsistencyGrouping<pcl::PointXYZ, pcl::PointXYZ>);
+ gcg_alg->setGCThreshold(CG_SIZE_);
+ gcg_alg->setGCSize(CG_THRESHOLD_);
+ cast_cg_alg = std::static_pointer_cast<
+ pcl::CorrespondenceGrouping<pcl::PointXYZ, pcl::PointXYZ>>(gcg_alg);
+
+ // configure hypothesis verificator
+ std::shared_ptr<pcl::PapazovHV<pcl::PointXYZ, pcl::PointXYZ>> papazov(
+ new pcl::PapazovHV<pcl::PointXYZ, pcl::PointXYZ>);
+ papazov->setResolution(0.005f);
+ papazov->setInlierThreshold(0.005f);
+ papazov->setSupportThreshold(0.08f);
+ papazov->setPenaltyThreshold(0.05f);
+ papazov->setConflictThreshold(0.02f);
+ papazov->setOcclusionThreshold(0.01f);
+
+ std::shared_ptr<pcl::GlobalHypothesesVerification<pcl::PointXYZ, pcl::PointXYZ>> go(
+ new pcl::GlobalHypothesesVerification<pcl::PointXYZ, pcl::PointXYZ>);
+ go->setResolution(0.005f);
+ go->setMaxIterations(7000);
+ go->setInlierThreshold(0.005f);
+ go->setRadiusClutter(0.04f);
+ go->setRegularizer(3.f);
+ go->setClutterRegularizer(7.5f);
+ go->setDetectClutter(detect_clutter);
+ go->setOcclusionThreshold(0.01f);
+
+ std::shared_ptr<pcl::GreedyVerification<pcl::PointXYZ, pcl::PointXYZ>> greedy(
+ new pcl::GreedyVerification<pcl::PointXYZ, pcl::PointXYZ>(3.f));
+ greedy->setResolution(0.005f);
+ greedy->setInlierThreshold(0.005f);
+ greedy->setOcclusionThreshold(0.01f);
+
+ std::shared_ptr<pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>>
+ cast_hv_alg;
+
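+ // hv_method: 1 = greedy verification, 2 = Papazov, any other value = global hypotheses verification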
+ switch (hv_method) {
+ case 1:
+ cast_hv_alg = std::static_pointer_cast<
+ pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>>(greedy);
+ break;
+ case 2:
+ cast_hv_alg = std::static_pointer_cast<
+ pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>>(papazov);
+ break;
+ default:
+ cast_hv_alg = std::static_pointer_cast<
+ pcl::HypothesisVerification<pcl::PointXYZ, pcl::PointXYZ>>(go);
}
- if (desc_name == "shot")
- {
- std::shared_ptr<pcl::rec_3d_framework::SHOTLocalEstimation<pcl::PointXYZ, pcl::Histogram<352>>> estimator;
- estimator.reset (new pcl::rec_3d_framework::SHOTLocalEstimation<pcl::PointXYZ, pcl::Histogram<352> >);
- estimator->setNormalEstimator (normal_estimator);
- estimator->addKeypointExtractor (keypoint_extractor);
- estimator->setSupportRadius (0.04f);
-
- std::shared_ptr<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>> (estimator)
- );
-
- pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352> > local;
- local.setDataSource (cast_source);
- local.setTrainingDir (training_dir);
- local.setDescriptorName (desc_name);
- local.setFeatureEstimator (cast_estimator);
- local.setCGAlgorithm (cast_cg_alg);
+ if (desc_name == "shot") {
+ std::shared_ptr<
+ pcl::rec_3d_framework::SHOTLocalEstimation<pcl::PointXYZ, pcl::Histogram<352>>>
+ estimator;
+ estimator.reset(
+ new pcl::rec_3d_framework::SHOTLocalEstimation<pcl::PointXYZ,
+ pcl::Histogram<352>>);
+ estimator->setNormalEstimator(normal_estimator);
+ estimator->addKeypointExtractor(keypoint_extractor);
+ estimator->setSupportRadius(0.04f);
+
+ std::shared_ptr<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>>
+ cast_estimator(
+ std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ,
+ pcl::Histogram<352>>>(estimator));
+
+ pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352>>
+ local;
+ local.setDataSource(cast_source);
+ local.setTrainingDir(training_dir);
+ local.setDescriptorName(desc_name);
+ local.setFeatureEstimator(cast_estimator);
+ local.setCGAlgorithm(cast_cg_alg);
if (use_hv)
- local.setHVAlgorithm (cast_hv_alg);
- local.setUseCache (static_cast<bool> (use_cache));
- local.initialize (static_cast<bool> (force_retrain));
-
- uniform_keypoint_extractor->setSamplingDensity (0.005f);
- local.setICPIterations (icp_iterations);
- local.setKdtreeSplits (splits);
+ local.setHVAlgorithm(cast_hv_alg);
+ local.setUseCache(static_cast<bool>(use_cache));
+ local.initialize(static_cast<bool>(force_retrain));
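+ // initialize() loads the cached training data, retraining from scratch when force_retrain is set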
- recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::Histogram<352> > (local, mians_scenes);
+ uniform_keypoint_extractor->setSamplingDensity(0.005f);
+ local.setICPIterations(icp_iterations);
+ local.setKdtreeSplits(splits);
+ recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::Histogram<352>>(local,
+ mians_scenes);
}
- if (desc_name == "shot_omp")
- {
- desc_name = std::string ("shot");
- std::shared_ptr<pcl::rec_3d_framework::SHOTLocalEstimationOMP<pcl::PointXYZ, pcl::Histogram<352>>> estimator;
- estimator.reset (new pcl::rec_3d_framework::SHOTLocalEstimationOMP<pcl::PointXYZ, pcl::Histogram<352> >);
- estimator->setNormalEstimator (normal_estimator);
- estimator->addKeypointExtractor (keypoint_extractor);
- //estimator->setSupportRadius (0.04f);
- estimator->setSupportRadius (desc_radius);
-
- std::shared_ptr<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>> (estimator)
- );
-
- pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352> > local;
- local.setDataSource (cast_source);
- local.setTrainingDir (training_dir);
- local.setDescriptorName (desc_name);
- local.setFeatureEstimator (cast_estimator);
- local.setCGAlgorithm (cast_cg_alg);
+ if (desc_name == "shot_omp") {
+ desc_name = std::string("shot");
+ std::shared_ptr<pcl::rec_3d_framework::SHOTLocalEstimationOMP<pcl::PointXYZ,
+ pcl::Histogram<352>>>
+ estimator;
+ estimator.reset(
+ new pcl::rec_3d_framework::SHOTLocalEstimationOMP<pcl::PointXYZ,
+ pcl::Histogram<352>>);
+ estimator->setNormalEstimator(normal_estimator);
+ estimator->addKeypointExtractor(keypoint_extractor);
+ // estimator->setSupportRadius (0.04f);
+ estimator->setSupportRadius(desc_radius);
+
+ std::shared_ptr<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::Histogram<352>>>
+ cast_estimator(
+ std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ,
+ pcl::Histogram<352>>>(estimator));
+
+ pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::Histogram<352>>
+ local;
+ local.setDataSource(cast_source);
+ local.setTrainingDir(training_dir);
+ local.setDescriptorName(desc_name);
+ local.setFeatureEstimator(cast_estimator);
+ local.setCGAlgorithm(cast_cg_alg);
if (use_hv)
- local.setHVAlgorithm (cast_hv_alg);
-
- local.setUseCache (static_cast<bool> (use_cache));
- local.initialize (static_cast<bool> (force_retrain));
- local.setThresholdAcceptHyp (thres_hyp_);
+ local.setHVAlgorithm(cast_hv_alg);
- uniform_keypoint_extractor->setSamplingDensity (0.005f);
- local.setICPIterations (icp_iterations);
- local.setKdtreeSplits (splits);
+ local.setUseCache(static_cast<bool>(use_cache));
+ local.initialize(static_cast<bool>(force_retrain));
+ local.setThresholdAcceptHyp(thres_hyp_);
- recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::Histogram<352> > (local, mians_scenes, scene);
+ uniform_keypoint_extractor->setSamplingDensity(0.005f);
+ local.setICPIterations(icp_iterations);
+ local.setKdtreeSplits(splits);
+ recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::Histogram<352>>(
+ local, mians_scenes, scene);
}
- if (desc_name == "fpfh")
- {
- std::shared_ptr<pcl::rec_3d_framework::FPFHLocalEstimation<pcl::PointXYZ, pcl::FPFHSignature33>> estimator;
- estimator.reset (new pcl::rec_3d_framework::FPFHLocalEstimation<pcl::PointXYZ, pcl::FPFHSignature33>);
- estimator->setNormalEstimator (normal_estimator);
- estimator->addKeypointExtractor (keypoint_extractor);
- estimator->setSupportRadius (0.04f);
-
- std::shared_ptr<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::FPFHSignature33>> cast_estimator (
- std::dynamic_pointer_cast<pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::FPFHSignature33>> (estimator)
- );
-
- pcl::rec_3d_framework::LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33> local;
- local.setDataSource (cast_source);
- local.setTrainingDir (training_dir);
- local.setDescriptorName (desc_name);
- local.setFeatureEstimator (cast_estimator);
- local.setCGAlgorithm (cast_cg_alg);
+ if (desc_name == "fpfh") {
+ std::shared_ptr<
+ pcl::rec_3d_framework::FPFHLocalEstimation<pcl::PointXYZ, pcl::FPFHSignature33>>
+ estimator;
+ estimator.reset(
+ new pcl::rec_3d_framework::FPFHLocalEstimation<pcl::PointXYZ,
+ pcl::FPFHSignature33>);
+ estimator->setNormalEstimator(normal_estimator);
+ estimator->addKeypointExtractor(keypoint_extractor);
+ estimator->setSupportRadius(0.04f);
+
+ std::shared_ptr<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ, pcl::FPFHSignature33>>
+ cast_estimator(std::dynamic_pointer_cast<
+ pcl::rec_3d_framework::LocalEstimator<pcl::PointXYZ,
+ pcl::FPFHSignature33>>(
+ estimator));
+
+ pcl::rec_3d_framework::
+ LocalRecognitionPipeline<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33>
+ local;
+ local.setDataSource(cast_source);
+ local.setTrainingDir(training_dir);
+ local.setDescriptorName(desc_name);
+ local.setFeatureEstimator(cast_estimator);
+ local.setCGAlgorithm(cast_cg_alg);
if (use_hv)
- local.setHVAlgorithm (cast_hv_alg);
+ local.setHVAlgorithm(cast_hv_alg);
- local.setUseCache (static_cast<bool> (use_cache));
- local.initialize (static_cast<bool> (force_retrain));
+ local.setUseCache(static_cast<bool>(use_cache));
+ local.initialize(static_cast<bool>(force_retrain));
- uniform_keypoint_extractor->setSamplingDensity (0.005f);
- local.setICPIterations (icp_iterations);
- local.setKdtreeSplits (splits);
+ uniform_keypoint_extractor->setSamplingDensity(0.005f);
+ local.setICPIterations(icp_iterations);
+ local.setKdtreeSplits(splits);
- recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33> (local, mians_scenes);
+ recognizeAndVisualize<flann::L1, pcl::PointXYZ, pcl::FPFHSignature33>(local,
+ mians_scenes);
}
}
-#include "pcl/apps/3d_rec_framework/tools/openni_frame_source.h"
+#include <pcl/apps/3d_rec_framework/tools/openni_frame_source.h>
#include <pcl/io/pcd_io.h>
#include <pcl/memory.h>
-namespace OpenNIFrameSource
-{
+namespace OpenNIFrameSource {
- OpenNIFrameSource::OpenNIFrameSource (const std::string& device_id) :
- grabber_ (device_id), frame_counter_ (0), active_ (true)
- {
- std::function<void
- (const PointCloudConstPtr&)> frame_cb = [this] (const PointCloudConstPtr& cloud){ onNewFrame (cloud); };
- grabber_.registerCallback (frame_cb);
- grabber_.start ();
- }
+OpenNIFrameSource::OpenNIFrameSource(const std::string& device_id)
+: grabber_(device_id), frame_counter_(0), active_(true)
+{
+ std::function<void(const PointCloudConstPtr&)> frame_cb =
+ [this](const PointCloudConstPtr& cloud) { onNewFrame(cloud); };
+ grabber_.registerCallback(frame_cb);
+ grabber_.start();
+}
- OpenNIFrameSource::~OpenNIFrameSource ()
- {
- // Stop the grabber when shutting down
- grabber_.stop ();
- }
+OpenNIFrameSource::~OpenNIFrameSource()
+{
+ // Stop the grabber when shutting down
+ grabber_.stop();
+}
- bool
- OpenNIFrameSource::isActive () const
- {
- return active_;
- }
+bool
+OpenNIFrameSource::isActive() const
+{
+ return active_;
+}
- const PointCloudPtr
- OpenNIFrameSource::snap ()
- {
- return (most_recent_frame_);
- }
+const PointCloudPtr
+OpenNIFrameSource::snap()
+{
+ return (most_recent_frame_);
+}
- void
- OpenNIFrameSource::onNewFrame (const PointCloudConstPtr &cloud)
- {
- mutex_.lock ();
- ++frame_counter_;
- most_recent_frame_ = pcl::make_shared<PointCloud> (*cloud); // Make a copy of the frame
- mutex_.unlock ();
- }
+void
+OpenNIFrameSource::onNewFrame(const PointCloudConstPtr& cloud)
+{
+ mutex_.lock();
+ ++frame_counter_;
+ most_recent_frame_ = pcl::make_shared<PointCloud>(*cloud); // Make a copy of the frame
+ mutex_.unlock();
+}
- void
- OpenNIFrameSource::onKeyboardEvent (const pcl::visualization::KeyboardEvent & event)
- {
- // When the spacebar is pressed, trigger a frame capture
- mutex_.lock ();
- if (event.keyDown () && event.getKeySym () == "e")
- {
- active_ = false;
- }
- mutex_.unlock ();
+void
+OpenNIFrameSource::onKeyboardEvent(const pcl::visualization::KeyboardEvent& event)
+{
+ // Stop the frame source when the 'e' key is pressed
+ mutex_.lock();
+ if (event.keyDown() && event.getKeySym() == "e") {
+ active_ = false;
}
-
+ mutex_.unlock();
}
+
+} // namespace OpenNIFrameSource
include_directories(SYSTEM "${OPENGL_INCLUDE_DIR}")
endif()
PCL_ADD_EXECUTABLE(pcl_grabcut_2d COMPONENT ${SUBSYS_NAME} SOURCES src/grabcut_2d.cpp BUNDLE)
- target_link_libraries (pcl_grabcut_2d pcl_common pcl_io pcl_segmentation pcl_search GLUT::GLUT ${OPENGL_LIBRARIES})
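+ # Assumed rationale (not stated in this change): the imported GLUT::GLUT target may not
+ # resolve cleanly against the system GLUT framework on macOS, so fall back to the plain
+ # GLUT_glut_LIBRARY variable there.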
+ if(APPLE)
+ set(_glut_target ${GLUT_glut_LIBRARY})
+ else()
+ set(_glut_target GLUT::GLUT)
+ endif()
+ target_link_libraries (pcl_grabcut_2d pcl_common pcl_io pcl_segmentation pcl_search ${_glut_target} ${OPENGL_LIBRARIES})
endif()
collect_subproject_directory_names("${CMAKE_CURRENT_SOURCE_DIR}" "CMakeLists.txt" PCL_APPS_MODULES_NAMES PCL_APPS_MODULES_DIRS ${SUBSYS_NAME})
vis->removePointCloud (getId ().toStdString ());
qDebug () << QString("Adding point cloud normals, level=%1, scale=%2").arg(level).arg(scale);
vis->addPointCloudNormals<pcl::PointXYZ, pcl::Normal> (cloud, normals_ptr_, level, scale, getId ().toStdString ());
- std::cout << cloud->points[0]<<std::endl;
- std::cout << normals_ptr_->points[0]<<std::endl;
+ std::cout << (*cloud)[0] << std::endl;
+ std::cout << (*normals_ptr_)[0] << std::endl;
}
else
#pragma once
#include <pcl/common/common.h>
-#include <pcl/console/parse.h>
#include <pcl/features/normal_3d.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/kdtree.h> // for KdTree
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/segmentation/extract_polygonal_prism_data.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
+#include <pcl/filters/extract_indices.h> // for ExtractIndices
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/memory.h> // for pcl::make_shared
pass_.setInputCloud(input_);
pass_.filter(*cloud_filtered_);
- if (int(cloud_filtered_->points.size()) < k_) {
+ if (int(cloud_filtered_->size()) < k_) {
PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
- cloud_filtered_->points.size());
+ cloud_filtered_->size());
return;
}
// Need to flip the plane normal towards the viewpoint
Eigen::Vector4f vp(0, 0, 0, 0);
// See if we need to flip any plane normals
- vp -= table_hull->points[0].getVector4fMap();
+ vp -= (*table_hull)[0].getVector4fMap();
vp[3] = 0;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp.dot(model_coefficients);
model_coefficients[3] = 0;
// Hessian form (D = nc . p_plane (centroid here) + p)
model_coefficients[3] =
- -1 * (model_coefficients.dot(table_hull->points[0].getVector4fMap()));
+ -1 * (model_coefficients.dot((*table_hull)[0].getVector4fMap()));
}
// Set table_coeffs
pass_.setInputCloud(input_);
pass_.filter(*cloud_filtered_);
- if (int(cloud_filtered_->points.size()) < k_) {
+ if (int(cloud_filtered_->size()) < k_) {
PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
- cloud_filtered_->points.size());
+ cloud_filtered_->size());
return;
}
// Need to flip the plane normal towards the viewpoint
Eigen::Vector4f vp(0, 0, 0, 0);
// See if we need to flip any plane normals
- vp -= table_hull->points[0].getVector4fMap();
+ vp -= (*table_hull)[0].getVector4fMap();
vp[3] = 0;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp.dot(model_coefficients);
model_coefficients[3] = 0;
// Hessian form (D = nc . p_plane (centroid here) + p)
model_coefficients[3] =
- -1 * (model_coefficients.dot(table_hull->points[0].getVector4fMap()));
+ -1 * (model_coefficients.dot((*table_hull)[0].getVector4fMap()));
}
// Set table_coeffs
{
binary_cloud->width = input_->width;
binary_cloud->height = input_->height;
- binary_cloud->points.resize(input_->points.size());
+ binary_cloud->points.resize(input_->size());
binary_cloud->is_dense = input_->is_dense;
for (const int& idx : cloud_object_indices.indices) {
- binary_cloud->points[idx].getVector4fMap() = input_->points[idx].getVector4fMap();
- binary_cloud->points[idx].intensity = 1.0;
+ (*binary_cloud)[idx].getVector4fMap() = (*input_)[idx].getVector4fMap();
+ (*binary_cloud)[idx].intensity = 1.0;
}
}
pass_.setInputCloud(input_);
pass_.filter(*cloud_filtered_);
- if (int(cloud_filtered_->points.size()) < k_) {
+ if (int(cloud_filtered_->size()) < k_) {
PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
- cloud_filtered_->points.size());
+ cloud_filtered_->size());
return;
}
"(%f -> %f): %lu out of %lu\n",
min_z_bounds_,
max_z_bounds_,
- cloud_downsampled_->points.size(),
- input_->points.size());
+ cloud_downsampled_->size(),
+ input_->size());
// ---[ Estimate the point normals
n3d_.setInputCloud(cloud_downsampled_);
n3d_.compute(*cloud_normals_);
PCL_INFO("[DominantPlaneSegmentation] %lu normals estimated. \n",
- cloud_normals_->points.size());
+ cloud_normals_->size());
// ---[ Perform segmentation
seg_.setInputCloud(cloud_downsampled_);
// Need to flip the plane normal towards the viewpoint
Eigen::Vector4f vp(0, 0, 0, 0);
// See if we need to flip any plane normals
- vp -= table_hull->points[0].getVector4fMap();
+ vp -= (*table_hull)[0].getVector4fMap();
vp[3] = 0;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp.dot(model_coefficients);
model_coefficients[3] = 0;
// Hessian form (D = nc . p_plane (centroid here) + p)
model_coefficients[3] =
- -1 * (model_coefficients.dot(table_hull->points[0].getVector4fMap()));
+ -1 * (model_coefficients.dot((*table_hull)[0].getVector4fMap()));
}
// Set table_coeffs
pass_.setInputCloud(input_);
pass_.filter(*cloud_filtered_);
- if (int(cloud_filtered_->points.size()) < k_) {
+ if (int(cloud_filtered_->size()) < k_) {
PCL_WARN("[DominantPlaneSegmentation] Filtering returned %lu points! Aborting.",
- cloud_filtered_->points.size());
+ cloud_filtered_->size());
return;
}
"filtering&downsampling (%f -> %f): %lu out of %lu\n",
min_z_bounds_,
max_z_bounds_,
- cloud_downsampled_->points.size(),
- input_->points.size());
+ cloud_downsampled_->size(),
+ input_->size());
// ---[ Estimate the point normals
n3d_.setInputCloud(cloud_downsampled_);
n3d_.compute(*cloud_normals_);
PCL_INFO("[DominantPlaneSegmentation] %lu normals estimated. \n",
- cloud_normals_->points.size());
+ cloud_normals_->size());
// ---[ Perform segmentation
seg_.setInputCloud(cloud_downsampled_);
// Need to flip the plane normal towards the viewpoint
Eigen::Vector4f vp(0, 0, 0, 0);
// See if we need to flip any plane normals
- vp -= table_hull->points[0].getVector4fMap();
+ vp -= (*table_hull)[0].getVector4fMap();
vp[3] = 0;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp.dot(model_coefficients);
model_coefficients[3] = 0;
// Hessian form (D = nc . p_plane (centroid here) + p)
model_coefficients[3] =
- -1 * (model_coefficients.dot(table_hull->points[0].getVector4fMap()));
+ -1 * (model_coefficients.dot((*table_hull)[0].getVector4fMap()));
}
// Set table_coeffs
while (getline(f, label))
if (!label.empty())
labels.push_back(label);
- if (labels.size() != cloud->points.size())
+ if (labels.size() != cloud->size())
return false;
setTrainingFeatures(cloud);
setTrainingLabels(labels);
{
typename pcl::PointCloud<PointT>::ConstPtr training_features =
tree_->getInputCloud();
- if (labels_idx_.size() == training_features->points.size()) {
+ if (labels_idx_.size() == training_features->size()) {
if (pcl::io::savePCDFile(file_name.c_str(), *training_features) != 0)
return false;
std::ofstream f(labels_file_name.c_str());
saveTrainingFeatures(const std::string& file_name,
const std::string& labels_file_name)
{
- if (labels_.size() == training_features_->points.size()) {
+ if (labels_.size() == training_features_->size()) {
if (pcl::io::savePCDFile(file_name, *training_features_) != 0)
return false;
std::ofstream f(labels_file_name.c_str());
addTrainingFeatures(const FeatureCloudPtr& training_features,
const std::vector<std::string>& labels)
{
- if (labels.size() == training_features->points.size()) {
+ if (labels.size() == training_features->size()) {
labels_.insert(labels_.end(), labels.begin(), labels.end());
training_features_->points.insert(training_features_->points.end(),
training_features->points.begin(),
training_features->points.end());
training_features_->header = training_features->header;
training_features_->height = 1;
- training_features_->width =
- static_cast<std::uint32_t>(training_features_->points.size());
+ training_features_->width = training_features_->size();
training_features_->is_dense &= training_features->is_dense;
training_features_->sensor_origin_ = training_features->sensor_origin_;
training_features_->sensor_orientation_ = training_features->sensor_orientation_;
// If the dataset has no invalid values, just copy all of them
if (cloud_->is_dense) {
- vtkIdType nr_points = cloud_->points.size();
+ vtkIdType nr_points = cloud_->size();
data->SetNumberOfValues(3 * nr_points);
for (vtkIdType i = 0; i < nr_points; ++i) {
- data->SetValue(i * 3 + 0, cloud_->points[i].x);
- data->SetValue(i * 3 + 1, cloud_->points[i].y);
- data->SetValue(i * 3 + 2, cloud_->points[i].z);
+ data->SetValue(i * 3 + 0, (*cloud_)[i].x);
+ data->SetValue(i * 3 + 1, (*cloud_)[i].y);
+ data->SetValue(i * 3 + 2, (*cloud_)[i].z);
}
}
// Need to check for NaNs, Infs, ec
for (vtkIdType i = 0, i_end = indices->size(); i < i_end; ++i) {
vtkIdType idx = (*indices)[i];
- data->SetValue(i * 3 + 0, cloud_->points[idx].x);
- data->SetValue(i * 3 + 1, cloud_->points[idx].y);
- data->SetValue(i * 3 + 2, cloud_->points[idx].z);
+ data->SetValue(i * 3 + 0, (*cloud_)[idx].x);
+ data->SetValue(i * 3 + 1, (*cloud_)[idx].y);
+ data->SetValue(i * 3 + 2, (*cloud_)[idx].z);
}
}
data->Squeeze();
for (std::size_t i = 0, i_end = indices->size(); i < i_end; ++i) {
std::size_t dest = (*indices)[i];
- cloud->points[dest].normal_x = normals.points[i].normal_x;
- cloud->points[dest].normal_y = normals.points[i].normal_y;
- cloud->points[dest].normal_z = normals.points[i].normal_z;
+ (*cloud)[dest].normal_x = normals[i].normal_x;
+ (*cloud)[dest].normal_y = normals[i].normal_y;
+ (*cloud)[dest].normal_z = normals[i].normal_z;
}
}
data->SetNumberOfComponents(3);
if (cloud->is_dense) {
- vtkIdType nr_normals =
- static_cast<vtkIdType>((cloud->points.size() - 1) / level_ + 1);
+ vtkIdType nr_normals = static_cast<vtkIdType>((cloud->size() - 1) / level_ + 1);
data->SetNumberOfValues(2 * 3 * nr_normals);
for (vtkIdType i = 0, j = 0; j < nr_normals;
j++, i = static_cast<vtkIdType>(j * level_)) {
- const CloudMesh::PointT& p = cloud->points[i];
+ const CloudMesh::PointT& p = (*cloud)[i];
data->SetValue(2 * j * 3 + 0, p.x);
data->SetValue(2 * j * 3 + 1, p.y);
data->SetValue(2 * j * 3 + 2, p.z);
data->SetNumberOfValues(2 * 3 * nr_normals);
for (vtkIdType i = 0, j = 0; j < nr_normals;
j++, i = static_cast<vtkIdType>(j * level_)) {
- const CloudMesh::PointT& p = cloud->points[(*indices)[i]];
+ const CloudMesh::PointT& p = (*cloud)[(*indices)[i]];
data->SetValue(2 * j * 3 + 0, p.x);
data->SetValue(2 * j * 3 + 1, p.y);
data->SetValue(2 * j * 3 + 2, p.z);
{
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(Point3D),
- &(cloud_.points[0].b));
+ &(cloud_[0].b));
draw();
}
glTranslatef(-center_xyz_[0], -center_xyz_[1], -center_xyz_[2]);
glEnableClientState(GL_VERTEX_ARRAY);
- glVertexPointer(3, GL_FLOAT, sizeof(Point3D), &(cloud_.points[0].x));
+ glVertexPointer(3, GL_FLOAT, sizeof(Point3D), &(cloud_[0].x));
if (disable_highlight || (!selection_ptr) || selection_ptr->empty())
{
{
unsigned int pos = cloud_.size();
for (auto rit = selection.rbegin(); rit != selection.rend(); ++rit)
- std::swap(cloud_.points[--pos], cloud_.points[*rit]);
+ std::swap(cloud_[--pos], cloud_[*rit]);
resize(cloud_.size()-selection.size());
}
append(copied_cloud);
unsigned int pos = cloud_.size();
for (auto rit = selection.rbegin(); rit != selection.rend(); ++rit)
- std::swap(cloud_.points[--pos], cloud_.points[*rit]);
+ std::swap(cloud_[--pos], cloud_[*rit]);
}
std::string
std::fill_n(min_xyz_, XYZ_SIZE, 0.0f);
std::fill_n(max_xyz_, XYZ_SIZE, 0.0f);
- float *pt = &(cloud_.points[0].data[X]);
+ float *pt = &(cloud_[0].data[X]);
std::copy(pt, pt+XYZ_SIZE, max_xyz_);
std::copy(max_xyz_, max_xyz_+XYZ_SIZE, min_xyz_);
for (std::size_t i = 1; i < cloud_.size(); ++i)
{
for (unsigned int j = 0; j < XYZ_SIZE; ++j)
{
- min_xyz_[j] = std::min(min_xyz_[j], cloud_.points[i].data[j]);
- max_xyz_[j] = std::max(max_xyz_[j], cloud_.points[i].data[j]);
+ min_xyz_[j] = std::min(min_xyz_[j], cloud_[i].data[j]);
+ max_xyz_[j] = std::max(max_xyz_[j], cloud_[i].data[j]);
}
}
float range = 0.0f;
glEnable(GL_TEXTURE_1D);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(1, GL_FLOAT, sizeof(Point3D),
- &(cloud_.points[0].data[color_ramp_axis_]));
+ &(cloud_[0].data[color_ramp_axis_]));
glMatrixMode(GL_TEXTURE);
glPushMatrix();
glLoadIdentity();
float rgb_m;
bool exists_m;
pcl::for_each_type<FieldListM>(pcl::CopyIfFieldExists<PointInT, float>(
- scene_vis->points[0], "rgb", exists_m, rgb_m));
+ (*scene_vis)[0], "rgb", exists_m, rgb_m));
std::cout << "Color exists:" << static_cast<int>(exists_m) << std::endl;
if (exists_m) {
#include <pcl/ModelCoefficients.h>
#include <pcl/memory.h> // for pcl::dynamic_pointer_cast
-#include <sstream>
#include <string>
#include <vector>
std::cout << "keypoint detection..." << std::flush;
keypoint_detector_->setInputCloud(input);
keypoint_detector_->compute(*keypoints);
- std::cout << "OK. keypoints found: " << keypoints->points.size() << std::endl;
+ std::cout << "OK. keypoints found: " << keypoints->size() << std::endl;
}
template <typename FeatureType>
{
typename pcl::PointCloud<pcl::PointXYZRGB>::Ptr kpts(
new pcl::PointCloud<pcl::PointXYZRGB>);
- kpts->points.resize(keypoints->points.size());
+ kpts->points.resize(keypoints->size());
pcl::copyPointCloud(*keypoints, *kpts);
GrabCutHelper::buildImages()
{
using namespace pcl::segmentation::grabcut;
- memset(&n_links_image_->points[0], 0, sizeof(float) * n_links_image_->size());
+ memset(&(*n_links_image_)[0], 0, sizeof(float) * n_links_image_->size());
for (int y = 0; y < static_cast<int>(image_->height); ++y) {
for (int x = 0; x < static_cast<int>(image_->width); ++x) {
std::size_t index = y * image_->width + x;
}
// TLinks cloud
- pcl::segmentation::grabcut::Color& tlink_point = t_links_image_->points[index];
- pcl::segmentation::grabcut::Color& gmm_point = gmm_image_->points[index];
- float& alpha_point = alpha_image_->points[index];
+ pcl::segmentation::grabcut::Color& tlink_point = (*t_links_image_)[index];
+ pcl::segmentation::grabcut::Color& gmm_point = (*gmm_image_)[index];
+ float& alpha_point = (*alpha_image_)[index];
double red = pow(graph_.getSourceEdgeCapacity(index) / L_, 0.25); // red
double green = pow(graph_.getTargetEdgeCapacity(index) / L_, 0.25); // green
tlink_point.r = static_cast<float>(red);
{
switch (display_type) {
case 0:
- glDrawPixels(image_->width, image_->height, GL_RGB, GL_FLOAT, &(image_->points[0]));
+ glDrawPixels(image_->width, image_->height, GL_RGB, GL_FLOAT, &((*image_)[0]));
break;
case 1:
gmm_image_->height,
GL_RGB,
GL_FLOAT,
- &(gmm_image_->points[0]));
+ &((*gmm_image_)[0]));
break;
case 2:
n_links_image_->height,
GL_LUMINANCE,
GL_FLOAT,
- &(n_links_image_->points[0]));
+ &((*n_links_image_)[0]));
break;
case 3:
t_links_image_->height,
GL_RGB,
GL_FLOAT,
- &(t_links_image_->points[0]));
+ &((*t_links_image_)[0]));
break;
default:
alpha_image_->height,
GL_ALPHA,
GL_FLOAT,
- &(alpha_image_->points[0]));
+ &((*alpha_image_)[0]));
}
/* GUI interface */
display_image->height,
GL_RGB,
GL_FLOAT,
- &(display_image->points[0]));
+ &((*display_image)[0]));
else
grabcut.display(display_type);
for (std::size_t j = 0; j < scene->width; ++j) {
const pcl::PointXYZRGB& p = (*scene)(j, i);
std::size_t reverse_index = (height_1 - i) * scene->width + j;
- display_image->points[reverse_index].r = static_cast<float>(p.r) / 255.0;
- display_image->points[reverse_index].g = static_cast<float>(p.g) / 255.0;
- display_image->points[reverse_index].b = static_cast<float>(p.b) / 255.0;
- tmp->points[reverse_index] = p;
+ (*display_image)[reverse_index].r = static_cast<float>(p.r) / 255.0;
+ (*display_image)[reverse_index].g = static_cast<float>(p.g) / 255.0;
+ (*display_image)[reverse_index].b = static_cast<float>(p.b) / 255.0;
+ (*tmp)[reverse_index] = p;
}
}
}
#include <vtkRendererCollection.h>
using namespace pcl;
-using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
ManualRegistration::ManualRegistration()
{
if (src_point_selected_) {
src_pc_.points.push_back(src_point_);
- PCL_INFO("Selected %d source points\n", src_pc_.points.size());
+ PCL_INFO("Selected %zu source points\n",
+ static_cast<std::size_t>(src_pc_.size()));
src_point_selected_ = false;
- src_pc_.width = src_pc_.points.size();
+ src_pc_.width = src_pc_.size();
}
else {
PCL_INFO("Please select a point in the source window first\n");
{
if (dst_point_selected_) {
dst_pc_.points.push_back(dst_point_);
- PCL_INFO("Selected %d destination points\n", dst_pc_.points.size());
+ PCL_INFO("Selected %zu destination points\n",
+ static_cast<std::size_t>(dst_pc_.size()));
dst_point_selected_ = false;
- dst_pc_.width = dst_pc_.points.size();
+ dst_pc_.width = dst_pc_.size();
}
else {
PCL_INFO("Please select a point in the destination window first\n");
void
ManualRegistration::calculatePressed()
{
- if (dst_pc_.points.size() != src_pc_.points.size()) {
+ if (dst_pc_.size() != src_pc_.size()) {
PCL_INFO("You haven't selected an equal amount of points, please do so\n");
return;
}
PointCloud<Normal>::Ptr cloud_subsampled_normals;
subsampleAndCalculateNormals(cloud_scene, cloud_subsampled, cloud_subsampled_normals);
- PCL_INFO("STATS:\ninitial point cloud size: %u\nsubsampled point cloud size: %u\n",
- cloud_scene->points.size(),
- cloud_subsampled->points.size());
+ PCL_INFO("STATS:\ninitial point cloud size: %zu\nsubsampled point cloud size: %zu\n",
+ static_cast<std::size_t>(cloud_scene->size()),
+ static_cast<std::size_t>(cloud_subsampled->size()));
visualization::CloudViewer viewer(
"Multiscale Feature Persistence Example Visualization");
viewer.showCloud(cloud_scene, "scene");
auto output_indices = pcl::make_shared<std::vector<int>>();
feature_persistence.determinePersistentFeatures(*output_features, output_indices);
- PCL_INFO("persistent features cloud size: %u\n", output_features->points.size());
+ PCL_INFO("persistent features cloud size: %zu\n",
+ static_cast<std::size_t>(output_features->size()));
ExtractIndices<PointXYZ> extract_indices_filter;
extract_indices_filter.setInputCloud(cloud_subsampled);
#define SHOW_FPS 1
#include <pcl/apps/timer.h>
-#include <pcl/common/angles.h>
-#include <pcl/common/common.h>
-#include <pcl/common/time.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/agast_2d.h>
#include <thread>
using namespace pcl;
-using namespace std;
using namespace std::chrono_literals;
using KeyPointT = PointUV;
}
/////////////////////////////////////////////////////////////////////////
- string
+ std::string
getStrBool(bool state)
{
- stringstream ss;
+ std::stringstream ss;
ss << state;
return ss.str();
}
std::size_t j = 0;
for (std::size_t i = 0; i < keypoints->size(); ++i) {
const PointT& pt =
- (*cloud)(static_cast<long unsigned int>(keypoints->points[i].u),
- static_cast<long unsigned int>(keypoints->points[i].v));
+ (*cloud)(static_cast<long unsigned int>((*keypoints)[i].u),
+ static_cast<long unsigned int>((*keypoints)[i].v));
if (!std::isfinite(pt.x) || !std::isfinite(pt.y) || !std::isfinite(pt.z))
continue;
- keypoints3d.points[j].x = pt.x;
- keypoints3d.points[j].y = pt.y;
- keypoints3d.points[j].z = pt.z;
+ keypoints3d[j].x = pt.x;
+ keypoints3d[j].y = pt.y;
+ keypoints3d[j].z = pt.z;
++j;
}
if (keypoints && !keypoints->empty()) {
image_viewer_.removeLayer(getStrBool(keypts));
for (std::size_t i = 0; i < keypoints->size(); ++i) {
- int u = int(keypoints->points[i].u);
- int v = int(keypoints->points[i].v);
+ int u = int((*keypoints)[i].u);
+ int v = int((*keypoints)[i].v);
image_viewer_.markPoint(u,
v,
visualization::red_color,
else
pcl::console::setVerbosityLevel(pcl::console::L_INFO);
- string device_id("#1");
+ std::string device_id("#1");
OpenNIGrabber grabber(device_id);
AGASTDemo<PointXYZRGBA> openni_viewer(grabber);
#include <thread>
using namespace pcl;
-using namespace std;
using namespace std::chrono_literals;
using PointT = PointXYZRGBA;
}
/////////////////////////////////////////////////////////////////////////
- string
+ std::string
getStrBool(bool state)
{
- stringstream ss;
+ std::stringstream ss;
ss << state;
return ss.str();
}
std::size_t j = 0;
for (std::size_t i = 0; i < keypoints->size(); ++i) {
PointT pt =
- bilinearInterpolation(cloud, keypoints->points[i].x, keypoints->points[i].y);
+ bilinearInterpolation(cloud, (*keypoints)[i].x, (*keypoints)[i].y);
- keypoints3d.points[j].x = pt.x;
- keypoints3d.points[j].y = pt.y;
- keypoints3d.points[j].z = pt.z;
+ keypoints3d[j].x = pt.x;
+ keypoints3d[j].y = pt.y;
+ keypoints3d[j].z = pt.z;
++j;
}
image_viewer_.removeLayer(getStrBool(keypts));
for (std::size_t i = 0; i < keypoints->size(); ++i) {
- int u = int(keypoints->points[i].x);
- int v = int(keypoints->points[i].y);
+ int u = int((*keypoints)[i].x);
+ int v = int((*keypoints)[i].y);
image_viewer_.markPoint(u,
v,
visualization::red_color,
int
main(int, char**)
{
- string device_id("#1");
+ std::string device_id("#1");
OpenNIGrabber grabber(device_id);
BRISKDemo openni_viewer(grabber);
#include <thread>
using namespace pcl;
-using namespace std;
using namespace std::chrono_literals;
using PointT = PointXYZRGBA;
// Remove the plane indices from the data
PointIndices::Ptr everything_but_the_plane(new PointIndices);
- if (indices_fullset_.size() != cloud->points.size()) {
- indices_fullset_.resize(cloud->points.size());
+ if (indices_fullset_.size() != cloud->size()) {
+ indices_fullset_.resize(cloud->size());
for (int p_it = 0; p_it < static_cast<int>(indices_fullset_.size()); ++p_it)
indices_fullset_[p_it] = p_it;
}
PointCloud<Label>::Ptr scene(new PointCloud<Label>(cloud->width, cloud->height, l));
// Mask the objects that we want to split into clusters
for (const int& index : points_above_plane->indices)
- scene->points[index].label = 1;
+ (*scene)[index].label = 1;
euclidean_cluster_comparator->setLabels(scene);
OrganizedConnectedComponentSegmentation<PointT, Label> euclidean_segmentation(
labels,
label_indices,
boundary_indices);
- PCL_DEBUG("Number of planar regions detected: %lu for a cloud of %lu points and "
- "%lu normals.\n",
- regions.size(),
- search_.getInputCloud()->points.size(),
- normal_cloud->points.size());
+ PCL_DEBUG("Number of planar regions detected: %zu for a cloud of %zu points and "
+ "%zu normals.\n",
+ static_cast<std::size_t>(regions.size()),
+ static_cast<std::size_t>(search_.getInputCloud()->size()),
+ static_cast<std::size_t>(normal_cloud->size()));
double max_dist = std::numeric_limits<double>::max();
// Compute the distances from all the planar regions to the picked point, and select
event.getPoint(picked_pt.x, picked_pt.y, picked_pt.z);
// Add a sphere to it in the PCLVisualizer window
- stringstream ss;
+ std::stringstream ss;
ss << "sphere_" << idx;
cloud_viewer_.addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, ss.str());
PlanarRegion<PointT> refined_region;
pcl::approximatePolygon(region, refined_region, 0.01, false, true);
- PCL_INFO("Planar region: %lu points initial, %lu points after refinement.\n",
- region.getContour().size(),
- refined_region.getContour().size());
+ PCL_INFO("Planar region: %zu points initial, %zu points after refinement.\n",
+ static_cast<std::size_t>(region.getContour().size()),
+ static_cast<std::size_t>(refined_region.getContour().size()));
cloud_viewer_.addPolygon(refined_region, 0.0, 0.0, 1.0, "refined_region");
cloud_viewer_.setShapeRenderingProperties(
visualization::PCL_VISUALIZER_LINE_WIDTH, 10, "refined_region");
// Compute the min/max of the object
PointT min_pt, max_pt;
getMinMax3D(*object, min_pt, max_pt);
- stringstream ss2;
+ std::stringstream ss2;
ss2 << "cube_" << idx;
// Visualize the bounding box in 3D...
cloud_viewer_.addCube(min_pt.x,
int
main(int, char**)
{
- string device_id("#1");
+ std::string device_id("#1");
OpenNIGrabber grabber(device_id);
NILinemod openni_viewer(grabber);
#include <pcl/apps/timer.h>
#include <pcl/common/angles.h>
#include <pcl/common/common.h>
-#include <pcl/console/parse.h>
-#include <pcl/console/print.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/susan.h>
#include <thread>
using namespace pcl;
-using namespace std;
using namespace std::chrono_literals;
using PointT = PointXYZRGBA;
}
/////////////////////////////////////////////////////////////////////////
- string
+ std::string
getStrBool(bool state)
{
- stringstream ss;
+ std::stringstream ss;
ss << state;
return ss.str();
}
if (keypoints && !keypoints->empty()) {
image_viewer_.removeLayer(getStrBool(keypts));
for (std::size_t i = 0; i < keypoints->size(); ++i) {
- int u = int(keypoints->points[i].label % cloud->width);
- int v = cloud->height - int(keypoints->points[i].label / cloud->width);
+ int u = int((*keypoints)[i].label % cloud->width);
+ int v = cloud->height - int((*keypoints)[i].label / cloud->width);
image_viewer_.markPoint(u,
v,
visualization::red_color,
int
main(int, char**)
{
- string device_id("#1");
+ std::string device_id("#1");
OpenNIGrabber grabber(device_id);
SUSANDemo openni_viewer(grabber);
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
using namespace pcl;
using namespace pcl::visualization;
-using namespace std;
using namespace std::chrono_literals;
// clang-format off
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/filters/passthrough.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
#include <mutex>
#include <thread>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace pcl::visualization;
*/
#include <pcl/console/parse.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <pcl/visualization/cloud_viewer.h>
void
cloud_cb_(const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr& cloud)
{
- std::cerr << cloud->points.size() << " -- ";
+ std::cerr << cloud->size() << " -- ";
// assign point cloud to octree
octree->setInputCloud(cloud);
filtered_cloud->points.reserve(newPointIdxVector.size());
for (const int& idx : newPointIdxVector)
- filtered_cloud->points[idx].rgba = 255 << 16;
+ (*filtered_cloud)[idx].rgba = 255 << 16;
if (!viewer.wasStopped())
viewer.showCloud(filtered_cloud);
filtered_cloud->points.reserve(newPointIdxVector.size());
for (const int& idx : newPointIdxVector)
- filtered_cloud->points.push_back(cloud->points[idx]);
+ filtered_cloud->points.push_back((*cloud)[idx]);
if (!viewer.wasStopped())
viewer.showCloud(filtered_cloud);
*/
#include <pcl/common/time.h>
-#include <pcl/console/parse.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/io/pcd_io.h>
#include <pcl/surface/organized_fast_mesh.h>
-#include <pcl/visualization/cloud_viewer.h>
+#include <pcl/visualization/pcl_visualizer.h> // for PCLVisualizer
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
using namespace pcl;
using namespace pcl::visualization;
-using namespace std;
using namespace std::chrono_literals;
// clang-format off
extract_indices_filter_.setIndices(feature_indices_);
extract_indices_filter_.filter(*feature_locations_);
- PCL_INFO("Persistent feature locations %d\n", feature_locations_->points.size());
+ PCL_INFO("Persistent feature locations %zu\n",
+ static_cast<std::size_t>(feature_locations_->size()));
cloud_ = cloud;
for (std::size_t i = 0; i < keypoints_->size(); ++i) {
if (points_status_->indices[i] < 0)
continue;
- const pcl::PointUV& uv = keypoints_->points[i];
+ const pcl::PointUV& uv = (*keypoints_)[i];
markers.push_back(uv.u);
markers.push_back(uv.v);
}
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <pcl/search/kdtree.h> // for KdTree
#include <mutex>
std::size_t j = 0;
for (std::size_t i = 0; i < nr_points; ++i) {
- const pcl::PointXYZRGBA& point = cloud->points[i];
+ const pcl::PointXYZRGBA& point = (*cloud)[i];
if (!std::isfinite(point.x) || !std::isfinite(point.y) || !std::isfinite(point.z))
continue;
#include <boost/asio.hpp>
-#include <cstdio>
-#include <cstdlib>
#include <iostream>
-#include <sstream>
#include <string>
#include <thread>
-#include <vector>
using boost::asio::ip::tcp;
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using namespace std::chrono_literals;
// clang-format off
if (bEnDecode) {
// ENCODING
- ofstream compressedPCFile;
+ std::ofstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), ios::out | ios::trunc | ios::binary);
+ compressedPCFile.open(fileName.c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
if (!bShowInputCloud) {
EventHelper v(compressedPCFile, octreeCoder, field_name, min_v, max_v);
}
else {
// DECODING
- ifstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), ios::in | ios::binary);
+ std::ifstream compressedPCFile;
+ compressedPCFile.open(fileName.c_str(), std::ios::in | std::ios::binary);
compressedPCFile.seekg(0);
- compressedPCFile.unsetf(ios_base::skipws);
+ compressedPCFile.unsetf(std::ios_base::skipws);
pcl::visualization::CloudViewer viewer("PCL Compression Viewer");
#include <boost/asio.hpp>
-#include <cstdio>
-#include <cstdlib>
#include <iostream>
-#include <sstream>
#include <string>
#include <thread>
#include <vector>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using namespace std::chrono_literals;
char usage[] = "\n"
}
}
- ostream& outputFile_;
+ std::ostream& outputFile_;
OrganizedPointCloudCompression<PointXYZRGBA>* organizedEncoder_;
bool doColorEncoding_;
bool bShowStatistics_;
if (!bServerFileMode) {
if (bEnDecode) {
// ENCODING
- ofstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), ios::out | ios::trunc | ios::binary);
+ std::ofstream compressedPCFile;
+ compressedPCFile.open(fileName.c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
if (!bShowInputCloud) {
EventHelper v(compressedPCFile,
}
else {
// DECODING
- ifstream compressedPCFile;
- compressedPCFile.open(fileName.c_str(), ios::in | ios::binary);
+ std::ifstream compressedPCFile;
+ compressedPCFile.open(fileName.c_str(), std::ios::in | std::ios::binary);
compressedPCFile.seekg(0);
- compressedPCFile.unsetf(ios_base::skipws);
+ compressedPCFile.unsetf(std::ios_base::skipws);
pcl::visualization::CloudViewer viewer("PCL Compression Viewer");
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using namespace std::chrono_literals;
class SimpleOpenNIViewer {
// clang-format off
// draw some texts
viz.removeShape("N");
- viz.addText((boost::format("number of Reference PointClouds: %d") %
- tracker_->getReferenceCloud()->points.size()).str(),
+ viz.addText((boost::format("number of Reference PointClouds: %1%") %
+ tracker_->getReferenceCloud()->size()).str(),
10, 20, 20, 1.0, 1.0, 1.0, "N");
viz.removeShape("M");
- viz.addText((boost::format("number of Measured PointClouds: %d") %
- cloud_pass_downsampled_->points.size()).str(),
+ viz.addText((boost::format("number of Measured PointClouds: %1%") %
+ cloud_pass_downsampled_->size()).str(),
10, 40, 20, 1.0, 1.0, 1.0, "M");
viz.removeShape("tracking");
10, 100, 20, 1.0, 1.0, 1.0, "computation");
viz.removeShape("particles");
- viz.addText((boost::format("particles: %d") %
- tracker_->getParticles()->points.size()).str(),
+ viz.addText((boost::format("particles: %1%") %
+ tracker_->getParticles()->size()).str(),
10, 120, 20, 1.0, 1.0, 1.0, "particles");
// clang-format on
}
result.width = cloud->width;
result.height = cloud->height;
result.is_dense = cloud->is_dense;
- for (std::size_t i = 0; i < cloud->points.size(); i++) {
+ for (std::size_t i = 0; i < cloud->size(); i++) {
RefPointType point;
- point.x = cloud->points[i].x;
- point.y = cloud->points[i].y;
- point.z = cloud->points[i].z;
- point.rgba = cloud->points[i].rgba;
+ point.x = (*cloud)[i].x;
+ point.y = (*cloud)[i].y;
+ point.z = (*cloud)[i].z;
+ point.rgba = (*cloud)[i].rgba;
result.points.push_back(point);
}
}
void
removeZeroPoints(const CloudConstPtr& cloud, Cloud& result)
{
- for (std::size_t i = 0; i < cloud->points.size(); i++) {
- PointType point = cloud->points[i];
+ for (const auto& point : *cloud) {
if (!(std::abs(point.x) < 0.01 && std::abs(point.y) < 0.01 &&
std::abs(point.z) < 0.01) &&
!std::isnan(point.x) && !std::isnan(point.y) && !std::isnan(point.z))
result.points.push_back(point);
}
- result.width = static_cast<std::uint32_t>(result.points.size());
+ result.width = result.size();
result.height = 1;
result.is_dense = true;
}
{
pcl::PointIndices segmented_indices = cluster_indices[segment_index];
for (const int& index : segmented_indices.indices) {
- PointType point = cloud->points[index];
+ PointType point = (*cloud)[index];
result.points.push_back(point);
}
- result.width = std::uint32_t(result.points.size());
+ result.width = result.size();
result.height = 1;
result.is_dense = true;
}
tracker_->setReferenceCloud(transed_ref_downsampled);
tracker_->setTrans(trans);
reference_ = transed_ref;
- tracker_->setMinIndices(int(ref_cloud->points.size()) / 2);
+ tracker_->setMinIndices(ref_cloud->size() / 2);
}
else {
PCL_WARN("euclidean segmentation failed\n");
const pcl::visualization::PCLVisualizer::Ptr& viewer)
{
pcl::PointCloud<pcl::PointXYZRGBA> curvature_cloud = cloud;
- for (std::size_t i = 0; i < cloud.points.size(); i++) {
- if (normals.points[i].curvature < 0.04) {
- curvature_cloud.points[i].r = 0;
- curvature_cloud.points[i].g = 255;
- curvature_cloud.points[i].b = 0;
+ for (std::size_t i = 0; i < cloud.size(); i++) {
+ if (normals[i].curvature < 0.04) {
+ curvature_cloud[i].r = 0;
+ curvature_cloud[i].g = 255;
+ curvature_cloud[i].b = 0;
}
else {
- curvature_cloud.points[i].r = 255;
- curvature_cloud.points[i].g = 0;
- curvature_cloud.points[i].b = 0;
+ curvature_cloud[i].r = 255;
+ curvature_cloud[i].g = 0;
+ curvature_cloud[i].b = 0;
}
}
const pcl::visualization::PCLVisualizer::Ptr& viewer)
{
pcl::PointCloud<pcl::PointXYZRGBA> distance_map_cloud = cloud;
- for (std::size_t i = 0; i < cloud.points.size(); i++) {
+ for (std::size_t i = 0; i < cloud.size(); i++) {
if (distance_map[i] < 5.0) {
- distance_map_cloud.points[i].r = 255;
- distance_map_cloud.points[i].g = 0;
- distance_map_cloud.points[i].b = 0;
+ distance_map_cloud[i].r = 255;
+ distance_map_cloud[i].g = 0;
+ distance_map_cloud[i].b = 0;
}
else {
- distance_map_cloud.points[i].r = 0;
- distance_map_cloud.points[i].g = 255;
- distance_map_cloud.points[i].b = 0;
+ distance_map_cloud[i].r = 0;
+ distance_map_cloud[i].g = 255;
+ distance_map_cloud[i].b = 0;
}
}
#include <pcl/common/time.h>
#include <pcl/console/parse.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/geometry/polygon_operations.h>
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>
#include <pcl/segmentation/planar_region.h>
#include <pcl/visualization/pcl_visualizer.h>
-#include <pcl/ModelCoefficients.h>
+
+#include <cstdio>
template <typename PointT>
class PCDOrganizedMultiPlaneSegmentation {
pcl::PointXYZ pt2 = pcl::PointXYZ(centroid[0] + (0.5f * model[0]),
centroid[1] + (0.5f * model[1]),
centroid[2] + (0.5f * model[2]));
- sprintf(name, "normal_%d", unsigned(i));
+ std::snprintf(name, sizeof(name), "normal_%zu", i);
viewer.addArrow(pt2, pt1, 1.0, 0, 0, std::string(name));
contour->points = regions[i].getContour();
- sprintf(name, "plane_%02d", int(i));
+ std::snprintf(name, sizeof(name), "plane_%02zu", i);
pcl::visualization::PointCloudColorHandlerCustom<PointT> color(
contour, red[i], grn[i], blu[i]);
viewer.addPointCloud(contour, color, name);
<< std::endl;
typename pcl::PointCloud<PointT>::ConstPtr approx_contour_const = approx_contour;
- for (std::size_t idx = 0; idx < approx_contour->points.size(); ++idx) {
- sprintf(name, "approx_plane_%02d_%03d", int(i), int(idx));
+ for (std::size_t idx = 0; idx < approx_contour->size(); ++idx) {
+ std::snprintf(name,
+ sizeof(name),
+ "approx_plane_%02zu_%03zu",
+ static_cast<std::size_t>(i),
+ static_cast<std::size_t>(idx));
viewer.addLine(
- approx_contour->points[idx],
- approx_contour->points[(idx + 1) % approx_contour->points.size()],
+ (*approx_contour)[idx],
+ (*approx_contour)[(idx + 1) % approx_contour->size()],
0.5 * red[i],
0.5 * grn[i],
0.5 * blu[i],
*/
#include <pcl/common/angles.h>
-#include <pcl/common/common.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
#include <pcl/console/time.h>
using namespace pcl;
using namespace pcl::console;
-using namespace std;
using namespace std::chrono_literals;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
exppd.setInputCloud(cloud);
exppd.setIndices(indices_but_the_plane);
exppd.setInputPlanarHull(plane_hull);
- exppd.setViewPoint(cloud->points[picked_idx].x,
- cloud->points[picked_idx].y,
- cloud->points[picked_idx].z);
+ exppd.setViewPoint((*cloud)[picked_idx].x,
+ (*cloud)[picked_idx].y,
+ (*cloud)[picked_idx].z);
exppd.setHeightLimits(0.001, 0.5); // up to half a meter
exppd.segment(*points_above_plane);
new PointCloud<Label>(cloud->width, cloud->height, l));
// Mask the objects that we want to split into clusters
for (const int& index : points_above_plane->indices)
- scene->points[index].label = 1;
+ (*scene)[index].label = 1;
euclidean_cluster_comparator->setLabels(scene);
typename EuclideanClusterComparator<PointT, Label>::ExcludeLabelSetPtr
picked_pt.z);
// Add a sphere to it in the PCLVisualizer window
- stringstream ss;
+ std::stringstream ss;
ss << "sphere_" << idx;
cloud_viewer_->addSphere(picked_pt, 0.01, 1.0, 0.0, 0.0, ss.str());
for (std::uint32_t i = 0; i < cloud_->width * cloud_->height; ++i) {
RGB rgb;
memcpy(&rgb,
- reinterpret_cast<unsigned char*>(&cloud_->points[i]) + poff,
+ reinterpret_cast<unsigned char*>(&(*cloud_)[i]) + poff,
sizeof(rgb));
rgb_data_[i * 3 + 0] = rgb.r;
#include <iostream>
using namespace pcl;
-using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
PCDVideoPlayer::PCDVideoPlayer()
#include <thread>
using namespace pcl;
-using namespace std;
using namespace std::chrono_literals;
const Eigen::Vector4f subsampling_leaf_size(0.02f, 0.02f, 0.02f, 0.0f);
concatenateFields(
*cloud_subsampled, *cloud_subsampled_normals, *cloud_subsampled_with_normals);
- PCL_INFO("Cloud dimensions before / after subsampling: %u / %u\n",
- cloud->points.size(),
- cloud_subsampled->points.size());
+ PCL_INFO("Cloud dimensions before / after subsampling: %zu / %zu\n",
+ static_cast<std::size_t>(cloud->size()),
+ static_cast<std::size_t>(cloud_subsampled->size()));
return cloud_subsampled_with_normals;
}
PCL_INFO("Reading models ...\n");
std::vector<PointCloud<PointXYZ>::Ptr> cloud_models;
- ifstream pcd_file_list(argv[1]);
+ std::ifstream pcd_file_list(argv[1]);
while (!pcd_file_list.eof()) {
char str[512];
pcd_file_list.getline(str, 512);
extract.setNegative(true);
pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients());
pcl::PointIndices::Ptr inliers(new pcl::PointIndices());
- unsigned nr_points = unsigned(cloud_scene->points.size());
- while (cloud_scene->points.size() > 0.3 * nr_points) {
+ const auto nr_points = cloud_scene->size();
+ while (cloud_scene->size() > 0.3 * nr_points) {
seg.setInputCloud(cloud_scene);
seg.segment(*inliers, *coefficients);
- PCL_INFO("Plane inliers: %u\n", inliers->indices.size());
+ PCL_INFO("Plane inliers: %zu\n",
+ static_cast<std::size_t>(inliers->indices.size()));
if (inliers->indices.size() < 50000)
break;
pcl::transformPointCloud(
*cloud_models[model_i], *cloud_output, final_transformation);
- stringstream ss;
+ std::stringstream ss;
ss << "model_" << model_i;
visualization::PointCloudColorHandlerRandom<PointXYZ> random_color(
cloud_output->makeShared());
using namespace pcl;
#include <iostream>
-using namespace std;
const Eigen::Vector4f subsampling_leaf_size(0.02f, 0.02f, 0.02f, 0.0f);
const float normal_estimation_search_radius = 0.05f;
normal_estimation_filter.setRadiusSearch(normal_estimation_search_radius);
normal_estimation_filter.compute(*cloud_subsampled_normals);
- std::cerr << "Before -> After subsampling: " << cloud->points.size() << " -> "
- << cloud_subsampled->points.size() << std::endl;
+ std::cerr << "Before -> After subsampling: " << cloud->size() << " -> "
+ << cloud_subsampled->size() << std::endl;
}
int
ppf_estimator.setInputNormals(cloud_subsampled_with_normals_b);
ppf_estimator.compute(*ppf_signature_b);
- PCL_INFO("Feature cloud sizes: %u , %u\n",
- ppf_signature_a->points.size(),
- ppf_signature_b->points.size());
+ PCL_INFO("Feature cloud sizes: %zu , %zu\n",
+ static_cast<std::size_t>(ppf_signature_a->size()),
+ static_cast<std::size_t>(ppf_signature_b->size()));
PCL_INFO("Finished calculating the features ...\n");
- std::vector<pair<float, float>> dim_range_input, dim_range_target;
+ std::vector<std::pair<float, float>> dim_range_input, dim_range_target;
for (std::size_t i = 0; i < 3; ++i)
dim_range_input.emplace_back(float(-M_PI), float(M_PI));
dim_range_input.emplace_back(0.0f, 1.0f);
worldPicker->Pick(x, y, value, renderer);
worldPicker->GetPickPosition(coords);
- cloud->points[count_valid_depth_pixels].x = static_cast<float>(coords[0]);
- cloud->points[count_valid_depth_pixels].y = static_cast<float>(coords[1]);
- cloud->points[count_valid_depth_pixels].z = static_cast<float>(coords[2]);
- cloud->points[count_valid_depth_pixels].getVector4fMap() =
+ (*cloud)[count_valid_depth_pixels].x = static_cast<float>(coords[0]);
+ (*cloud)[count_valid_depth_pixels].y = static_cast<float>(coords[1]);
+ (*cloud)[count_valid_depth_pixels].z = static_cast<float>(coords[2]);
+ (*cloud)[count_valid_depth_pixels].getVector4fMap() =
backToRealScale_eigen *
- cloud->points[count_valid_depth_pixels].getVector4fMap();
+ (*cloud)[count_valid_depth_pixels].getVector4fMap();
count_valid_depth_pixels++;
}
}
#include <pcl/io/pcd_io.h>
using namespace pcl;
-using namespace std;
const float subsampling_leaf_size = 0.003f;
const float base_scale = 0.005f;
PCDReader reader;
reader.read(argv[1], *cloud);
PCL_INFO("Cloud read: %s\n", argv[1]);
- std::cerr << "cloud has #points: " << cloud->points.size() << std::endl;
+ std::cerr << "cloud has #points: " << cloud->size() << std::endl;
PointCloud<PointXYZ>::Ptr cloud_subsampled(new PointCloud<PointXYZ>());
VoxelGrid<PointXYZ> subsampling_filter;
subsampling_filter.setLeafSize(
subsampling_leaf_size, subsampling_leaf_size, subsampling_leaf_size);
subsampling_filter.filter(*cloud_subsampled);
- std::cerr << "subsampled cloud has #points: " << cloud_subsampled->points.size()
+ std::cerr << "subsampled cloud has #points: " << cloud_subsampled->size()
<< std::endl;
StatisticalMultiscaleInterestRegionExtraction<PointXYZ> region_extraction;
*
*/
+#include <pcl/common/centroid.h> // for computeMeanAndCovarianceMatrix
#include <pcl/common/distances.h>
#include <pcl/common/intersections.h>
#include <pcl/common/time.h>
#include <pcl/features/integral_image_normal.h>
-#include <pcl/features/normal_3d.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/io/io.h>
#include <pcl/io/openni_grabber.h>
#include <pcl/io/pcd_grabber.h>
if (region_index.indices.size() > 1000) {
for (std::size_t j = 0; j < region_index.indices.size(); j++) {
- pcl::PointXYZ ground_pt(cloud->points[region_index.indices[j]].x,
- cloud->points[region_index.indices[j]].y,
- cloud->points[region_index.indices[j]].z);
+ pcl::PointXYZ ground_pt((*cloud)[region_index.indices[j]].x,
+ (*cloud)[region_index.indices[j]].y,
+ (*cloud)[region_index.indices[j]].z);
ground_cloud->points.push_back(ground_pt);
- ground_image->points[region_index.indices[j]].g = static_cast<std::uint8_t>(
- (cloud->points[region_index.indices[j]].g + 255) / 2);
- label_image->points[region_index.indices[j]].r = 0;
- label_image->points[region_index.indices[j]].g = 255;
- label_image->points[region_index.indices[j]].b = 0;
+ (*ground_image)[region_index.indices[j]].g = static_cast<std::uint8_t>(
+ ((*cloud)[region_index.indices[j]].g + 255) / 2);
+ (*label_image)[region_index.indices[j]].r = 0;
+ (*label_image)[region_index.indices[j]].g = 255;
+ (*label_image)[region_index.indices[j]].b = 0;
}
// Compute plane info
if (region_index.indices.size() > 1000) {
for (std::size_t j = 0; j < region_index.indices.size(); j++) {
// Check to see if it has already been labeled
- if (ground_image->points[region_index.indices[j]].g ==
- ground_image->points[region_index.indices[j]].b) {
- pcl::PointXYZ ground_pt(cloud->points[region_index.indices[j]].x,
- cloud->points[region_index.indices[j]].y,
- cloud->points[region_index.indices[j]].z);
+ if ((*ground_image)[region_index.indices[j]].g ==
+ (*ground_image)[region_index.indices[j]].b) {
+ pcl::PointXYZ ground_pt((*cloud)[region_index.indices[j]].x,
+ (*cloud)[region_index.indices[j]].y,
+ (*cloud)[region_index.indices[j]].z);
ground_cloud->points.push_back(ground_pt);
- ground_image->points[region_index.indices[j]].r = static_cast<std::uint8_t>(
- (cloud->points[region_index.indices[j]].r + 255) / 2);
- ground_image->points[region_index.indices[j]].g = static_cast<std::uint8_t>(
- (cloud->points[region_index.indices[j]].g + 255) / 2);
- label_image->points[region_index.indices[j]].r = 128;
- label_image->points[region_index.indices[j]].g = 128;
- label_image->points[region_index.indices[j]].b = 0;
+ (*ground_image)[region_index.indices[j]].r = static_cast<std::uint8_t>(
+ ((*cloud)[region_index.indices[j]].r + 255) / 2);
+ (*ground_image)[region_index.indices[j]].g = static_cast<std::uint8_t>(
+ ((*cloud)[region_index.indices[j]].g + 255) / 2);
+ (*label_image)[region_index.indices[j]].r = 128;
+ (*label_image)[region_index.indices[j]].g = 128;
+ (*label_image)[region_index.indices[j]].b = 0;
}
}
}
if ((ptp_dist > 0.5) && (ptp_dist < 3.0)) {
for (std::size_t j = 0; j < euclidean_label_index.indices.size(); j++) {
- ground_image->points[euclidean_label_index.indices[j]].r = 255;
- label_image->points[euclidean_label_index.indices[j]].r = 255;
- label_image->points[euclidean_label_index.indices[j]].g = 0;
- label_image->points[euclidean_label_index.indices[j]].b = 0;
+ (*ground_image)[euclidean_label_index.indices[j]].r = 255;
+ (*label_image)[euclidean_label_index.indices[j]].r = 255;
+ (*label_image)[euclidean_label_index.indices[j]].g = 0;
+ (*label_image)[euclidean_label_index.indices[j]].b = 0;
}
}
}
}
// note the NAN points in the image as well
- for (std::size_t i = 0; i < cloud->points.size(); i++) {
- if (!pcl::isFinite(cloud->points[i])) {
- ground_image->points[i].b =
- static_cast<std::uint8_t>((cloud->points[i].b + 255) / 2);
- label_image->points[i].r = 0;
- label_image->points[i].g = 0;
- label_image->points[i].b = 255;
+ for (std::size_t i = 0; i < cloud->size(); i++) {
+ if (!pcl::isFinite((*cloud)[i])) {
+ (*ground_image)[i].b =
+ static_cast<std::uint8_t>(((*cloud)[i].b + 255) / 2);
+ (*label_image)[i].r = 0;
+ (*label_image)[i].g = 0;
+ (*label_image)[i].b = 255;
}
}
if (!viewer->updatePointCloud(prev_ground_image, "cloud"))
viewer->addPointCloud(prev_ground_image, "cloud");
- if (prev_normal_cloud->points.size() > 1000 && display_normals) {
+ if (prev_normal_cloud->size() > 1000 && display_normals) {
viewer->removePointCloud("normals");
viewer->addPointCloudNormals<PointT, pcl::Normal>(
prev_ground_image, prev_normal_cloud, 10, 0.15f, "normals");
pcl::visualization::PCL_VISUALIZER_OPACITY, 0.5, "normals");
}
- if (prev_cloud->points.size() > 1000) {
+ if (prev_cloud->size() > 1000) {
image_viewer->addRGBImage<PointT>(prev_ground_image, "rgb_image", 0.3);
}
#include <pcl/search/brute_force.h>
#include <pcl/search/kdtree.h>
-#include <sstream>
#include <string>
#include <vector>
-using namespace std;
-
int
main(int argc, char** argv)
{
return 1;
}
- string pcd_path;
+ std::string pcd_path;
bool use_pcd_file = pcl::console::find_switch(argc, argv, "-pcd");
if (use_pcd_file)
pcl::console::parse(argc, argv, "-pcd", pcd_path);
endif()
include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(ENSENSO
+find_package_handle_standard_args(Ensenso
FOUND_VAR ENSENSO_FOUND
REQUIRED_VARS ENSENSO_LIBRARIES ENSENSO_INCLUDE_DIRS
)
get_filename_component(_config_dir "${flann_CONFIG}" DIRECTORY)
get_filename_component(FLANN_ROOT "${_config_dir}/../../.." ABSOLUTE)
unset(_config_dir)
+ message(STATUS "Found flann version ${flann_VERSION}")
return()
endif()
#Is pcap found ?
include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(PCAP DEFAULT_MSG
+find_package_handle_standard_args(Pcap DEFAULT_MSG
PCAP_LIBRARIES PCAP_INCLUDE_DIRS)
mark_as_advanced(
set(LIBUSB_1_LIBRARIES ${LIBUSB_1_LIBRARY})
include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(LIBUSB_1 DEFAULT_MSG LIBUSB_1_LIBRARY LIBUSB_1_INCLUDE_DIR)
+ find_package_handle_standard_args(libusb-1.0 DEFAULT_MSG LIBUSB_1_LIBRARY LIBUSB_1_INCLUDE_DIR)
# show the LIBUSB_1_INCLUDE_DIRS and LIBUSB_1_LIBRARIES variables only in the advanced view
mark_as_advanced(LIBUSB_1_INCLUDE_DIRS LIBUSB_1_LIBRARIES)
@CPACK_NSIS_DEFINES@
!include Sections.nsh
+ !macroundef RemoveSection
;--- Component support macros: ---
; The code for the add/remove functionality is from:
endif()
set(Boost_ADDITIONAL_VERSIONS
- "1.71.0" "1.71" "1.70.0" "1.70"
+ "1.73.0" "1.73" "1.72.0" "1.72" "1.71.0" "1.71" "1.70.0" "1.70"
"1.69.0" "1.69" "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
"1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
"1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55")
endif()
set(CUDA_FIND_QUIETLY TRUE)
-find_package(CUDA 7.5)
+find_package(CUDA 9.0)
if(CUDA_FOUND)
message(STATUS "Found CUDA Toolkit v${CUDA_VERSION_STRING}")
# To add support of older GPU for kinfu, I would embed PTX 11 and 12 into so-file. GPU with sm_13 will run PTX 12 code (no difference for kinfu)
# Find a complete list for CUDA compute capabilities at http://developer.nvidia.com/cuda-gpus
-
- if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "10.0")
+
+ # For a list showing CUDA toolkit version support for compute capabilities see: https://en.wikipedia.org/wiki/CUDA
+ # or the nvidia release notes ie:
+ # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-general-new-features
+ # or
+ # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
+
+ if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "11.0")
+ set(__cuda_arch_bin "5.2 5.3 6.0 6.1 7.0 7.2 7.5")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "10.0")
set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2 7.5")
- elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "9.1")
- set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2")
elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "9.0")
- set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0")
- elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "8.0")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2 5.3 6.0 6.1")
- else()
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2")
+ set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2")
endif()
set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
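+ # Illustrative example only: a build targeting a single GPU generation could override
+ # this cache variable at configure time, e.g. cmake -DCUDA_ARCH_BIN="7.5" ..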
option(BUILD_tools "Useful PCL-based command line tools" ON)
option(WITH_DOCS "Build doxygen documentation" OFF)
+
+# set index size
+set(PCL_INDEX_SIZE -1 CACHE STRING "Set index size. Available options are: 8 16 32 64. A negative value indicates the default size (32 for PCL >= 1.12; otherwise 8*sizeof(int), i.e., the number of bits in an int)")
+set_property(CACHE PCL_INDEX_SIZE PROPERTY STRINGS -1 8 16 32 64)
+
+# Set whether indices are signed or unsigned
+set(PCL_INDEX_SIGNED true CACHE BOOL "Set whether indices need to be signed or unsigned. Signed by default.")
+if(PCL_INDEX_SIGNED)
+  set(PCL_INDEX_SIGNED_STR "true")
+else()
+  set(PCL_INDEX_SIGNED_STR "false")
+endif()
+
+# Set whether gpu tests should be run
+# (Used to prevent gpu tests from executing in CI where GPU hardware is unavailable)
+option(PCL_DISABLE_GPU_TESTS "Disable running GPU tests. If disabled, tests will still be built." OFF)
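+# As an illustrative sketch (not part of the required configuration), these cache
+# variables could be set at configure time, for example:
+#   cmake -DPCL_INDEX_SIZE=64 -DPCL_INDEX_SIGNED=false -DPCL_DISABLE_GPU_TESTS=ON ..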
+
if(SUBSYS_EXT_DEPS)
foreach(_dep ${SUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT ${EXT_DEP_FOUND} AND (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
+ if(NOT ${EXT_DEP_FOUND})
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_name} FALSE "Requires external library ${_dep}.")
endif()
if(SUBSUBSYS_EXT_DEPS)
foreach(_dep ${SUBSUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT ${EXT_DEP_FOUND} AND (NOT ("${EXT_DEP_FOUND}" STREQUAL "TRUE")))
+ if(NOT ${EXT_DEP_FOUND})
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_parent}_${_name} FALSE "Requires external library ${_dep}.")
endif()
include/pcl/pcl_macros.h
include/pcl/types.h
include/pcl/point_cloud.h
+ include/pcl/point_struct_traits.h
include/pcl/point_traits.h
include/pcl/type_traits.h
include/pcl/point_types_conversion.h
public:
CloudIterator (PointCloud<PointT>& cloud);
- CloudIterator (PointCloud<PointT>& cloud, const std::vector<int>& indices);
+ CloudIterator (PointCloud<PointT>& cloud, const Indices& indices);
CloudIterator (PointCloud<PointT>& cloud, const PointIndices& indices);
public:
ConstCloudIterator (const PointCloud<PointT>& cloud);
- ConstCloudIterator (const PointCloud<PointT>& cloud, const std::vector<int>& indices);
+ ConstCloudIterator (const PointCloud<PointT>& cloud, const Indices& indices);
ConstCloudIterator (const PointCloud<PointT>& cloud, const PointIndices& indices);
*/
template <typename PointT, typename Scalar> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 4, 1> ¢roid);
template <typename PointT> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Vector4f ¢roid)
{
return (compute3DCentroid <PointT, float> (cloud, indices, centroid));
template <typename PointT> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Vector4d ¢roid)
{
return (compute3DCentroid <PointT, double> (cloud, indices, centroid));
*/
template <typename PointT, typename Scalar> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::Matrix<Scalar, 4, 1> ¢roid);
template <typename PointT> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::Vector4f &centroid)
{
return (compute3DCentroid <PointT, float> (cloud, indices, centroid));
template <typename PointT> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::Vector4d &centroid)
{
return (compute3DCentroid <PointT, double> (cloud, indices, centroid));
*/
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix);
template <typename PointT> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4f &centroid,
Eigen::Matrix3f &covariance_matrix)
{
template <typename PointT> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4d &centroid,
Eigen::Matrix3d &covariance_matrix)
{
*/
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrixNormalized (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix);
template <typename PointT> inline unsigned int
computeCovarianceMatrixNormalized (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4f &centroid,
Eigen::Matrix3f &covariance_matrix)
{
template <typename PointT> inline unsigned int
computeCovarianceMatrixNormalized (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4d &centroid,
Eigen::Matrix3d &covariance_matrix)
{
*/
template <typename PointT, typename Scalar> inline unsigned int
computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix,
Eigen::Matrix<Scalar, 4, 1> &centroid);
template <typename PointT> inline unsigned int
computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix3f &covariance_matrix,
Eigen::Vector4f &centroid)
{
template <typename PointT> inline unsigned int
computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix3d &covariance_matrix,
Eigen::Vector4d &centroid)
{
*/
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix);
template <typename PointT> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix3f &covariance_matrix)
{
return (computeCovarianceMatrix<PointT, float> (cloud, indices, covariance_matrix));
template <typename PointT> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix3d &covariance_matrix)
{
return (computeCovarianceMatrix<PointT, double> (cloud, indices, covariance_matrix));
*/
template <typename PointT, typename Scalar> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
pcl::PointCloud<PointT> &cloud_out);
template <typename PointT> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4f &centroid,
pcl::PointCloud<PointT> &cloud_out)
{
template <typename PointT> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4d &centroid,
pcl::PointCloud<PointT> &cloud_out)
{
*/
template <typename PointT, typename Scalar> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_out);
template <typename PointT> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4f &centroid,
Eigen::MatrixXf &cloud_out)
{
template <typename PointT> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Vector4d &centroid,
Eigen::MatrixXd &cloud_out)
{
*/
template <typename PointT, typename Scalar> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> &centroid);
template <typename PointT> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::VectorXf &centroid)
{
return (computeNDCentroid<PointT, float> (cloud, indices, centroid));
template <typename PointT> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::VectorXd &centroid)
{
return (computeNDCentroid<PointT, double> (cloud, indices, centroid));
*/
template <typename PointT, typename Scalar> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> &centroid);
template <typename PointT> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::VectorXf &centroid)
{
return (computeNDCentroid<PointT, float> (cloud, indices, centroid));
template <typename PointT> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
Eigen::VectorXd &centroid)
{
return (computeNDCentroid<PointT, double> (cloud, indices, centroid));
* \ingroup common */
template <typename PointInT, typename PointOutT> std::size_t
computeCentroid (const pcl::PointCloud<PointInT>& cloud,
- const std::vector<int>& indices,
+ const Indices& indices,
PointOutT& centroid);
}
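Editor's note: a short usage sketch (illustrative names only) of the updated centroid/covariance overloads; call sites now pass pcl::Indices where they previously passed std::vector<int>:

// Illustrative only: calling the Indices-based overloads declared above.
#include <pcl/common/centroid.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void centroid_sketch(const pcl::PointCloud<pcl::PointXYZ>& cloud)
{
  const pcl::Indices subset{0, 2, 4};   // was std::vector<int>
  Eigen::Vector4f centroid;
  if (pcl::compute3DCentroid(cloud, subset, centroid) != 0)
  {
    Eigen::Matrix3f covariance;
    pcl::computeCovarianceMatrix(cloud, subset, centroid, covariance);
  }
}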
#pragma once
#include <pcl/pcl_base.h>
-#include <cfloat>
/**
* \file pcl/common/common.h
*/
template <typename PointT> inline void
getPointsInBox (const pcl::PointCloud<PointT> &cloud, Eigen::Vector4f &min_pt,
- Eigen::Vector4f &max_pt, std::vector<int> &indices);
+ Eigen::Vector4f &max_pt, Indices &indices);
/** \brief Get the point at maximum distance from a given point and a given pointcloud
* \param cloud the point cloud data message
* \ingroup common
*/
template<typename PointT> inline void
- getMaxDistance (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+ getMaxDistance (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
const Eigen::Vector4f &pivot_pt, Eigen::Vector4f &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
* \ingroup common
*/
template <typename PointT> inline void
- getMinMax3D (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+ getMinMax3D (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
/** \brief Get the minimum and maximum values on each of the 3 (x-y-z) dimensions in a given pointcloud
* \ingroup common
*/
template <typename PointT> inline void
- getMinMax3D (const pcl::PointCloud<PointT> &cloud, const pcl::PointIndices &indices,
+ getMinMax3D (const pcl::PointCloud<PointT> &cloud, const pcl::PointIndices &indices,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
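Editor's note: for orientation, the common.h helpers above now both produce and consume pcl::Indices; a hedged sketch with illustrative names:

// Sketch: getPointsInBox fills a pcl::Indices, which feeds directly into getMinMax3D.
#include <pcl/common/common.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void box_sketch(const pcl::PointCloud<pcl::PointXYZ>& cloud)
{
  Eigen::Vector4f box_min(0.f, 0.f, 0.f, 1.f), box_max(1.f, 1.f, 1.f, 1.f);
  pcl::Indices in_box;
  pcl::getPointsInBox(cloud, box_min, box_max, in_box);

  Eigen::Vector4f min_pt, max_pt;
  pcl::getMinMax3D(cloud, in_box, min_pt, max_pt);
}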
/** \brief Compute the radius of a circumscribed circle for a triangle formed of three points pa, pb, and pc
#include <limits>
-#include <pcl/common/common.h>
+#include <pcl/types.h>
+#include <pcl/point_types.h> // for PointXY
+#include <Eigen/Core> // for VectorXf
/**
* \file pcl/common/distances.h
/*@{*/
namespace pcl
{
+ template <typename PointT> class PointCloud;
+
/** \brief Get the shortest 3D segment between two 3D lines
* \param line_a the coefficients of the first line (point, direction)
* \param line_b the coefficients of the second line (point, direction)
const auto token = std::numeric_limits<std::size_t>::max();
std::size_t i_min = token, i_max = token;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- for (std::size_t j = i; j < cloud.points.size (); ++j)
+ for (std::size_t j = i; j < cloud.size (); ++j)
{
// Compute the distance
- double dist = (cloud.points[i].getVector4fMap () -
- cloud.points[j].getVector4fMap ()).squaredNorm ();
+ double dist = (cloud[i].getVector4fMap () -
+ cloud[j].getVector4fMap ()).squaredNorm ();
if (dist <= max_dist)
continue;
if (i_min == token || i_max == token)
return (max_dist = std::numeric_limits<double>::min ());
- pmin = cloud.points[i_min];
- pmax = cloud.points[i_max];
+ pmin = cloud[i_min];
+ pmax = cloud[i_max];
return (std::sqrt (max_dist));
}
* \ingroup common
*/
template <typename PointT> double inline
- getMaxSegment (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+ getMaxSegment (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
PointT &pmin, PointT &pmax)
{
double max_dist = std::numeric_limits<double>::min ();
for (std::size_t j = i; j < indices.size (); ++j)
{
// Compute the distance
- double dist = (cloud.points[indices[i]].getVector4fMap () -
- cloud.points[indices[j]].getVector4fMap ()).squaredNorm ();
+ double dist = (cloud[indices[i]].getVector4fMap () -
+ cloud[indices[j]].getVector4fMap ()).squaredNorm ();
if (dist <= max_dist)
continue;
if (i_min == token || i_max == token)
return (max_dist = std::numeric_limits<double>::min ());
- pmin = cloud.points[indices[i_min]];
- pmax = cloud.points[indices[i_max]];
+ pmin = cloud[indices[i_min]];
+ pmax = cloud[indices[i_max]];
return (std::sqrt (max_dist));
}
#include <pcl/point_cloud.h>
#include <functional>
-#include <sstream>
namespace pcl
{
template <typename PointT, typename Scalar> inline unsigned int
compute3DCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 4, 1> &centroid)
{
if (indices.empty ())
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix)
{
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrixNormalized (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix)
{
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix)
{
- unsigned int point_count = pcl::computeCovarianceMatrix (cloud, indices.indices, centroid, covariance_matrix);
- if (point_count != 0)
- covariance_matrix /= static_cast<Scalar> (point_count);
-
- return point_count;
+ return computeCovarianceMatrixNormalized(cloud, indices.indices, centroid, covariance_matrix);
}
template <typename PointT, typename Scalar> inline unsigned int
computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix)
{
// create the buffer on the stack which is much faster than using cloud[indices[i]] and centroid as a buffer
template <typename PointT, typename Scalar> inline unsigned int
computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, 3, 3> &covariance_matrix,
Eigen::Matrix<Scalar, 4, 1> &centroid)
{
template <typename PointT, typename Scalar> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
pcl::PointCloud<PointT> &cloud_out)
{
cloud_out.header = cloud_in.header;
cloud_out.is_dense = cloud_in.is_dense;
- if (indices.size () == cloud_in.points.size ())
+ if (indices.size () == cloud_in.size ())
{
cloud_out.width = cloud_in.width;
cloud_out.height = cloud_in.height;
}
else
{
- cloud_out.width = static_cast<std::uint32_t> (indices.size ());
+ cloud_out.width = indices.size ();
cloud_out.height = 1;
}
cloud_out.resize (indices.size ());
cloud_out (1, i) = cloud_in[i].y - centroid[1];
cloud_out (2, i) = cloud_in[i].z - centroid[2];
// One column at a time
- //cloud_out.block<4, 1> (0, i) = cloud_in.points[i].getVector4fMap () - centroid;
+ //cloud_out.block<4, 1> (0, i) = cloud_in[i].getVector4fMap () - centroid;
}
// Make sure we zero the 4th dimension out (1 row, N columns)
template <typename PointT, typename Scalar> void
demeanPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
const Eigen::Matrix<Scalar, 4, 1> &centroid,
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_out)
{
cloud_out (1, i) = cloud_in[indices[i]].y - centroid[1];
cloud_out (2, i) = cloud_in[indices[i]].z - centroid[2];
// One column at a time
- //cloud_out.block<4, 1> (0, i) = cloud_in.points[indices[i]].getVector4fMap () - centroid;
+ //cloud_out.block<4, 1> (0, i) = cloud_in[indices[i]].getVector4fMap () - centroid;
}
// Make sure we zero the 4th dimension out (1 row, N columns)
template <typename PointT, typename Scalar> inline void
computeNDCentroid (const pcl::PointCloud<PointT> &cloud,
- const std::vector<int> &indices,
+ const Indices &indices,
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> &centroid)
{
using FieldList = typename pcl::traits::fieldList<PointT>::type;
template <typename PointInT, typename PointOutT> std::size_t
computeCentroid (const pcl::PointCloud<PointInT>& cloud,
- const std::vector<int>& indices,
+ const Indices& indices,
PointOutT& centroid)
{
pcl::CentroidPoint<PointInT> cp;
#include <pcl/point_types.h>
#include <pcl/common/common.h>
+#include <cfloat> // for FLT_MAX
//////////////////////////////////////////////////////////////////////////////////////////////
inline double
template <typename PointT> inline void
pcl::getPointsInBox (const pcl::PointCloud<PointT> &cloud,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt,
- std::vector<int> &indices)
+ Indices &indices)
{
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
int l = 0;
// If the data is dense, we don't need to check for NaN
if (cloud.is_dense)
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
// Check if the point is inside bounds
- if (cloud.points[i].x < min_pt[0] || cloud.points[i].y < min_pt[1] || cloud.points[i].z < min_pt[2])
+ if (cloud[i].x < min_pt[0] || cloud[i].y < min_pt[1] || cloud[i].z < min_pt[2])
continue;
- if (cloud.points[i].x > max_pt[0] || cloud.points[i].y > max_pt[1] || cloud.points[i].z > max_pt[2])
+ if (cloud[i].x > max_pt[0] || cloud[i].y > max_pt[1] || cloud[i].z > max_pt[2])
continue;
indices[l++] = int (i);
}
// NaN or Inf values could exist => check for them
else
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
// Check if the point is invalid
- if (!std::isfinite (cloud.points[i].x) ||
- !std::isfinite (cloud.points[i].y) ||
- !std::isfinite (cloud.points[i].z))
+ if (!std::isfinite (cloud[i].x) ||
+ !std::isfinite (cloud[i].y) ||
+ !std::isfinite (cloud[i].z))
continue;
// Check if the point is inside bounds
- if (cloud.points[i].x < min_pt[0] || cloud.points[i].y < min_pt[1] || cloud.points[i].z < min_pt[2])
+ if (cloud[i].x < min_pt[0] || cloud[i].y < min_pt[1] || cloud[i].z < min_pt[2])
continue;
- if (cloud.points[i].x > max_pt[0] || cloud.points[i].y > max_pt[1] || cloud.points[i].z > max_pt[2])
+ if (cloud[i].x > max_pt[0] || cloud[i].y > max_pt[1] || cloud[i].z > max_pt[2])
continue;
indices[l++] = int (i);
}
// If the data is dense, we don't need to check for NaN
if (cloud.is_dense)
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- pcl::Vector3fMapConst pt = cloud.points[i].getVector3fMap ();
+ pcl::Vector3fMapConst pt = cloud[i].getVector3fMap ();
dist = (pivot_pt3 - pt).norm ();
if (dist > max_dist)
{
// NaN or Inf values could exist => check for them
else
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
// Check if the point is invalid
- if (!std::isfinite (cloud.points[i].x) || !std::isfinite (cloud.points[i].y) || !std::isfinite (cloud.points[i].z))
+ if (!std::isfinite (cloud[i].x) || !std::isfinite (cloud[i].y) || !std::isfinite (cloud[i].z))
continue;
- pcl::Vector3fMapConst pt = cloud.points[i].getVector3fMap ();
+ pcl::Vector3fMapConst pt = cloud[i].getVector3fMap ();
dist = (pivot_pt3 - pt).norm ();
if (dist > max_dist)
{
}
if(max_idx != -1)
- max_pt = cloud.points[max_idx].getVector4fMap ();
+ max_pt = cloud[max_idx].getVector4fMap ();
else
max_pt = Eigen::Vector4f(std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN());
}
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> inline void
-pcl::getMaxDistance (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+pcl::getMaxDistance (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
const Eigen::Vector4f &pivot_pt, Eigen::Vector4f &max_pt)
{
float max_dist = -FLT_MAX;
{
for (std::size_t i = 0; i < indices.size (); ++i)
{
- pcl::Vector3fMapConst pt = cloud.points[indices[i]].getVector3fMap ();
+ pcl::Vector3fMapConst pt = cloud[indices[i]].getVector3fMap ();
dist = (pivot_pt3 - pt).norm ();
if (dist > max_dist)
{
for (std::size_t i = 0; i < indices.size (); ++i)
{
// Check if the point is invalid
- if (!std::isfinite (cloud.points[indices[i]].x) || !std::isfinite (cloud.points[indices[i]].y)
+ if (!std::isfinite (cloud[indices[i]].x) || !std::isfinite (cloud[indices[i]].y)
||
- !std::isfinite (cloud.points[indices[i]].z))
+ !std::isfinite (cloud[indices[i]].z))
continue;
- pcl::Vector3fMapConst pt = cloud.points[indices[i]].getVector3fMap ();
+ pcl::Vector3fMapConst pt = cloud[indices[i]].getVector3fMap ();
dist = (pivot_pt3 - pt).norm ();
if (dist > max_dist)
{
}
if(max_idx != -1)
- max_pt = cloud.points[indices[max_idx]].getVector4fMap ();
+ max_pt = cloud[indices[max_idx]].getVector4fMap ();
else
max_pt = Eigen::Vector4f(std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN(),std::numeric_limits<float>::quiet_NaN());
}
{
for (const int &index : indices.indices)
{
- pcl::Array4fMapConst pt = cloud.points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
for (const int &index : indices.indices)
{
// Check if the point is invalid
- if (!std::isfinite (cloud.points[index].x) ||
- !std::isfinite (cloud.points[index].y) ||
- !std::isfinite (cloud.points[index].z))
+ if (!std::isfinite (cloud[index].x) ||
+ !std::isfinite (cloud[index].y) ||
+ !std::isfinite (cloud[index].z))
continue;
- pcl::Array4fMapConst pt = cloud.points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> inline void
-pcl::getMinMax3D (const pcl::PointCloud<PointT> &cloud, const std::vector<int> &indices,
+pcl::getMinMax3D (const pcl::PointCloud<PointT> &cloud, const Indices &indices,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt)
{
min_pt.setConstant (FLT_MAX);
{
for (const int &index : indices)
{
- pcl::Array4fMapConst pt = cloud.points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
min_pt = min_pt.array ().min (pt);
max_pt = max_pt.array ().max (pt);
}
for (const int &index : indices)
{
// Check if the point is invalid
- if (!std::isfinite (cloud.points[index].x) ||
- !std::isfinite (cloud.points[index].y) ||
- !std::isfinite (cloud.points[index].z))
+ if (!std::isfinite (cloud[index].x) ||
+ !std::isfinite (cloud[index].y) ||
+ !std::isfinite (cloud[index].z))
continue;
- pcl::Array4fMapConst pt = cloud.points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = cloud[index].getArray4fMap ();
min_pt = min_pt.array ().min (pt);
max_pt = max_pt.array ().max (pt);
}
cloud_out.is_dense = cloud_in.is_dense;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
- cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.points.resize (cloud_in.size ());
if (cloud_in.points.empty ())
return;
if (isSamePointType<PointInT, PointOutT> ())
// Copy the whole memory block
- memcpy (&cloud_out.points[0], &cloud_in.points[0], cloud_in.points.size () * sizeof (PointInT));
+ memcpy (&cloud_out[0], &cloud_in[0], cloud_in.size () * sizeof (PointInT));
else
// Iterate over each point
- for (std::size_t i = 0; i < cloud_in.points.size (); ++i)
- copyPoint (cloud_in.points[i], cloud_out.points[i]);
+ for (std::size_t i = 0; i < cloud_in.size (); ++i)
+ copyPoint (cloud_in[i], cloud_out[i]);
}
template <typename PointT, typename IndicesVectorAllocator> void
copyPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int, IndicesVectorAllocator> &indices,
+ const IndicesAllocator< IndicesVectorAllocator> &indices,
pcl::PointCloud<PointT> &cloud_out)
{
// Do we want to copy everything?
- if (indices.size () == cloud_in.points.size ())
+ if (indices.size () == cloud_in.size ())
{
cloud_out = cloud_in;
return;
// Allocate enough space and copy the basics
cloud_out.points.resize (indices.size ());
cloud_out.header = cloud_in.header;
- cloud_out.width = static_cast<std::uint32_t>(indices.size ());
+ cloud_out.width = indices.size ();
cloud_out.height = 1;
cloud_out.is_dense = cloud_in.is_dense;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
// Iterate over each point
for (std::size_t i = 0; i < indices.size (); ++i)
- cloud_out.points[i] = cloud_in.points[indices[i]];
+ cloud_out[i] = cloud_in[indices[i]];
}
template <typename PointInT, typename PointOutT, typename IndicesVectorAllocator> void
copyPointCloud (const pcl::PointCloud<PointInT> &cloud_in,
- const std::vector<int, IndicesVectorAllocator> &indices,
+ const IndicesAllocator< IndicesVectorAllocator> &indices,
pcl::PointCloud<PointOutT> &cloud_out)
{
// Allocate enough space and copy the basics
cloud_out.points.resize (indices.size ());
cloud_out.header = cloud_in.header;
- cloud_out.width = std::uint32_t (indices.size ());
+ cloud_out.width = indices.size ();
cloud_out.height = 1;
cloud_out.is_dense = cloud_in.is_dense;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
// Iterate over each point
for (std::size_t i = 0; i < indices.size (); ++i)
- copyPoint (cloud_in.points[indices[i]], cloud_out.points[i]);
+ copyPoint (cloud_in[indices[i]], cloud_out[i]);
}
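Editor's note: a minimal sketch (names are illustrative) of the Indices-based copyPointCloud overload above:

// Sketch: extracting a subset of a cloud via the Indices overload of copyPointCloud.
#include <pcl/common/io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void copy_sketch(const pcl::PointCloud<pcl::PointXYZ>& cloud_in)
{
  const pcl::Indices subset{1, 3, 5};
  pcl::PointCloud<pcl::PointXYZ> cloud_out;
  pcl::copyPointCloud(cloud_in, subset, cloud_out);  // cloud_out.width == subset.size(), height == 1
}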
pcl::PointCloud<PointT> &cloud_out)
{
// Do we want to copy everything?
- if (indices.indices.size () == cloud_in.points.size ())
+ if (indices.indices.size () == cloud_in.size ())
{
cloud_out = cloud_in;
return;
// Iterate over each point
for (std::size_t i = 0; i < indices.indices.size (); ++i)
- cloud_out.points[i] = cloud_in.points[indices.indices[i]];
+ cloud_out[i] = cloud_in[indices.indices[i]];
}
nr_p += index.indices.size ();
// Do we want to copy everything? Remember we assume UNIQUE indices
- if (nr_p == cloud_in.points.size ())
+ if (nr_p == cloud_in.size ())
{
cloud_out = cloud_in;
return;
for (const auto &index : cluster_index.indices)
{
// Iterate over each dimension
- cloud_out.points[cp] = cloud_in.points[index];
+ cloud_out[cp] = cloud_in[index];
cp++;
}
}
[](const auto& acc, const auto& index) { return index.indices.size() + acc; });
// Do we want to copy everything? Remember we assume UNIQUE indices
- if (nr_p == cloud_in.points.size ())
+ if (nr_p == cloud_in.size ())
{
copyPointCloud (cloud_in, cloud_out);
return;
// Iterate over each idx
for (const auto &index : cluster_index.indices)
{
- copyPoint (cloud_in.points[index], cloud_out.points[cp]);
+ copyPoint (cloud_in[index], cloud_out[cp]);
++cp;
}
}
using FieldList1 = typename pcl::traits::fieldList<PointIn1T>::type;
using FieldList2 = typename pcl::traits::fieldList<PointIn2T>::type;
- if (cloud1_in.points.size () != cloud2_in.points.size ())
+ if (cloud1_in.size () != cloud2_in.size ())
{
PCL_ERROR ("[pcl::concatenateFields] The number of points in the two input datasets differs!\n");
return;
}
// Resize the output dataset
- cloud_out.points.resize (cloud1_in.points.size ());
+ cloud_out.points.resize (cloud1_in.size ());
cloud_out.header = cloud1_in.header;
cloud_out.width = cloud1_in.width;
cloud_out.height = cloud1_in.height;
cloud_out.is_dense = true;
// Iterate over each point
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
// Iterate over each dimension
- pcl::for_each_type <FieldList1> (pcl::NdConcatenateFunctor <PointIn1T, PointOutT> (cloud1_in.points[i], cloud_out.points[i]));
- pcl::for_each_type <FieldList2> (pcl::NdConcatenateFunctor <PointIn2T, PointOutT> (cloud2_in.points[i], cloud_out.points[i]));
+ pcl::for_each_type <FieldList1> (pcl::NdConcatenateFunctor <PointIn1T, PointOutT> (cloud1_in[i], cloud_out[i]));
+ pcl::for_each_type <FieldList2> (pcl::NdConcatenateFunctor <PointIn2T, PointOutT> (cloud2_in[i], cloud_out[i]));
}
}
if (border_type == pcl::BORDER_TRANSPARENT)
{
- const PointT* in = &(cloud_in.points[0]);
- PointT* out = &(cloud_out.points[0]);
+ const PointT* in = &(cloud_in[0]);
+ PointT* out = &(cloud_out[0]);
PointT* out_inner = out + cloud_out.width*top + left;
for (std::uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
{
for (int i = 0; i < right; i++)
padding[i+left] = pcl::interpolatePointIndex (cloud_in.width+i, cloud_in.width, border_type);
- const PointT* in = &(cloud_in.points[0]);
- PointT* out = &(cloud_out.points[0]);
+ const PointT* in = &(cloud_in[0]);
+ PointT* out = &(cloud_out[0]);
PointT* out_inner = out + cloud_out.width*top + left;
for (std::uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
int bottom = cloud_out.height - cloud_in.height - top;
std::vector<PointT> buff (cloud_out.width, value);
PointT* buff_ptr = &(buff[0]);
- const PointT* in = &(cloud_in.points[0]);
- PointT* out = &(cloud_out.points[0]);
+ const PointT* in = &(cloud_in[0]);
+ PointT* out = &(cloud_out[0]);
PointT* out_inner = out + cloud_out.width*top + left;
for (std::uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
#pragma once
#include <pcl/common/projection_matrix.h>
+#include <pcl/console/print.h> // for PCL_ERROR
#include <pcl/cloud_iterator.h>
+#include <Eigen/Eigenvalues> // for SelfAdjointEigenSolver
namespace pcl
{
estimateProjectionMatrix (
typename pcl::PointCloud<PointT>::ConstPtr cloud,
Eigen::Matrix<float, 3, 4, Eigen::RowMajor>& projection_matrix,
- const std::vector<int>& indices)
+ const Indices& indices)
{
// internally we calculate with double but store the result into float matrices.
using Scalar = double;
cloud_out.is_dense = cloud_in.is_dense;
cloud_out.width = cloud_in.width;
cloud_out.height = cloud_in.height;
- cloud_out.points.reserve (cloud_in.points.size ());
+ cloud_out.points.reserve (cloud_in.size ());
if (copy_all_fields)
cloud_out.points.assign (cloud_in.points.begin (), cloud_in.points.end ());
else
- cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.points.resize (cloud_in.size ());
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
if (cloud_in.is_dense)
{
// If the dataset is dense, simply transform it!
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
tf.se3 (cloud_in[i].data, cloud_out[i].data);
}
else
{
// Dataset might contain NaNs and Infs, so check for them first,
// otherwise we get errors during the multiplication (?)
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
- if (!std::isfinite (cloud_in.points[i].x) ||
- !std::isfinite (cloud_in.points[i].y) ||
- !std::isfinite (cloud_in.points[i].z))
+ if (!std::isfinite (cloud_in[i].x) ||
+ !std::isfinite (cloud_in[i].y) ||
+ !std::isfinite (cloud_in[i].z))
continue;
tf.se3 (cloud_in[i].data, cloud_out[i].data);
}
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields)
{
// Copy fields first, then transform xyz data
if (copy_all_fields)
- cloud_out.points[i] = cloud_in.points[indices[i]];
+ cloud_out[i] = cloud_in[indices[i]];
tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
}
}
for (std::size_t i = 0; i < npts; ++i)
{
if (copy_all_fields)
- cloud_out.points[i] = cloud_in.points[indices[i]];
- if (!std::isfinite (cloud_in.points[indices[i]].x) ||
- !std::isfinite (cloud_in.points[indices[i]].y) ||
- !std::isfinite (cloud_in.points[indices[i]].z))
+ cloud_out[i] = cloud_in[indices[i]];
+ if (!std::isfinite (cloud_in[indices[i]].x) ||
+ !std::isfinite (cloud_in[indices[i]].y) ||
+ !std::isfinite (cloud_in[indices[i]].z))
continue;
tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
}
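Editor's note: the transform overloads follow the same pattern; a hedged sketch of transforming only an indexed subset (names are illustrative):

// Sketch: transforming only the points selected by a pcl::Indices subset.
#include <pcl/common/transforms.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void transform_sketch(const pcl::PointCloud<pcl::PointXYZ>& cloud_in)
{
  const pcl::Indices subset{0, 1, 2};
  Eigen::Affine3f pose = Eigen::Affine3f::Identity();
  pose.translation() << 1.0f, 0.0f, 0.0f;

  pcl::PointCloud<pcl::PointXYZ> cloud_out;
  pcl::transformPointCloud(cloud_in, subset, cloud_out, pose);  // copy_all_fields defaults to true
}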
cloud_out.width = cloud_in.width;
cloud_out.height = cloud_in.height;
cloud_out.is_dense = cloud_in.is_dense;
- cloud_out.points.reserve (cloud_out.points.size ());
+ cloud_out.points.reserve (cloud_out.size ());
if (copy_all_fields)
cloud_out.points.assign (cloud_in.points.begin (), cloud_in.points.end ());
else
- cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.points.resize (cloud_in.size ());
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
tf.se3 (cloud_in[i].data, cloud_out[i].data);
tf.so3 (cloud_in[i].data_n, cloud_out[i].data_n);
// Dataset might contain NaNs and Infs, so check for them first.
else
{
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
- if (!std::isfinite (cloud_in.points[i].x) ||
- !std::isfinite (cloud_in.points[i].y) ||
- !std::isfinite (cloud_in.points[i].z))
+ if (!std::isfinite (cloud_in[i].x) ||
+ !std::isfinite (cloud_in[i].y) ||
+ !std::isfinite (cloud_in[i].z))
continue;
tf.se3 (cloud_in[i].data, cloud_out[i].data);
tf.so3 (cloud_in[i].data_n, cloud_out[i].data_n);
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields)
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
// Copy fields first, then transform
if (copy_all_fields)
- cloud_out.points[i] = cloud_in.points[indices[i]];
+ cloud_out[i] = cloud_in[indices[i]];
tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
tf.so3 (cloud_in[indices[i]].data_n, cloud_out[i].data_n);
}
// Dataset might contain NaNs and Infs, so check for them first.
else
{
- for (std::size_t i = 0; i < cloud_out.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_out.size (); ++i)
{
// Copy fields first, then transform
if (copy_all_fields)
- cloud_out.points[i] = cloud_in.points[indices[i]];
+ cloud_out[i] = cloud_in[indices[i]];
- if (!std::isfinite (cloud_in.points[indices[i]].x) ||
- !std::isfinite (cloud_in.points[indices[i]].y) ||
- !std::isfinite (cloud_in.points[indices[i]].z))
+ if (!std::isfinite (cloud_in[indices[i]].x) ||
+ !std::isfinite (cloud_in[indices[i]].y) ||
+ !std::isfinite (cloud_in[indices[i]].z))
continue;
tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
*/
PCL_EXPORTS void
copyPointCloud (const pcl::PCLPointCloud2 &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PCLPointCloud2 &cloud_out);
/** \brief Extract the indices of a given point cloud as a new point cloud
*/
PCL_EXPORTS void
copyPointCloud (const pcl::PCLPointCloud2 &cloud_in,
- const std::vector<int, Eigen::aligned_allocator<int> > &indices,
+ const IndicesAllocator< Eigen::aligned_allocator<int> > &indices,
pcl::PCLPointCloud2 &cloud_out);
/** \brief Copy fields and point cloud data from \a cloud_in to \a cloud_out
*/
template <typename PointT, typename IndicesVectorAllocator = std::allocator<int>> void
copyPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int, IndicesVectorAllocator> &indices,
+ const IndicesAllocator< IndicesVectorAllocator> &indices,
pcl::PointCloud<PointT> &cloud_out);
/** \brief Extract the indices of a given point cloud as a new point cloud
*/
template <typename PointInT, typename PointOutT, typename IndicesVectorAllocator = std::allocator<int>> void
copyPointCloud (const pcl::PointCloud<PointInT> &cloud_in,
- const std::vector<int, IndicesVectorAllocator> &indices,
+ const IndicesAllocator<IndicesVectorAllocator> &indices,
pcl::PointCloud<PointOutT> &cloud_out);
/** \brief Extract the indices of a given point cloud as a new point cloud
#pragma once
+#include <pcl/correspondence.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/correspondence.h>
+#include <pcl/types.h>
namespace pcl
{
{
Eigen::Affine3f transformation = Eigen::Affine3f::Identity (); //!< The estimated transformation between the two coordinate systems
float score = 0; //!< An estimate in [0,1], how good the estimated pose is
- std::vector<int> correspondence_indices; //!< The indices of the used correspondences
+ Indices correspondence_indices; //!< The indices of the used correspondences
struct IsBetter
{
#pragma once
-#include <pcl/common/eigen.h>
-#include <pcl/console/print.h>
+#include <pcl/types.h>
+#include <pcl/point_cloud.h> // for PointCloud
+#include <Eigen/Core> // for Matrix, RowMajor, Matrix3f
/**
* \file common/geometry.h
/*@{*/
namespace pcl
{
- template <typename T> class PointCloud;
-
/** \brief Estimates the projection matrix P = K * (R|-R*t) from organized point clouds, with
* K = [[fx, s, cx], [0, fy, cy], [0, 0, 1]]
* R = rotation matrix and
* \return the resudial error. A high residual indicates, that the point cloud was not from a projective device.
*/
template<typename PointT> double
- estimateProjectionMatrix (typename pcl::PointCloud<PointT>::ConstPtr cloud, Eigen::Matrix<float, 3, 4, Eigen::RowMajor>& projection_matrix, const std::vector<int>& indices = std::vector<int> ());
+ estimateProjectionMatrix (typename pcl::PointCloud<PointT>::ConstPtr cloud, Eigen::Matrix<float, 3, 4, Eigen::RowMajor>& projection_matrix, const Indices& indices = {});
/** \brief Determines the camera matrix from the given projection matrix.
* \note This method does NOT use a RQ decomposition, but uses the fact that the left 3x3 matrix P' of P squared eliminates the rotational part.
*/
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true);
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true);
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Affine3f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix4f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix4f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix4f &transform,
bool copy_all_fields = true)
*/
template <typename PointT, typename Scalar> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix<Scalar, 4, 4> &transform,
bool copy_all_fields = true)
template <typename PointT> void
transformPointCloudWithNormals (const pcl::PointCloud<PointT> &cloud_in,
- const pcl::PointIndices &indices,
+ const pcl::PointIndices &indices,
pcl::PointCloud<PointT> &cloud_out,
const Eigen::Matrix4f &transform,
bool copy_all_fields = true)
}
/** \brief Utility function to eliminate unused variable warnings. */
- template<typename T> void
- ignore(const T&)
+ template<typename ...T> void
+ ignore(const T&...)
{
}
} // namespace utils
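Editor's note: pcl::utils::ignore is now variadic, so one call can silence several unused-parameter warnings; a minimal sketch (header path as assumed here, callback names illustrative):

// Sketch: the variadic ignore() accepts any number of otherwise-unused values.
#include <pcl/common/utils.h>

void callback_stub(int level, const char* message, double timestamp)
{
  pcl::utils::ignore(level, message, timestamp);  // previously one ignore() call per argument
}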
// Copy point data
std::uint32_t num_points = msg.width * msg.height;
cloud.points.resize (num_points);
- std::uint8_t* cloud_data = reinterpret_cast<std::uint8_t*>(&cloud.points[0]);
+ std::uint8_t* cloud_data = reinterpret_cast<std::uint8_t*>(&cloud[0]);
// Check if we can copy adjacent points in a single memcpy. We can do so if there
// is exactly one field to copy and it is the same size as the source and destination
// Ease the user's burden on specifying width/height for unorganized datasets
if (cloud.width == 0 && cloud.height == 0)
{
- msg.width = static_cast<std::uint32_t>(cloud.points.size ());
+ msg.width = cloud.size ();
msg.height = 1;
}
else
{
- assert (cloud.points.size () == cloud.width * cloud.height);
+ assert (cloud.size () == cloud.width * cloud.height);
msg.height = cloud.height;
msg.width = cloud.width;
}
// Fill point cloud binary data (padding and all)
- std::size_t data_size = sizeof (PointT) * cloud.points.size ();
+ std::size_t data_size = sizeof (PointT) * cloud.size ();
msg.data.resize (data_size);
if (data_size)
{
- memcpy(&msg.data[0], &cloud.points[0], data_size);
+ memcpy(&msg.data[0], &cloud[0], data_size);
}
// Fill fields metadata
throw std::runtime_error("Needs to be a dense like cloud!!");
else
{
- if (cloud.points.size () != cloud.width * cloud.height)
+ if (cloud.size () != cloud.width * cloud.height)
throw std::runtime_error("The width and height do not match the cloud size!");
msg.height = cloud.height;
msg.width = cloud.width;
#pragma GCC system_header
#endif
+#include <pcl/pcl_base.h>
#include <pcl/memory.h>
+#include <pcl/types.h>
#include <Eigen/StdVector>
#include <Eigen/Geometry>
#include <pcl/pcl_exports.h>
struct Correspondence
{
/** \brief Index of the query (source) point. */
- int index_query = 0;
+ index_t index_query = 0;
/** \brief Index of the matching (target) point. Set to -1 if no correspondence found. */
- int index_match = -1;
+ index_t index_match = UNAVAILABLE;
/** \brief Distance between the corresponding points, or the weight denoting the confidence in correspondence estimation */
union
{
inline Correspondence () = default;
/** \brief Constructor. */
- inline Correspondence (int _index_query, int _index_match, float _distance) :
+ inline Correspondence (index_t _index_query, index_t _index_match, float _distance) :
index_query (_index_query), index_match (_index_match), distance (_distance)
{}
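Editor's note: with the switch to index_t, an unmatched correspondence is expressed via the pcl::UNAVAILABLE sentinel (introduced further below) rather than a raw -1 literal; a sketch with illustrative names:

// Sketch: building a correspondence and testing for "no match" with the new sentinel.
#include <pcl/correspondence.h>

pcl::Correspondence make_sketch_correspondence(pcl::index_t query, pcl::index_t match, float dist)
{
  pcl::Correspondence c(query, match, dist);
  if (c.index_match == pcl::UNAVAILABLE)
  {
    // no corresponding target point was found for this query point
  }
  return c;
}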
void
getRejectedQueryIndices (const pcl::Correspondences &correspondences_before,
const pcl::Correspondences &correspondences_after,
- std::vector<int>& indices,
+ Indices& indices,
bool presorting_required = true);
/**
class IteratorIdx : public CloudIterator<PointT>::Iterator
{
public:
- IteratorIdx (PointCloud<PointT>& cloud, const std::vector<int>& indices)
+ IteratorIdx (PointCloud<PointT>& cloud, const Indices& indices)
: cloud_ (cloud)
, indices_ (indices)
, iterator_ (indices_.begin ())
private:
PointCloud<PointT>& cloud_;
- std::vector<int> indices_;
- std::vector<int>::iterator iterator_;
+ Indices indices_;
+ Indices::iterator iterator_;
};
/** \brief
{
public:
ConstIteratorIdx (const PointCloud<PointT>& cloud,
- const std::vector<int>& indices)
+ const Indices& indices)
: cloud_ (cloud)
, indices_ (indices)
, iterator_ (indices_.begin ())
const PointT& operator* () const override
{
- return (cloud_.points[*iterator_]);
+ return (cloud_[*iterator_]);
}
const PointT* operator-> () const override
private:
const PointCloud<PointT>& cloud_;
- std::vector<int> indices_;
- std::vector<int>::iterator iterator_;
+ Indices indices_;
+ Indices::iterator iterator_;
};
} // namespace pcl
//////////////////////////////////////////////////////////////////////////////
template <class PointT>
pcl::CloudIterator<PointT>::CloudIterator (
- PointCloud<PointT>& cloud, const std::vector<int>& indices)
+ PointCloud<PointT>& cloud, const Indices& indices)
: iterator_ (new IteratorIdx<PointT> (cloud, indices))
{
}
pcl::CloudIterator<PointT>::CloudIterator (
PointCloud<PointT>& cloud, const Correspondences& corrs, bool source)
{
- std::vector<int> indices;
+ Indices indices;
indices.reserve (corrs.size ());
if (source)
{
//////////////////////////////////////////////////////////////////////////////
template <class PointT>
pcl::ConstCloudIterator<PointT>::ConstCloudIterator (
- const PointCloud<PointT>& cloud, const std::vector<int>& indices)
+ const PointCloud<PointT>& cloud, const Indices& indices)
: iterator_ (new typename pcl::ConstCloudIterator<PointT>::ConstIteratorIdx (cloud, indices))
{
}
pcl::ConstCloudIterator<PointT>::ConstCloudIterator (
const PointCloud<PointT>& cloud, const Correspondences& corrs, bool source)
{
- std::vector<int> indices;
+ Indices indices;
indices.reserve (corrs.size ());
if (source)
{
template <typename PointT> void
pcl::PCLBase<PointT>::setIndices (const IndicesConstPtr &indices)
{
- indices_.reset (new std::vector<int> (*indices));
+ indices_.reset (new Indices (*indices));
fake_indices_ = false;
use_indices_ = true;
}
template <typename PointT> void
pcl::PCLBase<PointT>::setIndices (const PointIndicesConstPtr &indices)
{
- indices_.reset (new std::vector<int> (indices->indices));
+ indices_.reset (new Indices (indices->indices));
fake_indices_ = false;
use_indices_ = true;
}
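Editor's note: since indices_ is now a pcl::Indices, callers hand PCLBase-derived algorithms a shared pcl::Indices; an illustrative sketch using pcl::ExtractIndices (any PCLBase-derived class is set up the same way):

// Sketch: passing the new Indices type to a PCLBase-derived algorithm.
#include <pcl/filters/extract_indices.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void set_indices_sketch(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::IndicesPtr subset(new pcl::Indices{0, 1, 2});  // shared_ptr<pcl::Indices>

  pcl::ExtractIndices<pcl::PointXYZ> extract;
  extract.setInputCloud(cloud);
  extract.setIndices(subset);  // PCLBase::setIndices now stores a pcl::Indices internally

  pcl::PointCloud<pcl::PointXYZ> filtered;
  extract.filter(filtered);
}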
return;
}
- indices_.reset (new std::vector<int>);
+ indices_.reset (new Indices);
indices_->reserve (nb_cols * nb_rows);
for(std::size_t i = row_start; i < row_end; i++)
for(std::size_t j = col_start; j < col_end; j++)
if (!indices_)
{
fake_indices_ = true;
- indices_.reset (new std::vector<int>);
+ indices_.reset (new Indices);
}
// If we have a set of fake indices, but they do not match the number of points in the cloud, update them
- if (fake_indices_ && indices_->size () != input_->points.size ())
+ if (fake_indices_ && indices_->size () != input_->size ())
{
const auto indices_size = indices_->size ();
try
{
- indices_->resize (input_->points.size ());
+ indices_->resize (input_->size ());
}
catch (const std::bad_alloc&)
{
- PCL_ERROR ("[initCompute] Failed to allocate %lu indices.\n", input_->points.size ());
+ PCL_ERROR ("[initCompute] Failed to allocate %lu indices.\n", input_->size ());
}
for (auto i = indices_size; i < indices_->size (); ++i) { (*indices_)[i] = static_cast<int>(i); }
}
using IndicesPtr = shared_ptr<Indices>;
using IndicesConstPtr = shared_ptr<const Indices>;
+ //Used to denote that a value has not been set for an index_t variable
+ static constexpr index_t UNAVAILABLE = static_cast<index_t>(-1);
+
/////////////////////////////////////////////////////////////////////////////////////////
/** \brief PCL base class. Implements methods that are used by most PCL algorithms.
* \ingroup common
setIndices (std::size_t row_start, std::size_t col_start, std::size_t nb_rows, std::size_t nb_cols);
/** \brief Get a pointer to the vector of indices used. */
- inline IndicesPtr const
+ inline IndicesPtr
getIndices () { return (indices_); }
/** \brief Get a pointer to the vector of indices used. */
/** \brief Override PointCloud operator[] to shorten code
* \note this method can be called instead of (*input_)[(*indices_)[pos]]
- * or input_->points[(*indices_)[pos]]
+ * or (*input_)[(*indices_)[pos]]
* \param[in] pos position in indices_ vector
*/
inline const PointT& operator[] (std::size_t pos) const
std::vector<int> field_sizes_;
/** \brief The x-y-z fields indices. */
- int x_idx_, y_idx_, z_idx_;
+ index_t x_idx_, y_idx_, z_idx_;
/** \brief The desired x-y-z field names. */
std::string x_field_name_, y_field_name_, z_field_name_;
// Header 'pcl_macros' is not suitable since it includes <Eigen/Core>,
// which can't be eaten by nvcc (it's too weak)
-#if defined WIN32 || defined _WIN32 || defined WINCE || defined __MINGW32__
+#if defined _WIN32 || defined WINCE || defined __MINGW32__
#ifdef PCLAPI_EXPORTS
#define PCL_EXPORTS __declspec(dllexport)
#else
#define _USE_MATH_DEFINES
#endif
#include <cmath>
-#include <cstdarg>
-#include <cstdio>
#include <cstdlib>
-#include <cstdint>
#include <iostream>
// We need to check for GCC version, because GCC releases before 9 were implementing an
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/type_traits.h>
+#include <pcl/types.h>
#include <algorithm>
#include <utility>
* \param[in] indices the subset to copy
*/
PointCloud (const PointCloud<PointT> &pc,
- const std::vector<int> &indices) :
+ const Indices &indices) :
header (pc.header), points (indices.size ()), width (indices.size ()), height (1), is_dense (pc.is_dense),
sensor_origin_ (pc.sensor_origin_), sensor_orientation_ (pc.sensor_orientation_)
{
// Copy the obvious
assert (indices.size () <= pc.size ());
for (std::size_t i = 0; i < indices.size (); i++)
- points[i] = pc.points[indices[i]];
+ points[i] = pc[indices[i]];
}
/** \brief Allocate constructor from point cloud subset
// This causes a drastic performance hit. Prefer not to use reserve with libstdc++ (default on clang)
cloud1.points.insert (cloud1.points.end (), cloud2.points.begin (), cloud2.points.end ());
- cloud1.width = static_cast<std::uint32_t>(cloud1.points.size ());
+ cloud1.width = cloud1.size ();
cloud1.height = 1;
cloud1.is_dense = cloud1.is_dense && cloud2.is_dense;
return true;
getMatrixXfMap (int dim, int stride, int offset)
{
if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
- return (Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(&points[0])+offset, points.size (), dim, Eigen::OuterStride<> (stride)));
+ return (Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(&points[0])+offset, size (), dim, Eigen::OuterStride<> (stride)));
else
- return (Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(&points[0])+offset, dim, points.size (), Eigen::OuterStride<> (stride)));
+ return (Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(&points[0])+offset, dim, size (), Eigen::OuterStride<> (stride)));
}
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the specified dimensions of the PointCloud.
getMatrixXfMap (int dim, int stride, int offset) const
{
if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
- return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, points.size (), dim, Eigen::OuterStride<> (stride)));
+ return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, size (), dim, Eigen::OuterStride<> (stride)));
else
- return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, dim, points.size (), Eigen::OuterStride<> (stride)));
+ return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, dim, size (), Eigen::OuterStride<> (stride)));
}
/**
// iterators
using iterator = typename VectorType::iterator;
using const_iterator = typename VectorType::const_iterator;
- inline iterator begin () { return (points.begin ()); }
- inline iterator end () { return (points.end ()); }
- inline const_iterator begin () const { return (points.begin ()); }
- inline const_iterator end () const { return (points.end ()); }
+ using reverse_iterator = typename VectorType::reverse_iterator;
+ using const_reverse_iterator = typename VectorType::const_reverse_iterator;
+ inline iterator begin () noexcept { return (points.begin ()); }
+ inline iterator end () noexcept { return (points.end ()); }
+ inline const_iterator begin () const noexcept { return (points.begin ()); }
+ inline const_iterator end () const noexcept { return (points.end ()); }
+ inline const_iterator cbegin () const noexcept { return (points.cbegin ()); }
+ inline const_iterator cend () const noexcept { return (points.cend ()); }
+ inline reverse_iterator rbegin () noexcept { return (points.rbegin ()); }
+ inline reverse_iterator rend () noexcept { return (points.rend ()); }
+ inline const_reverse_iterator rbegin () const noexcept { return (points.rbegin ()); }
+ inline const_reverse_iterator rend () const noexcept { return (points.rend ()); }
+ inline const_reverse_iterator crbegin () const noexcept { return (points.crbegin ()); }
+ inline const_reverse_iterator crend () const noexcept { return (points.crend ()); }
//capacity
- inline std::size_t size () const { return (points.size ()); }
+ inline std::size_t size () const { return points.size (); }
+ index_t max_size() const noexcept { return static_cast<index_t>(points.max_size()); }
inline void reserve (std::size_t n) { points.reserve (n); }
inline bool empty () const { return points.empty (); }
+ PointT* data() noexcept { return points.data(); }
+ const PointT* data() const noexcept { return points.data(); }
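Editor's note: the new iterator and data() accessors make PointCloud usable with standard algorithms directly; a small sketch:

// Sketch: the newly added reverse iterators and data() in use.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <algorithm>

void iterator_sketch(pcl::PointCloud<pcl::PointXYZ>& cloud)
{
  // Reverse traversal via rbegin()/rend().
  std::for_each(cloud.rbegin(), cloud.rend(), [](pcl::PointXYZ& p) { p.z = 0.0f; });

  // Raw access to the contiguous point storage via data().
  pcl::PointXYZ* raw = cloud.data();
  (void)raw;
}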
- /** \brief Resize the cloud
- * \param[in] n the new cloud size
- */
- inline void resize (std::size_t n)
+ /**
+ * \brief Resizes the container to contain `count` elements
+ * \details
+ * * If the current size is greater than `count`, the pointcloud is reduced to its
+ * first `count` elements
+ * * If the current size is less than `count`, additional default-inserted points
+ * are appended
+ * \note This potentially breaks the organized structure of the cloud by setting
+ * the height to 1 IFF `width * height != count`!
+ * \param[in] count new size of the point cloud
+ */
+ inline void
+ resize(std::size_t count)
{
- points.resize (n);
- if (width * height != n)
- {
- width = static_cast<std::uint32_t> (n);
+ points.resize(count);
+ if (width * height != count) {
+ width = static_cast<std::uint32_t>(count);
+ height = 1;
+ }
+ }
+
+ /**
+ * \brief Resizes the container to contain count elements
+ * \details
+ * * If the current size is greater than `count`, the pointcloud is reduced to its
+ * first `count` elements
+ * * If the current size is less than `count`, additional copies of `value` are
+ * appended
+ * \note This potentially breaks the organized structure of the cloud by setting
+ * the height to 1 IFF `width * height != count`!
+ * \param[in] count new size of the point cloud
+ * \param[in] value the value to initialize the new points with
+ */
+ void
+ resize(index_t count, const PointT& value)
+ {
+ points.resize(count, value);
+ if (width * height != count) {
+ width = count;
height = 1;
}
}
inline const PointT& back () const { return (points.back ()); }
inline PointT& back () { return (points.back ()); }
+ /**
+ * \brief Replaces the points with `count` copies of `value`
+ * \note This breaks the organized structure of the cloud by setting the height to
+ * 1!
+ */
+ void
+ assign(index_t count, const PointT& value)
+ {
+ points.assign(count, value);
+ width = static_cast<std::uint32_t>(size());
+ height = 1;
+ }
+
+ /**
+ * \brief Replaces the points with copies of those in the range `[first, last)`
+ * \details The behavior is undefined if either argument is an iterator into
+ * `*this`
+ * \note This breaks the organized structure of the cloud by setting the height to
+ * 1!
+ */
+ template <class InputIt>
+ void
+ assign(InputIt first, InputIt last)
+ {
+ points.assign(std::move(first), std::move(last));
+ width = static_cast<std::uint32_t>(size());
+ height = 1;
+ }
+
+ /**
+ * \brief Replaces the points with the elements from the initializer list `ilist`
+ * \note This breaks the organized structure of the cloud by setting the height to
+ * 1!
+ */
+ void
+ assign(std::initializer_list<PointT> ilist)
+ {
+ points.assign(std::move(ilist));
+ width = static_cast<std::uint32_t>(size());
+ height = 1;
+ }
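+
+  // Illustrative only: every assign overload replaces the contents and leaves the
+  // cloud unorganized, e.g.
+  //   pcl::PointCloud<pcl::PointXYZ> cloud;
+  //   cloud.assign ({{0.f, 0.f, 0.f}, {1.f, 1.f, 1.f}});  // 2 points; width = 2, height = 1
+  //   cloud.assign (4, pcl::PointXYZ (1.f, 2.f, 3.f));    // 4 identical points; width = 4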
+
/** \brief Insert a new point in the cloud, at the end of the container.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] pt the point to insert
push_back (const PointT& pt)
{
points.push_back (pt);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
}
emplace_back (Args&& ...args)
{
points.emplace_back (std::forward<Args> (args)...);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
return points.back();
}
insert (iterator position, const PointT& pt)
{
iterator it = points.insert (position, pt);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
return (it);
}
insert (iterator position, std::size_t n, const PointT& pt)
{
points.insert (position, n, pt);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
}
insert (iterator position, InputIterator first, InputIterator last)
{
points.insert (position, first, last);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
}
emplace (iterator position, Args&& ...args)
{
iterator it = points.emplace (position, std::forward<Args> (args)...);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
return (it);
}
erase (iterator position)
{
iterator it = points.erase (position);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
return (it);
}
erase (iterator first, iterator last)
{
iterator it = points.erase (first, last);
- width = static_cast<std::uint32_t> (points.size ());
+ width = size ();
height = 1;
return (it);
}
operator << (std::ostream& s, const pcl::PointCloud<PointT> &p)
{
s << "header: " << p.header << std::endl;
- s << "points[]: " << p.points.size () << std::endl;
+ s << "points[]: " << p.size () << std::endl;
s << "width: " << p.width << std::endl;
s << "height: " << p.height << std::endl;
s << "is_dense: " << p.is_dense << std::endl;
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010-2012, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+// https://bugreports.qt-project.org/browse/QTBUG-22829
+#ifndef Q_MOC_RUN
+#include <boost/mpl/assert.hpp> // for BOOST_MPL_ASSERT_MSG
+#include <boost/mpl/identity.hpp> // for boost::mpl::identity
+
+#include <boost/mpl/vector.hpp> // for boost::mpl::vector
+#include <boost/preprocessor/seq/enum.hpp> // for BOOST_PP_SEQ_ENUM
+#include <boost/preprocessor/tuple/elem.hpp> // for BOOST_PP_TUPLE_ELEM
+#include <boost/preprocessor/stringize.hpp> // for BOOST_PP_STRINGIZE
+#endif
+
+// This is required for the workaround at line 84
+#ifdef _MSC_VER
+#include <Eigen/Core>
+#include <Eigen/src/StlSupport/details.h>
+#endif
+
+#include <cstddef> // for std::size_t, offsetof
+#include <cstdint> // for std::int8_t, std::uint8_t, std::int16_t, std::uint16_t, std::int32_t, std::uint32_t
+#include <type_traits> // for std::is_same, std::remove_all_extents_t
+
+namespace pcl
+{
+namespace traits
+{
+
+// forward declaration
+template<typename T> struct asEnum;
+
+// Metafunction to decompose a type (possibly of array of any number of dimensions) into
+// its scalar type and total number of elements.
+template<typename T> struct decomposeArray
+{
+ using type = std::remove_all_extents_t<T>;
+ static const std::uint32_t value = sizeof (T) / sizeof (type);
+};
+
+// For non-POD point types, this is specialized to return the corresponding POD type.
+template<typename PointT>
+struct POD
+{
+ using type = PointT;
+};
+
+#ifdef _MSC_VER
+/* Sometimes when calling functions like `copyPoint()` or `copyPointCloud`
+ * without explicitly specifying point types, MSVC deduces them to be e.g.
+ * `Eigen::internal::workaround_msvc_stl_support<pcl::PointXYZ>` instead of
+ * plain `pcl::PointXYZ`. Subsequently these types are passed to meta-
+ * functions like `has_field` or `fieldList` and make them choke. This hack
+ * makes use of the fact that internally `fieldList` always applies `POD` to
+ * its argument type. This specialization therefore allows to unwrap the
+ * contained point type. */
+template<typename PointT>
+struct POD<Eigen::internal::workaround_msvc_stl_support<PointT> >
+{
+ using type = PointT;
+};
+#endif
+
+// name
+/* This really only depends on Tag, but we go through some gymnastics to avoid ODR violations.
+ We template it on the point type PointT to avoid ODR violations when registering multiple
+ point types with shared tags.
+ The dummy parameter is so we can partially specialize name on PointT and Tag but leave it
+ templated on dummy. Each specialization declares a static char array containing the tag
+ name. The definition of the static member would conflict when linking multiple translation
+ units that include the point type registration. But when the static member definition is
+ templated (on dummy), we sidestep the ODR issue.
+*/
+template<class PointT, typename Tag, int dummy = 0>
+struct name /** \cond NO_WARN_RECURSIVE */ : name<typename POD<PointT>::type, Tag, dummy> /** \endcond */
+{ /** \cond NO_WARN_RECURSIVE */
+ // Contents of specialization:
+ // static const char value[];
+
+ // Avoid infinite compile-time recursion
+ BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
+ POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
+};
+} // namespace traits
+} // namespace pcl
+
+#define POINT_CLOUD_REGISTER_FIELD_NAME(r, point, elem) \
+ template<int dummy> \
+ struct name<point, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem), dummy> \
+ { /** \endcond */ \
+ static const char value[]; \
+ }; \
+ \
+ template<int dummy> \
+ const char name<point, \
+ pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem), \
+ dummy>::value[] = \
+ BOOST_PP_STRINGIZE(BOOST_PP_TUPLE_ELEM(3, 2, elem)); \
+
+
+namespace pcl
+{
+namespace traits
+{
+// offset
+template<class PointT, typename Tag>
+struct offset /** \cond NO_WARN_RECURSIVE */ : offset<typename POD<PointT>::type, Tag> /** \endcond */
+{ /** \cond NO_WARN_RECURSIVE */
+ // Contents of specialization:
+ // static const std::size_t value;
+
+ // Avoid infinite compile-time recursion
+ BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
+ POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
+};
+} // namespace traits
+} // namespace pcl
+
+#define POINT_CLOUD_REGISTER_FIELD_OFFSET(r, name, elem) \
+ template<> struct offset<name, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem)> \
+ { /** \endcond */ \
+ static const std::size_t value = offsetof(name, BOOST_PP_TUPLE_ELEM(3, 1, elem)); \
+ }; \
+
+
+namespace pcl
+{
+namespace traits
+{
+ // datatype
+ template<class PointT, typename Tag>
+ struct datatype /** \cond NO_WARN_RECURSIVE */ : datatype<typename POD<PointT>::type, Tag> /** \endcond */
+ { /** \cond NO_WARN_RECURSIVE */
+ // Contents of specialization:
+ // using type = ...;
+ // static const std::uint8_t value;
+ // static const std::uint32_t size;
+
+ // Avoid infinite compile-time recursion
+ BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
+ POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
+ };
+ } // namespace traits
+ } // namespace pcl
+
+#define POINT_CLOUD_REGISTER_FIELD_DATATYPE(r, name, elem) \
+ template<> struct datatype<name, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem)> \
+ { /** \endcond */ \
+ using type = boost::mpl::identity<BOOST_PP_TUPLE_ELEM(3, 0, elem)>::type; \
+ using decomposed = decomposeArray<type>; \
+ static const std::uint8_t value = asEnum<decomposed::type>::value; \
+ static const std::uint32_t size = decomposed::value; \
+ }; \
+
+
+namespace pcl
+{
+namespace traits
+{
+// fields
+template<typename PointT>
+struct fieldList /** \cond NO_WARN_RECURSIVE */ : fieldList<typename POD<PointT>::type> /** \endcond */
+{ /** \cond NO_WARN_RECURSIVE */
+ // Contents of specialization:
+ // using type = boost::mpl::vector<...>;
+
+ // Avoid infinite compile-time recursion
+ BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
+ POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
+};
+} // namespace traits
+} // namespace pcl
+
+#define POINT_CLOUD_REGISTER_POINT_FIELD_LIST(name, seq) \
+ template<> struct fieldList<name> \
+ { /** \endcond */ \
+ using type = boost::mpl::vector<BOOST_PP_SEQ_ENUM(seq)>; \
+ };
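+
+// Illustrative only: once a point type (e.g. pcl::PointXYZ) has been registered
+// with POINT_CLOUD_REGISTER_POINT_STRUCT (which expands to the helper macros
+// above), its per-field traits can be queried at compile time:
+//   pcl::traits::name<pcl::PointXYZ, pcl::fields::x>::value      // "x"
+//   pcl::traits::offset<pcl::PointXYZ, pcl::fields::x>::value    // offsetof (pcl::PointXYZ, x)
+//   pcl::traits::datatype<pcl::PointXYZ, pcl::fields::x>::value  // pcl::PCLPointField::FLOAT32
+//   pcl::traits::fieldList<pcl::PointXYZ>::type                  // boost::mpl::vector of field tags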
{
//MEASURE_FUNCTION_TIME;
- //std::cout << "Starting to create range image from "<<point_cloud.points.size ()<<" points.\n";
+ //std::cout << "Starting to create range image from "<<point_cloud.size ()<<" points.\n";
// If the sensor pose is inside of the sphere we have to calculate the image the normal way
if ((point_cloud_center-sensor_pose.translation()).norm() <= point_cloud_radius) {
CoordinateFrame coordinate_frame, float noise_level,
float min_range)
{
- //std::cout << "Starting to create range image from "<<point_cloud.points.size ()<<" points.\n";
+ //std::cout << "Starting to create range image from "<<point_cloud.size ()<<" points.\n";
width = di_width;
height = di_height;
#include <pcl/point_cloud.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
-#include <pcl/common/common_headers.h>
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
+#include <pcl/common/angles.h> // for deg2rad
#include <pcl/common/vector_average.h>
#include <typeinfo>
{
os << "header: " << std::endl;
os << r.header;
- os << "points[]: " << r.points.size () << std::endl;
+ os << "points[]: " << r.size () << std::endl;
os << "width: " << r.width << std::endl;
os << "height: " << r.height << std::endl;
os << "sensor_origin_: "
//https://bugreports.qt-project.org/browse/QTBUG-22829
#ifndef Q_MOC_RUN
-#include <pcl/pcl_macros.h>
-#include <pcl/type_traits.h>
-#include <boost/mpl/vector.hpp>
-#include <boost/preprocessor/seq/enum.hpp>
-#include <boost/preprocessor/seq/for_each.hpp>
-#include <boost/preprocessor/seq/transform.hpp>
-#include <boost/preprocessor/cat.hpp>
-#include <boost/preprocessor/comparison.hpp>
+#include <pcl/point_struct_traits.h> // for pcl::traits::POD, POINT_CLOUD_REGISTER_FIELD_(NAME, OFFSET, DATATYPE), POINT_CLOUD_REGISTER_POINT_FIELD_LIST
+#include <boost/mpl/assert.hpp> // for BOOST_MPL_ASSERT_MSG
+#include <boost/preprocessor/seq/for_each.hpp> // for BOOST_PP_SEQ_FOR_EACH
+#include <boost/preprocessor/seq/transform.hpp> // for BOOST_PP_SEQ_TRANSFORM
+#include <boost/preprocessor/tuple/elem.hpp> // for BOOST_PP_TUPLE_ELEM
+#include <boost/preprocessor/cat.hpp> // for BOOST_PP_CAT
#endif
-#include <cstddef> //offsetof
-#include <type_traits>
+#include <cstdint> // for std::uint32_t
+#include <type_traits> // for std::enable_if_t, std::is_array, std::remove_const_t, std::remove_all_extents_t
// Must be used in global namespace with name fully qualified
#define POINT_CLOUD_REGISTER_POINT_STRUCT(name, fseq) \
struct BOOST_PP_TUPLE_ELEM(3, 2, elem); \
/***/
-#define POINT_CLOUD_REGISTER_FIELD_NAME(r, point, elem) \
- template<int dummy> \
- struct name<point, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem), dummy> \
- { \
- static const char value[]; \
- }; \
- \
- template<int dummy> \
- const char name<point, \
- pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem), \
- dummy>::value[] = \
- BOOST_PP_STRINGIZE(BOOST_PP_TUPLE_ELEM(3, 2, elem));
- /***/
-
-#define POINT_CLOUD_REGISTER_FIELD_OFFSET(r, name, elem) \
- template<> struct offset<name, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem)> \
- { \
- static const std::size_t value = offsetof(name, BOOST_PP_TUPLE_ELEM(3, 1, elem)); \
- };
- /***/
-
-#define POINT_CLOUD_REGISTER_FIELD_DATATYPE(r, name, elem) \
- template<> struct datatype<name, pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem)> \
- { \
- using type = boost::mpl::identity<BOOST_PP_TUPLE_ELEM(3, 0, elem)>::type; \
- using decomposed = decomposeArray<type>; \
- static const std::uint8_t value = asEnum<decomposed::type>::value; \
- static const std::uint32_t size = decomposed::value; \
- };
- /***/
-
#define POINT_CLOUD_TAG_OP(s, data, elem) pcl::fields::BOOST_PP_TUPLE_ELEM(3, 2, elem)
#define POINT_CLOUD_EXTRACT_TAGS(seq) BOOST_PP_SEQ_TRANSFORM(POINT_CLOUD_TAG_OP, _, seq)
-#define POINT_CLOUD_REGISTER_POINT_FIELD_LIST(name, seq) \
- template<> struct fieldList<name> \
- { \
- using type = boost::mpl::vector<BOOST_PP_SEQ_ENUM(seq)>; \
- };
- /***/
-
#if defined _MSC_VER
#pragma warning (pop)
#endif
#pragma once
-#include <boost/mpl/assert.hpp>
+#include <pcl/point_struct_traits.h> // for pcl::traits::POD, pcl::traits::name, pcl::traits::datatype, pcl::traits::offset
-// This is required for the workaround at line 109
-#ifdef _MSC_VER
-#include <Eigen/Core>
-#include <Eigen/src/StlSupport/details.h>
-#endif
-
-#include <string>
-#include <type_traits>
+#include <cstddef> // for std::size_t
+#include <cstdint> // for std::uint8_t
-#include <cstdint>
+#include <functional> // for std::function, needed till C++17
+#include <string> // for std::string
+#include <type_traits> // for std::false_type, std::true_type
namespace pcl
{
template<int index>
using asType_t = typename asType<index>::type;
- // Metafunction to decompose a type (possibly of array of any number of dimensions) into
- // its scalar type and total number of elements.
- template<typename T> struct decomposeArray
- {
- using type = std::remove_all_extents_t<T>;
- static const std::uint32_t value = sizeof (T) / sizeof (type);
- };
-
- // For non-POD point types, this is specialized to return the corresponding POD type.
- template<typename PointT>
- struct POD
- {
- using type = PointT;
- };
-
-#ifdef _MSC_VER
-
- /* Sometimes when calling functions like `copyPoint()` or `copyPointCloud`
- * without explicitly specifying point types, MSVC deduces them to be e.g.
- * `Eigen::internal::workaround_msvc_stl_support<pcl::PointXYZ>` instead of
- * plain `pcl::PointXYZ`. Subsequently these types are passed to meta-
- * functions like `has_field` or `fieldList` and make them choke. This hack
- * makes use of the fact that internally `fieldList` always applies `POD` to
- * its argument type. This specialization therefore allows to unwrap the
- * contained point type. */
- template<typename PointT>
- struct POD<Eigen::internal::workaround_msvc_stl_support<PointT> >
- {
- using type = PointT;
- };
-
-#endif
-
- // name
- /* This really only depends on Tag, but we go through some gymnastics to avoid ODR violations.
- We template it on the point type PointT to avoid ODR violations when registering multiple
- point types with shared tags.
- The dummy parameter is so we can partially specialize name on PointT and Tag but leave it
- templated on dummy. Each specialization declares a static char array containing the tag
- name. The definition of the static member would conflict when linking multiple translation
- units that include the point type registration. But when the static member definition is
- templated (on dummy), we sidestep the ODR issue.
- */
- template<class PointT, typename Tag, int dummy = 0>
- struct name : name<typename POD<PointT>::type, Tag, dummy>
- {
- // Contents of specialization:
- // static const char value[];
-
- // Avoid infinite compile-time recursion
- BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
- POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
- };
-
- // offset
- template<class PointT, typename Tag>
- struct offset : offset<typename POD<PointT>::type, Tag>
- {
- // Contents of specialization:
- // static const std::size_t value;
-
- // Avoid infinite compile-time recursion
- BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
- POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
- };
-
- // datatype
- template<class PointT, typename Tag>
- struct datatype : datatype<typename POD<PointT>::type, Tag>
- {
- // Contents of specialization:
- // using type = ...;
- // static const std::uint8_t value;
- // static const std::uint32_t size;
-
- // Avoid infinite compile-time recursion
- BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
- POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
- };
-
- // fields
- template<typename PointT>
- struct fieldList : fieldList<typename POD<PointT>::type>
- {
- // Contents of specialization:
- // using type = boost::mpl::vector<...>;
-
- // Avoid infinite compile-time recursion
- BOOST_MPL_ASSERT_MSG((!std::is_same<PointT, typename POD<PointT>::type>::value),
- POINT_TYPE_NOT_PROPERLY_REGISTERED, (PointT&));
- };
- } //namespace traits
+ } // namespace traits
/** \brief A helper functor that can copy a specific value if the given field exists.
*
template <typename T> struct has_custom_allocator<T, void_t<typename T::_custom_allocator_type_trait>> : std::true_type {};
#endif
-}
+ /**
+ * \todo: Remove in C++17
+ */
+#ifndef __cpp_lib_is_invocable
+ // Implementation taken from: https://stackoverflow.com/a/51188325
+ template <typename F, typename... Args>
+ constexpr bool is_invocable_v =
+ std::is_constructible<std::function<void(Args...)>,
+ std::reference_wrapper<std::remove_reference_t<F>>>::value;
+
+ template <typename R, typename F, typename... Args>
+ constexpr bool is_invocable_r_v =
+ std::is_constructible<std::function<R(Args...)>,
+ std::reference_wrapper<std::remove_reference_t<F>>>::value;
+#else
+ using std::is_invocable_v;
+ using std::is_invocable_r_v;
+#endif
+
+ /**
+ * \todo: Remove in C++17
+ */
+#ifndef __cpp_lib_remove_cvref
+ template <typename T>
+ using remove_cvref_t = std::remove_cv_t<std::remove_reference_t<T>>;
+#else
+ using std::remove_cvref_t;
+#endif
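+
+  // Illustrative only: with either branch above, checks such as
+  //   static_assert (pcl::is_invocable_v<void (*) (int), int>, "");
+  //   static_assert (std::is_same<pcl::remove_cvref_t<const int&>, int>::value, "");
+  // compile identically under C++14 and C++17.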
+}
* \ingroup common
*/
+#include <pcl/pcl_config.h>
#include <pcl/pcl_macros.h>
-
#include <vector>
#include <cstdint>
using int64_t PCL_DEPRECATED(1, 12, "use std::int64_t instead of pcl::int64_t") = std::int64_t;
using int_fast16_t PCL_DEPRECATED(1, 12, "use std::int_fast16_t instead of pcl::int_fast16_t") = std::int_fast16_t;
-// temporary macros for customization. Only use for PCL < 1.12
-// Aim is to remove macros and instead allow multiple index types to coexist together
-#ifndef PCL_INDEX_SIZE
-#if PCL_MINOR_VERSION <= 11
-// sizeof returns bytes, while we measure size by bits in the template
-#define PCL_INDEX_SIZE (sizeof(int) * 8)
-#else
-#define PCL_INDEX_SIZE 32
-#endif // PCL_MINOR_VERSION
-#endif // PCL_INDEX_SIZE
-
-#ifndef PCL_INDEX_SIGNED
-#define PCL_INDEX_SIGNED true
-#endif
-
namespace detail {
/**
* \brief int_type::type refers to an integral type that satisfies template parameters
/**
* \brief number of bits in PCL's index type
*
- * For PCL 1.11, please use PCL_INDEX_SIZE to choose a size best suited for your needs.
- * PCL 1.12 will come with default 32, along with client code compile time choice
+ * Please use PCL_INDEX_SIZE when building PCL to choose a size best suited for your needs.
+ * PCL 1.12 will default to 32 bits
*
* PCL 1.11 has a default size = sizeof(int)
*/
/**
* \brief signed/unsigned nature of PCL's index type
- * For PCL 1.11, please use PCL_INDEX_SIGNED to choose a type best suited for your needs.
- * PCL 1.12 will come with default signed, along with client code compile time choice
+ * Please use PCL_INDEX_SIGNED when building PCL to choose a type best suited for your needs.
* Default: signed
*/
constexpr bool index_type_signed = PCL_INDEX_SIGNED;
using index_t = detail::int_type_t<detail::index_type_size, detail::index_type_signed>;
static_assert(!std::is_void<index_t>::value, "`index_t` can't have type `void`");
+ /**
+ * \brief Type used for an unsigned index in PCL
+ *
+ * Unsigned index that mirrors the type of index_t
+ */
+ using uindex_t = detail::int_type_t<detail::index_type_size, false>;
+ static_assert(!std::is_signed<uindex_t>::value, "`uindex_t` must be unsigned");
+
+ /**
+ * \brief Type used for indices in PCL
+ * \todo Remove with C++20
+ */
+ template <typename Allocator = std::allocator<index_t>>
+ using IndicesAllocator = std::vector<index_t, Allocator>;
+
/**
* \brief Type used for indices in PCL
*/
- using Indices = std::vector<index_t>;
+ using Indices = IndicesAllocator<>;
} // namespace pcl
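+
+// Illustrative only: client code can rely on the aliases above instead of
+// hard-coding int. For example, assuming `cloud` is a populated
+// pcl::PointCloud<pcl::PointXYZ>, the indices of all points in front of the
+// sensor can be gathered as:
+//   pcl::Indices indices;
+//   for (pcl::index_t i = 0; i < static_cast<pcl::index_t> (cloud.size ()); ++i)
+//     if (cloud[i].z > 0.0f)
+//       indices.push_back (i);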
*
*/
-#include <numeric>
#include <vector>
#include <pcl/common/io.h>
*
*/
+#include <pcl/types.h>
#include <pcl/correspondence.h>
#include <algorithm>
#include <iterator>
void
pcl::getRejectedQueryIndices (const pcl::Correspondences &correspondences_before,
const pcl::Correspondences &correspondences_after,
- std::vector<int>& indices,
+ Indices& indices,
bool presorting_required)
{
indices.clear();
return;
}
- std::vector<int> indices_before (nr_correspondences_before);
+ Indices indices_before (nr_correspondences_before);
for (std::size_t i = 0; i < nr_correspondences_before; ++i)
indices_before[i] = correspondences_before[i].index_query;
- std::vector<int> indices_after (nr_correspondences_after);
+ Indices indices_after (nr_correspondences_after);
for (std::size_t i = 0; i < nr_correspondences_after; ++i)
indices_after[i] = correspondences_after[i].index_query;
#include <pcl/common/feature_histogram.h>
-#include <algorithm>
-
#include <pcl/console/print.h>
pcl::FeatureHistogram::FeatureHistogram (std::size_t const number_of_bins,
{
threshold_min_ = min;
threshold_max_ = max;
- step_ = (max - min) / static_cast<float> (number_of_bins_);
+ step_ = (max - min) / static_cast<float> (number_of_bins);
}
else
{
assert (kernel.size () % 2 == 1);
std::size_t kernel_width = kernel.size () -1;
std::size_t radius = kernel.size () / 2;
- std::unique_ptr<const pcl::PointCloud<float>> copied_input;
+ pcl::PointCloud<float> copied_input;
const pcl::PointCloud<float>* unaliased_input;
if (&input != &output)
{
}
else
{
- copied_input = std::make_unique<pcl::PointCloud<float>>(input);
- unaliased_input = copied_input.get();
+ copied_input = input;
+ unaliased_input = &copied_input;
}
std::size_t i;
assert (kernel.size () % 2 == 1);
std::size_t kernel_width = kernel.size () -1;
std::size_t radius = kernel.size () / 2;
- std::unique_ptr<const pcl::PointCloud<float>> copied_input;
+ pcl::PointCloud<float> copied_input;
const pcl::PointCloud<float>* unaliased_input;
if (&input != &output)
{
}
else
{
- copied_input = std::make_unique<pcl::PointCloud<float>>(input);
- unaliased_input = copied_input.get();
+ copied_input = input;
+ unaliased_input = &copied_input;
}
std::size_t j;
void
pcl::copyPointCloud (
const pcl::PCLPointCloud2 &cloud_in,
- const std::vector<int> &indices,
+ const Indices &indices,
pcl::PCLPointCloud2 &cloud_out)
{
cloud_out.header = cloud_in.header;
cloud_out.height = 1;
- cloud_out.width = static_cast<std::uint32_t> (indices.size ());
+ cloud_out.width = indices.size ();
cloud_out.fields = cloud_in.fields;
cloud_out.is_bigendian = cloud_in.is_bigendian;
cloud_out.point_step = cloud_in.point_step;
void
pcl::copyPointCloud (
const pcl::PCLPointCloud2 &cloud_in,
- const std::vector<int, Eigen::aligned_allocator<int> > &indices,
+ const IndicesAllocator< Eigen::aligned_allocator<int> > &indices,
pcl::PCLPointCloud2 &cloud_out)
{
cloud_out.header = cloud_in.header;
cloud_out.height = 1;
- cloud_out.width = static_cast<std::uint32_t> (indices.size ());
+ cloud_out.width = indices.size ();
cloud_out.fields = cloud_in.fields;
cloud_out.is_bigendian = cloud_in.is_bigendian;
cloud_out.point_step = cloud_in.point_step;
#include <cctype>
#include <cerrno>
-#include <complex>
-#include <cstdio>
#include <limits>
#include <type_traits>
: use_indices_ (false)
, fake_indices_ (false)
, field_sizes_ (0)
- , x_idx_ (-1)
- , y_idx_ (-1)
- , z_idx_ (-1)
+ , x_idx_ (UNAVAILABLE)
+ , y_idx_ (UNAVAILABLE)
+ , z_idx_ (UNAVAILABLE)
, x_field_name_ ("x")
, y_field_name_ ("y")
, z_field_name_ ("z")
if (!indices_)
{
fake_indices_ = true;
- indices_.reset (new std::vector<int>);
+ indices_.reset (new Indices);
}
// If we have a set of fake indices, but they do not match the number of points in the cloud, update them
void
pcl::PCLBase<pcl::PCLPointCloud2>::setIndices (const PointIndicesConstPtr &indices)
{
- indices_.reset (new std::vector<int> (indices->indices));
+ indices_.reset (new Indices(indices->indices));
fake_indices_ = false;
use_indices_ = true;
}
*
*/
-#include <cstddef>
#include <iostream>
-#include <pcl/common/eigen.h>
#include <pcl/common/poses_from_matches.h>
#include <pcl/common/transformation_from_correspondences.h>
#include <string>
#include <boost/optional.hpp>
-#if defined WIN32
+#if defined _WIN32
# include <windows.h>
# include <io.h>
if (!colored)
{
// Use colored output if PCL_CLICOLOR_FORCE is set or if the output is an interactive terminal
-#ifdef WIN32
+#ifdef _WIN32
colored = getenv ("PCL_CLICOLOR_FORCE") || _isatty (_fileno (stream));
#else
colored = getenv ("PCL_CLICOLOR_FORCE") || isatty (fileno (stream));
{
if (!useColoredOutput (stream)) return;
-#ifdef WIN32
+#ifdef _WIN32
HANDLE h = GetStdHandle ((stream == stdout) ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
SetConsoleTextAttribute (h, convertAttributesColor (attribute, fg, bg));
#else
{
if (!useColoredOutput (stream)) return;
-#ifdef WIN32
+#ifdef _WIN32
HANDLE h = GetStdHandle ((stream == stdout) ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
SetConsoleTextAttribute (h, convertAttributesColor (attribute, fg));
#else
{
if (!useColoredOutput (stream)) return;
-#ifdef WIN32
+#ifdef _WIN32
HANDLE h = GetStdHandle ((stream == stdout) ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
SetConsoleTextAttribute (h, convertAttributesColor (0, TT_WHITE, TT_BLACK));
#else
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <cstddef>
#include <iostream>
#include <cmath>
-#include <set>
+#include <pcl/common/time.h> // for MEASURE_FUNCTION_TIME
#include <pcl/common/eigen.h>
#include <pcl/range_image/range_image.h>
-#include <pcl/common/transformation_from_correspondences.h>
namespace pcl
{
currentPoint = unobserved_point;
continue;
}
- currentPoint = oldRangeImage.points[oldY*oldRangeImage.width + oldX];
+ currentPoint = oldRangeImage[oldY*oldRangeImage.width + oldX];
}
}
}
far_ranges.points.push_back (point);
}
}
- far_ranges.width= static_cast<std::uint32_t> (far_ranges.points.size ()); far_ranges.height = 1;
+ far_ranges.width = far_ranges.size (); far_ranges.height = 1;
far_ranges.is_dense = false;
}
#include <pcl/common/time_trigger.h>
#include <pcl/common/time.h>
-#include <iostream>
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::TimeTrigger::TimeTrigger (double interval, const callback_type& callback)
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
- pt.x = cloud->points[i].x;
- pt.y = cloud->points[i].y;
- pt.z = cloud->points[i].z;
+ pt.x = (*cloud)[i].x;
+ pt.y = (*cloud)[i].y;
+ pt.z = (*cloud)[i].z;
// Pack RGB into a float
- pt.rgb = *(float*)(&cloud->points[i].rgb);
- data_host.points[i] = pt;
+ pt.rgb = *(float*)(&(*cloud)[i].rgb);
+ data_host[i] = pt;
}
data_host.width = cloud->width;
data_host.height = cloud->height;
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
- pt.x = cloud->points[i].x;
- pt.y = cloud->points[i].y;
- pt.z = cloud->points[i].z;
+ pt.x = (*cloud)[i].x;
+ pt.y = (*cloud)[i].y;
+ pt.z = (*cloud)[i].z;
// Pack RGB into a float
- pt.rgb = *(float*)(&cloud->points[i].rgb);
- data_host.points[i] = pt;
+ pt.rgb = *(float*)(&(*cloud)[i].rgb);
+ data_host[i] = pt;
}
data_host.width = cloud->width;
data_host.height = cloud->height;
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
- pt.x = cloud->points[i].x;
- pt.y = cloud->points[i].y;
- pt.z = cloud->points[i].z;
+ pt.x = (*cloud)[i].x;
+ pt.y = (*cloud)[i].y;
+ pt.z = (*cloud)[i].z;
// Pack RGB into a float
- pt.rgb = *(float*)(&cloud->points[i].rgb);
- data_host.points[i] = pt;
+ pt.rgb = *(float*)(&(*cloud)[i].rgb);
+ data_host[i] = pt;
}
data_host.width = cloud->width;
data_host.height = cloud->height;
for (std::size_t i = 0; i < cloud->points.size (); ++i)
{
PointXYZRGB pt;
- pt.x = cloud->points[i].x;
- pt.y = cloud->points[i].y;
- pt.z = cloud->points[i].z;
+ pt.x = (*cloud)[i].x;
+ pt.y = (*cloud)[i].y;
+ pt.z = (*cloud)[i].z;
// Pack RGB into a float
- pt.rgb = *(float*)(&cloud->points[i].rgb);
- data_host.points[i] = pt;
+ pt.rgb = *(float*)(&(*cloud)[i].rgb);
+ data_host[i] = pt;
}
data_host.width = cloud->width;
data_host.height = cloud->height;
/** \brief Removes points with x, y, or z equal to NaN
* \param cloud_in the input point cloud
 * \param cloud_out the output point cloud
- * \param index the mapping (ordered): cloud_out.points[i] = cloud_in.points[index[i]]
+ * \param index the mapping (ordered): cloud_out[i] = cloud_in[index[i]]
* \note The density of the point cloud is lost.
* \note Can be called with cloud_in == cloud_out
*/
// output.points.resize (input.points.size());
// for (std::size_t i = 0; i < input.points.size (); ++i)
// {
-// output.points[i].x = input.points[i].x;
-// output.points[i].y = input.points[i].y;
-// output.points[i].z = input.points[i].z;
+// output[i].x = input[i].x;
+// output[i].y = input[i].y;
+// output[i].z = input[i].z;
// // Pack RGB into a float
-// output.points[i].rgb = *(float*)(&input.points[i].rgb);
+// output[i].rgb = *(float*)(&input[i].rgb);
// }
// thrust::copy (output.points.begin(), output.points.end (), input.points.begin());
// output.width = input.width;
output.points.resize (input.points.size ());
for (std::size_t i = 0; i < input.points.size (); ++i)
{
- output.points[i].x = input.points[i].x;
- output.points[i].y = input.points[i].y;
- output.points[i].z = input.points[i].z;
+ output[i].x = input.points[i].x;
+ output[i].y = input.points[i].y;
+ output[i].z = input.points[i].z;
// Pack RGB into a float
- output.points[i].rgb = *(float*)(&input.points[i].rgb);
+ output[i].rgb = *(float*)(&input.points[i].rgb);
- output.points[i].normal_x = normals[i].x;
- output.points[i].normal_y = normals[i].y;
- output.points[i].normal_z = normals[i].z;
- output.points[i].curvature = normals[i].w;
+ output[i].normal_x = normals[i].x;
+ output[i].normal_y = normals[i].y;
+ output[i].normal_z = normals[i].z;
+ output[i].curvature = normals[i].w;
}
output.width = input.width;
input << d_input;
thrust::host_vector<float4> normals = d_normals;
- output.points.resize (input.points.size ());
- for (std::size_t i = 0; i < input.points.size (); ++i)
- {
- output.points[i].x = input.points[i].x;
- output.points[i].y = input.points[i].y;
- output.points[i].z = input.points[i].z;
- // Pack RGB into a float
- output.points[i].rgb = *(float*)(&input.points[i].rgb);
-
- output.points[i].normal_x = normals[i].x;
- output.points[i].normal_y = normals[i].y;
- output.points[i].normal_z = normals[i].z;
- output.points[i].curvature = normals[i].w;
- }
- output.width = input.width;
- output.height = input.height;
- output.is_dense = input.is_dense;
+ toPCL(input, normals, output);
}
//////////////////////////////////////////////////////////////////////////
output.points.resize (input.points.size ());
for (std::size_t i = 0; i < input.points.size (); ++i)
{
- output.points[i].x = input.points[i].x;
- output.points[i].y = input.points[i].y;
- output.points[i].z = input.points[i].z;
+ output[i].x = input.points[i].x;
+ output[i].y = input.points[i].y;
+ output[i].z = input.points[i].z;
// Pack RGB into a float
- output.points[i].rgb = *(float*)(&input.points[i].rgb);
+ output[i].rgb = *(float*)(&input.points[i].rgb);
}
output.width = input.width;
/* for (std::size_t i = 0; i < output.points.size (); ++i)
std::cerr <<
- output.points[i].x << " " <<
- output.points[i].y << " " <<
- output.points[i].z << " " << std::endl;*/
+ output[i].x << " " <<
+ output[i].y << " " <<
+ output[i].z << " " << std::endl;*/
}
//////////////////////////////////////////////////////////////////////////
PointCloudAOS<Host> cloud;
cloud << input;
- output.points.resize (cloud.points.size ());
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
- {
- output.points[i].x = cloud.points[i].x;
- output.points[i].y = cloud.points[i].y;
- output.points[i].z = cloud.points[i].z;
- // Pack RGB into a float
- output.points[i].rgb = *(float*)(&cloud.points[i].rgb);
- }
-
- output.width = cloud.width;
- output.height = cloud.height;
- output.is_dense = cloud.is_dense;
-
-/* for (std::size_t i = 0; i < output.points.size (); ++i)
- std::cerr <<
- output.points[i].x << " " <<
- output.points[i].y << " " <<
- output.points[i].z << " " << std::endl;*/
+ toPCL(cloud, output);
}
-
} // namespace
} // namespace
typename PointCloudAOS<Storage>::iterator it = thrust::copy_if (input->points.begin (), input->points.end (), mask_device.begin (), output->points.begin (), isNotZero<T> ());
output->points.resize (it - output->points.begin ());
- output->width = (unsigned int) output->points.size();
+ output->width = output->points.size();
output->height = 1;
output->is_dense = false;
}
typename PointCloudAOS<Storage>::iterator it = thrust::copy_if (input->points.begin (), input->points.end (), indices.begin (), output->points.begin (), isInlier ());
output->points.resize (it - output->points.begin ());
- output->width = (unsigned int) output->points.size();
+ output->width = output->points.size();
output->height = 1;
output->is_dense = false;
}
typename PointCloudAOS<Storage>::iterator it = thrust::copy_if (input->points.begin (), input->points.end (), indices.begin (), output->points.begin (), isNotInlier ());
output->points.resize (it - output->points.begin ());
- output->width = (unsigned int) output->points.size();
+ output->width = output->points.size();
output->height = 1;
output->is_dense = false;
}
for (int y = leftY; (y <= rightY) && (nnn < max_nn_arg); y++)
{
int idx = y * input_->width + x;
- const PointT& point = input_->points[idx];
+ const PointT& point = (*input_)[idx];
const double point_dist_x = point.x - p_q_arg.x;
const double point_dist_y = point.y - p_q_arg.y;
if ((x >= 0) && (y >= 0) && (x < (int)input_->width) && (y < (int)input_->height))
{
idx = y * (int)input_->width + x;
- const PointT& point = input_->points[idx];
+ const PointT& point = (*input_)[idx];
if ((point.x == point.x) && // check for NaNs
(point.y == point.y) &&
if ((x >= 0) && (y >= 0) && (x < (int)input_->width) && (y < (int)input_->height))
{
idx = y * (int)input_->width + x;
- const PointT& point = input_->points[idx];
+ const PointT& point = (*input_)[idx];
if ((point.x == point.x) && // check for NaNs
(point.y == point.y) && (point.z == point.z))
for (int x = 0; x < (int)input_->width; x++)
{
std::size_t i = y * input_->width + x;
- if ((input_->points[i].x == input_->points[i].x) && // check for NaNs
- (input_->points[i].y == input_->points[i].y) && (input_->points[i].z == input_->points[i].z))
+ if (((*input_)[i].x == (*input_)[i].x) && // check for NaNs
+ ((*input_)[i].y == (*input_)[i].y) && ((*input_)[i].z == (*input_)[i].z))
{
- const PointT& point = input_->points[i];
+ const PointT& point = (*input_)[i];
if ((double)(x - input_->width / 2) * (double)(y - input_->height / 2) * point.z != 0)
{
// estimate the focal length for point.x and point.y
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
- p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
- p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
+ p_no_outliers = max(std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
+ p_no_outliers = min(1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)min_nr_in_shape / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
- p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
- p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
+ p_no_outliers = max(std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
+ p_no_outliers = min(1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers != 1.0f)
{
if (std::log (1.0f - probability_) / std::log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore...
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
- p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
- p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
+ p_no_outliers = max(std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
+ p_no_outliers = min(1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
float w = (float)((float)n_best_inliers_count / (float)sac_model_->getIndices ()->size ());
// float p_no_outliers = 1.0 - pow (w, (float)selection.size ());
float p_no_outliers = 1.0f - pow (w, (float)1);
- p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
- p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
+ p_no_outliers = max(std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
+ p_no_outliers = min(1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>
-using namespace std;
-
namespace pcl
{
namespace cuda
region_mask.resize (input->points.size());
//int** stencils = new int*[inlier_stencils.size()];
thrust::host_vector<int*> stencils_host (inlier_stencils.size ());
- for (int i = 0; i < inlier_stencils.size (); ++i)
+ for (std::size_t i = 0; i < inlier_stencils.size (); ++i)
stencils_host[i] = thrust::raw_pointer_cast(&(*inlier_stencils[i])[0]);
//stencils_host[i] = thrust::raw_pointer_cast<int> (&(*inlier_stencils[i])[0]);
#include <pcl/cuda/segmentation/mssegmentation.h>
-using namespace std;
-
// Auxiliray stuff
namespace pcl
{
*/
void addPointCloud (PointCloud &pc, Pose &pose = 0)
{
- new_clouds_.push_back (make_pair (pc, pose));
+ new_clouds_.push_back (std::make_pair (pc, pose));
}
/**
{
public:
virtual ~LoopDetection () {}
- virtual list<pair<PointCloud*, PointCloud*> > detectLoop(list<PosedPointCloud*> poses, list<PosedPointCloud*> query) {} = 0;
+ virtual list<std::pair<PointCloud*, PointCloud*> > detectLoop(list<PosedPointCloud*> poses, list<PosedPointCloud*> query) = 0;
}
GraphHandler
class DistanceLoopDetection : LoopDetection
{
public:
- virtual list<pair<PointCloud*, PointCloud*> > detectLoop(list<PosedPointCloud*> poses, list<PosedPointCloud*> query)
+ virtual list<std::pair<PointCloud*, PointCloud*> > detectLoop(list<PosedPointCloud*> poses, list<PosedPointCloud*> query)
{
//I want a map reduce here ;)
list<PosedPointCloud >::iterator poses_it;
cloud.width = 2;
cloud.height = 1;
- cloud.points[0].test = 1;
- cloud.points[1].test = 2;
- cloud.points[0].x = cloud.points[0].y = cloud.points[0].z = 0;
- cloud.points[1].x = cloud.points[1].y = cloud.points[1].z = 3;
+ cloud[0].test = 1;
+ cloud[1].test = 2;
+ cloud[0].x = cloud[0].y = cloud[0].z = 0;
+ cloud[1].x = cloud[1].y = cloud[1].z = 3;
pcl::io::savePCDFile ("test.pcd", cloud);
}
pcl::PCDReader reader;
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>), cloud_f (new pcl::PointCloud<pcl::PointXYZ>);
reader.read ("table_scene_lms400.pcd", *cloud);
- std::cout << "PointCloud before filtering has: " << cloud->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud before filtering has: " << cloud->size () << " data points." << std::endl;
.
.
.
- while (cloud_filtered->points.size () > 0.3 * nr_points)
+ while (cloud_filtered->size () > 0.3 * nr_points)
{
.
+---------------------------------------------------------------+-----------------+-------------------+-------------------+
| .. image:: images/posix_building_pcl/openni_logo.png | OpenNI | 1.3 | pcl_io |
+---------------------------------------------------------------+-----------------+-------------------+-------------------+
-| .. image:: images/posix_building_pcl/cuda_logo.png | CUDA | 4.0 | pcl_* |
+| .. image:: images/posix_building_pcl/cuda_logo.png | CUDA | 9.2 | pcl_* |
+---------------------------------------------------------------+-----------------+-------------------+-------------------+
Troubleshooting
as described in :ref:`pfh_Estimation` - this will be called the Simplified
Point Feature Histogram (SPFH);
- * in a second step, for each point its k neighbors are re-determined, and the
- neighboring SPFH values are used to weight the final histogram of pq
+ * in a second step, for each point its :math:`k` neighbors are re-determined, and the
+ neighboring SPFH values are used to weight the final histogram of :math:`p_q`
(called FPFH) as follows:
.. math::
- FPFH(\boldsymbol{p}_q) = SPFH(\boldsymbol{p}_q) + {1 \over k} \sum_{i=1}^k {{1 \over \omega_k} \cdot SPFH(\boldsymbol{p}_k)}
+ FPFH(\boldsymbol{p}_q) = SPFH(\boldsymbol{p}_q) + {1 \over k} \sum_{i=1}^k {{1 \over \omega_i} \cdot SPFH(\boldsymbol{p}_i)}
-where the weight :math:`\omega_k` represents a distance between the query point
-:math:`p_q` and a neighbor point :math:`p_k` in some given metric space, thus
-scoring the (:math:`p_q, p_k`) pair, but could just as well be selected as a
+where the weight :math:`\omega_i` represents a distance between the query point
+:math:`p_q` and a neighbor point :math:`p_i` in some given metric space, thus
+scoring the (:math:`p_q, p_i`) pair, but could just as well be selected as a
different measure if necessary. To understand the importance of this weighting
scheme, the figure below presents the influence region diagram for a
k-neighborhood set centered at :math:`p_q`.
Thus, for a given query point :math:`p_q`, the algorithm first estimates its
SPFH values by creating pairs between itself and its neighbors (illustrated
using red lines). This is repeated for all the points in the dataset, followed
-by a re-weighting of the SPFH values of pq using the SPFH values of its
-:math:`p_k` neighbors, thus creating the FPFH for :math:`p_q`. The extra FPFH
+by a re-weighting of the SPFH values of :math:`p_q` using the SPFH values of its
+:math:`k` neighbors, thus creating the FPFH for :math:`p_q`. The extra FPFH
connections, resultant due to the additional weighting scheme, are shown with
black lines. As the diagram shows, some of the value pairs will be counted
twice (marked with thicker lines in the figure).
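
To make the weighting step concrete, the following sketch combines precomputed
SPFH histograms exactly as in the formula above. It is an illustration only, not
PCL's internal implementation; ``spfh_query``, ``spfh_neighbors`` and
``distances`` are hypothetical inputs holding :math:`SPFH(p_q)`, the
:math:`SPFH(p_i)` of the :math:`k` neighbors and the distances :math:`\omega_i`.

.. code-block:: cpp

   #include <cstddef>
   #include <vector>

   // Weight each neighbor's SPFH by 1/omega_i, average over the k neighbors,
   // and add the result to the query point's own SPFH.
   std::vector<float>
   weightSPFH (const std::vector<float>& spfh_query,
               const std::vector<std::vector<float>>& spfh_neighbors,
               const std::vector<float>& distances)
   {
     std::vector<float> fpfh = spfh_query;
     const float k = static_cast<float> (spfh_neighbors.size ());
     for (std::size_t i = 0; i < spfh_neighbors.size (); ++i)
       for (std::size_t bin = 0; bin < fpfh.size (); ++bin)
         fpfh[bin] += (1.0f / k) * (1.0f / distances[i]) * spfh_neighbors[i][bin];
     return fpfh;
   }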
// Compute the features
fpfh.compute (*fpfhs);
- // fpfhs->points.size () should have the same size as the input cloud->points.size ()*
+ // fpfhs->size () should be the same as the input cloud->size ()*
}
The actual **compute** call from the **FPFHEstimation** class does nothing internally but::
1. get the nearest neighbors of :math:`p`
- 2. for each pair of :math:`p, p_k` (where :math:`p_k` is a neighbor of :math:`p`, compute the three angular values
+ 2. for each pair of :math:`p, p_i` (where :math:`p_i` is a neighbor of :math:`p`), compute the three angular values
3. bin all the results in an output SPFH histogram
.. code-block:: cpp
- for (int i = 0; i < normals->points.size(); i++)
+ for (int i = 0; i < normals->size(); i++)
{
- if (!pcl::isFinite<pcl::Normal>(normals->points[i]))
+ if (!pcl::isFinite<pcl::Normal>((*normals)[i]))
{
PCL_WARN("normals[%d] is not finite\n", i);
}
Configuring your PC to use your Nvidia GPU with PCL
---------------------------------------------------
-In this tutorial we will learn how to check if your PC is
-suitable for use with the GPU methods provided within PCL.
-This tutorial has been tested on Ubuntu 11.04 and 12.04, let
-us know on the user mailing list if you have tested this on other
-distributions.
-The explanation
----------------
-
-In order to run the code you'll need a decent Nvidia GPU with Fermi or Kepler architecture you can check this by::
-
- $ lspci | grep nVidia
-
-This should indicate which GPU you have on your system, if you don't have an Nvidia GPU, we're sorry, but you
-won't be able to use PCL GPU.
-The output of this you can compare with `this link <http://www.nvidia.co.uk/object/cuda-parallel-computing-uk.html>`_
-on the Nvidia website, your card should mention compute capability of 2.x (Fermi) or 3.x (Kepler) or higher.
-If you want to run with a GUI, you can also run::
-
- $ nvidia-settings
-
-From either CLI or from your system settings menu. This should mention the same information.
-
-First you need to test if your CPU is 32 or 64 bit, if it is 64-bit, it is preferred to work in this mode.
-For this you can run::
+In this tutorial you will learn how to configure your system so that it can run the GPU methods provided by PCL.
+This tutorial is written for Ubuntu; other Linux distributions can follow a similar process.
- $ lscpu | grep op-mode
+Windows is currently **not** officially supported for the GPU methods.
-As a next step you will need a up to date version of the Cuda Toolkit. You can get this
-`here <http://developer.nvidia.com/cuda/cuda-downloads>`_, at the time of writing the
-latest version was 4.2 and the beta release of version 5 was available as well.
-
-First you will need to install the latest video drivers, download the correct one from the site
-and run the install file, after this, download the toolkit and install it.
-At the moment of writing this was version 295.41, please choose the most up to date one::
-
- $ wget http://developer.download.nvidia.com/compute/cuda/4_2/rel/drivers/devdriver_4.2_linux_64_295.41.run
-
-Make the driver executable::
-
- $ chmod +x devdriver_4.2_linux_64_295.41.run
+Checking CUDA Version
+---------------------
-Run the driver::
+In order to run the code you will need a system with an Nvidia GPU and CUDA Toolkit v9.2 or newer installed.
+We will not cover the CUDA Toolkit installation in this tutorial, as there are already many good guides for it.
- $ sudo ./devdriver_4.2_linux_64_295.41.run
+You can check your CUDA toolkit version using the following command::
-You need to run this script without your X-server running. You can shut your X-server down as follows:
-Go to a terminal by pressing Ctrl-Alt-F1 and typing::
+ $ nvcc --version | grep "release" | awk '{print $6}' | cut -c2-
+
+
+Checking C++ Version
+--------------------
- $ sudo service gdm stop
+The GPU methods in PCL require at least GCC 7 or Clang 6 (the exact minimum Clang version has not been verified).
+This is not a problem if you are running Ubuntu 18.04 or later. On Ubuntu 16.04, however, you will need to install GCC 7 or Clang 6 (lower versions are untested) manually, because the versions available by default are GCC 5 and Clang 3.8.
-Once you have installed you GPU device driver you will also need to install the CUDA Toolkit::
+You can check your GCC and Clang version using the following commands::
- $ wget http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/cudatoolkit_4.2.9_linux_64_ubuntu11.04.run
- $ chmod +x cudatoolkit_4.2.9_linux_64_ubuntu11.04.run
- $ sudo ./cudatoolkit_4.2.9_linux_64_ubuntu11.04.run
+ $ gcc -dumpversion
-You can get the SDK, but for PCL this is not needed, this provides you with general CUDA examples
-and some scripts to test the performance of your CPU as well as your hardware specifications.
-
-CUDA only compiles with gcc 4.4 and lower, so if your default installed gcc is 4.5 or higher you'll need to install gcc 4.4:
-
- $ sudo apt-get install gcc-4.4
+ $ clang --version
+
+
+Installing GCC
+---------------
-Now you need to force your gcc to use this version, you can remove the older version, the other option is to create a symlink in your home folder and include that in the beginning of your path:
+To install GCC 7, run the following commands::
+
+ $ sudo add-apt-repository ppa:ubuntu-toolchain-r/test
+ $ sudo apt update && sudo apt install g++-7 -y
+
+Set it as the default version::
+
+ $ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 60 --slave /usr/bin/g++ g++ /usr/bin/g++-7
+ $ sudo update-alternatives --config gcc
- $ cd
- $ mkdir bin
+Installing Eigen
+----------------
-Add 'export PATH=$HOME/bin:$PATH as the last line to your ~/.bashrc file.
-Now create the symlinks in your bin folder:
+You will also need Eigen v3.3.7 or newer, since older versions are incompatible with recent CUDA releases.
+If you are on Ubuntu 20.04 or later, there is no issue, since Eigen 3.3.7 is shipped by default.
+On older releases, Eigen v3.3.7 needs to be installed manually::
- $ cd ~/bin
- $ ln -s <your_gcc_installation> c++
- $ ln -s <your_gcc_installation> cc
- $ ln -s <your_gcc_installation> g++
- $ ln -s <your_gcc_installation> gcc
+ $ wget -qO- https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz | tar xz
+ $ sudo apt install -y libblas-dev
+ $ cd eigen-3.3.7 && mkdir build && cd build
+ $ cmake ..
+ $ sudo make install
+ $ cd ../.. && rm -rf eigen-3.3.7/ && rm -f eigen-3.3.7.tar.gz
-If you use colorgcc these links all need to point to /usr/bin/colorgcc.
+Building PCL
+---------------
Now you can get the latest git master (or another one) of PCL and configure your
installation to use the CUDA functions.
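
For example, a typical configure-and-build sequence looks like the one below
(a sketch only; the ``BUILD_GPU`` and ``BUILD_CUDA`` CMake options enable the
GPU and CUDA modules, and paths and job count should be adjusted to your setup)::

 $ git clone https://github.com/PointCloudLibrary/pcl
 $ cd pcl && mkdir build && cd build
 $ cmake .. -DCMAKE_BUILD_TYPE=Release -DBUILD_GPU=ON -DBUILD_CUDA=ON
 $ make -j$(nproc)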
$ make install
Now your installation is finished!
-
-Tested Hardware
----------------
-Please report us the hardware you have tested the following methods with.
-
-+-----------------------+----------------------------------------------------------------------+----------------+
-| Method | Hardware | Reported FPS |
-+=======================+======================================================================+================+
-| Kinfu | GTX680, Intel Xeon CPU E5620 @ 2.40Ghz x 8, 24Gb RAM | 20-27 |
-+-----------------------+----------------------------------------------------------------------+----------------+
-| | GTX480, Intel Xeon CPU W3550 @ 3.07GHz × 4, 7.8Gb RAM | 40 |
-+-----------------------+----------------------------------------------------------------------+----------------+
-| | C2070, Intel Xeon CPU E5620 @ 2.40Ghz x 8, 24Gb RAM | 29 |
-+-----------------------+----------------------------------------------------------------------+----------------+
-| People Pose Detection | GTX680, Intel Xeon CPU E5620 @ 2.40Ghz x 8, 24Gb RAM | 20-23 |
-+-----------------------+----------------------------------------------------------------------+----------------+
-| | C2070, Intel Xeon CPU E5620 @ 2.40Ghz x 8, 24Gb RAM | 10-20 |
-+-----------------------+----------------------------------------------------------------------+----------------+
-
-
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/console/parse.h>
- using namespace std;
using namespace pcl::console;
using namespace pcl::visualization;
// Compute the features
ne.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input cloud->points.size ()
+ // cloud_normals->size () should be the same as the input cloud->size ()
}
The following code snippet will estimate a set of surface normals for a subset of the points in the input dataset.
... read, pass in or create a point cloud ...
// Create a set of indices to be used. For simplicity, we're going to be using the first 10% of the points in cloud
- std::vector<int> indices (std::floor (cloud->points.size () / 10));
+ std::vector<int> indices (std::floor (cloud->size () / 10));
for (std::size_t i = 0; i < indices.size (); ++i) indices[i] = i;
// Create the normal estimation class, and pass the input dataset to it
// Compute the features
ne.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input indicesptr->size ()
+ // cloud_normals->size () should be the same as the input indicesptr->size ()
}
// Compute the features
ne.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input cloud_downsampled->points.size ()
+ // cloud_normals->size () should be the same as the input cloud_downsampled->size ()
}
.. [RusuDissertation] http://mediatum.ub.tum.de/doc/800632/941254.pdf
#. Install Homebrew. See the Homebrew website for instructions.
#. Execute ``brew update``.
- #. Execute ``brew tap homebrew/science``.
To install the latest version using the formula, execute the following command::
void
CopyPointCloudToBuffers (pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud, PointCloudBuffers& cloud_buffers)
{
- const std::size_t nr_points = cloud->points.size ();
+ const std::size_t nr_points = cloud->size ();
cloud_buffers.points.resize (nr_points*3);
cloud_buffers.rgb.resize (nr_points*3);
for (std::size_t i = 0; i < nr_points; ++i)
{
- const pcl::PointXYZRGBA& point = cloud->points[i];
+ const pcl::PointXYZRGBA& point = (*cloud)[i];
if (!pcl_isfinite (point.x) ||
!pcl_isfinite (point.y) ||
const int conversion_factor = 500;
cloud_buffers.points[j*3 + 0] = static_cast<short> (point.x * conversion_factor);
cloud_buffers.points[j*3 + 1] = static_cast<short> (point.y * conversion_factor);
cloud_buffers.points[j*3 + 2] = static_cast<short> (point.z * conversion_factor);
cloud_buffers.rgb[j*3 + 0] = point.r;
cloud_buffers.rgb[j*3 + 1] = point.g;
PointCloudBuffers::Ptr buffers_to_send = getLatestBuffers ();
nr_points = static_cast<unsigned int> (buffers_to_send->points.size()/3);
boost::asio::write (socket, boost::asio::buffer (&nr_points, sizeof (nr_points)));
if (nr_points)
PointCloudBuffers::Ptr buffers_to_send = getLatestBuffers ();
nr_points = static_cast<unsigned int> (buffers_to_send->points.size()/3);
boost::asio::write (socket, boost::asio::buffer (&nr_points, sizeof (nr_points)));
Next, if there is a non-zero number of points, the server sends the xyz and rgb
...
std::vector<int> keypoint_indices2;
- keypoint_indices2.resize(keypoint_indices.points.size());
+ keypoint_indices2.resize(keypoint_indices.size());
for (unsigned int i=0; i<keypoint_indices.size(); ++i) // This step is necessary to get the right vector type
- keypoint_indices2[i]=keypoint_indices.points[i];
+ keypoint_indices2[i]=keypoint_indices[i];
...
Here we copy the indices to the vector used as input for the feature.
narf_descriptor.getParameters().rotation_invariant = rotation_invariant;
pcl::PointCloud<pcl::Narf36> narf_descriptors;
narf_descriptor.compute(narf_descriptors);
- std::cout << "Extracted "<<narf_descriptors.size()<<" descriptors for "<<keypoint_indices.points.size()<< " keypoints.\n";
+ std::cout << "Extracted "<<narf_descriptors.size()<<" descriptors for "<<keypoint_indices.size()<< " keypoints.\n";
...
This code does the actual calculation of the descriptors. It first creates the
pcl::PointCloud<int> keypoint_indices;
narf_keypoint_detector.compute (keypoint_indices);
- std::cout << "Found "<<keypoint_indices.points.size ()<<" key points.\n";
+ std::cout << "Found "<<keypoint_indices.size ()<<" key points.\n";
...
This creates a RangeImageBorderExtractor object, that is needed for the
// Compute the features
ne.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input cloud->points.size ()*
+  // cloud_normals->size () should be the same as the input cloud->size ()*
}
The actual **compute** call from the **NormalEstimation** class does nothing internally but::
The code
--------
-First, create a file, let's say, ``normal_estimation_using_integral_images.cpp`` in your favorite
+First, download the dataset `table_scene_mug_stereo_textured.pcd
+<https://github.com/PointCloudLibrary/pcl/raw/master/test/table_scene_mug_stereo_textured.pcd>`_
+and save it somewhere to disk.
+
+Then, create a file, let's say, ``normal_estimation_using_integral_images.cpp`` in your favorite
editor, and place the following inside it:
-.. code-block:: cpp
+.. literalinclude:: sources/normal_estimation_using_integral_images/normal_estimation_using_integral_images.cpp
+ :language: cpp
:linenos:
-
- #include <pcl/io/io.h>
- #include <pcl/io/pcd_io.h>
- #include <pcl/features/integral_image_normal.h>
- #include <pcl/visualization/cloud_viewer.h>
-
- int
- main ()
- {
- // load point cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
- pcl::io::loadPCDFile ("table_scene_mug_stereo_textured.pcd", *cloud);
-
- // estimate normals
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
-
- pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
- ne.setNormalEstimationMethod (ne.AVERAGE_3D_GRADIENT);
- ne.setMaxDepthChangeFactor(0.02f);
- ne.setNormalSmoothingSize(10.0f);
- ne.setInputCloud(cloud);
- ne.compute(*normals);
-
- // visualize normals
- pcl::visualization::PCLVisualizer viewer("PCL Viewer");
- viewer.setBackgroundColor (0.0, 0.0, 0.5);
- viewer.addPointCloudNormals<pcl::PointXYZ,pcl::Normal>(cloud, normals);
-
- while (!viewer.wasStopped ())
- {
- viewer.spinOnce ();
- }
- return 0;
- }
-
+
The explanation
---------------
Now, let's break down the code piece by piece. In the first part we load a
point cloud from a file:
-.. code-block:: cpp
-
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
- pcl::io::loadPCDFile ("table_scene_mug_stereo_textured.pcd", *cloud);
+.. literalinclude:: sources/normal_estimation_using_integral_images/normal_estimation_using_integral_images.cpp
+ :language: cpp
+ :lines: 11-12
In the second part we create an object for the normal estimation and compute
the normals:
-.. code-block:: cpp
-
- // estimate normals
- pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
-
- pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
- ne.setNormalEstimationMethod (ne.AVERAGE_3D_GRADIENT);
- ne.setMaxDepthChangeFactor(0.02f);
- ne.setNormalSmoothingSize(10.0f);
- ne.setInputCloud(cloud);
- ne.compute(*normals);
+.. literalinclude:: sources/normal_estimation_using_integral_images/normal_estimation_using_integral_images.cpp
+ :language: cpp
+ :lines: 14-22
The following normal estimation methods are available:
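They are selected through ``setNormalEstimationMethod``; a brief illustrative sketch of switching between the enumerators defined by ``pcl::IntegralImageNormalEstimation`` (shown here only as a sketch):

.. code-block:: cpp

    // requires <pcl/features/integral_image_normal.h>
    pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
    // Available enumerators: COVARIANCE_MATRIX, AVERAGE_3D_GRADIENT,
    // AVERAGE_DEPTH_CHANGE, SIMPLE_3D_GRADIENT
    ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);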
In the last part we visualize the point cloud and the corresponding normals:
-.. code-block:: cpp
-
- // visualize normals
- pcl::visualization::PCLVisualizer viewer("PCL Viewer");
- viewer.setBackgroundColor (0.0, 0.0, 0.5);
- viewer.addPointCloudNormals<pcl::PointXYZ,pcl::Normal>(cloud, normals);
-
- while (!viewer.wasStopped ())
- {
- viewer.spinOnce ();
- }
-
+.. literalinclude:: sources/normal_estimation_using_integral_images/normal_estimation_using_integral_images.cpp
+ :language: cpp
+ :lines: 24-32
#include<pcl/visualization/pcl_plotter.h>
//...
-
- using namespace std;
-
+
int
main ()
{
pcl::visualization::PCLPlotter * plotter = new PCLPlotter ();
//defining the polynomial function, y = x^2. The coefficient of x^2 is 1, the rest are 0
- vector<double> func1 (3,0);
+ std::vector<double> func1 (3,0);
func1[2] = 1;
//adding the polynomial func1 to the plotter with [-10, 10] as the range in X axis and "y = x^2" as title
Point-Correspondences
---------------------
-This the most fundamental way of providing input. Provide the point correspondences, that is (x,y) coordinates, for the plot using a vector<pair> in *addPlotData*
+This is the most fundamental way of providing input. Provide the point correspondences, that is, (x,y) coordinates, for the plot using a std::vector<std::pair> in *addPlotData*
.. code-block:: cpp
...
- vector<pair<double, double> > data;
+ std::vector<std::pair<double, double> > data;
populateData (data);
plotter->addPlotData (data,"cos");
...
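``populateData`` stands for whatever routine fills the (x, y) pairs; a minimal hypothetical version, written here to match the ``"cos"`` label, could be:

.. code-block:: cpp

    // Hypothetical helper (needs <cmath>, <utility>, <vector>): sample y = cos(x).
    void
    populateData (std::vector<std::pair<double, double> >& data)
    {
      const int n = 100;
      const double pi = std::acos (-1.0);
      for (int i = 0; i < n; ++i)
      {
        const double x = -pi + 2.0 * pi * i / (n - 1);
        data.push_back (std::make_pair (x, std::cos (x)));
      }
    }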
.. code-block:: cpp
...
- vector<double> func1 (1,0);
+ std::vector<double> func1 (1,0);
func1[0] = 1;
- vector<double> func2 (2,0);
+ std::vector<double> func2 (2,0);
func2[1] = 1;
plotter->addPlotData (std::make_pair (func1, func2),-10, 10, "y = 1/x");
...
- vector<double> freqdata = generateNomalDistData ();
+ std::vector<double> freqdata = generateNomalDistData ();
plotter->addHistogramData (freqdata,10); //number of bins are 10
.. code-block:: cpp
...
- viewer->addLine<pcl::PointXYZRGB> (cloud->points[0], cloud->points[cloud->size() - 1], "line");
+ viewer->addLine<pcl::PointXYZRGB> ((*cloud)[0], (*cloud)[cloud->size() - 1], "line");
...
This line (of code) adds a line (in space) from the first point in the
.. code-block:: cpp
...
- viewer->addSphere (cloud->points[0], 0.2, 0.5, 0.5, 0.0, "sphere");
+ viewer->addSphere ((*cloud)[0], 0.2, 0.5, 0.5, 0.0, "sphere");
...
This next line adds a sphere centred on the first point in the cloud
To create the final PFH representation for the query point, the set of all
quadruplets is binned into a histogram. The binning process divides each
-features’s value range into **b** subdivisions, and counts the number of
+feature’s value range into **b** subdivisions, and counts the number of
occurrences in each subinterval. Since three out of the four features presented
-above are measure of the angles between normals, their values can easily be
+above are measures of the angles between normals, their values can easily be
normalized to the same interval on the trigonometric circle. A binning example
is to divide each feature interval into the same number of equal parts, and
therefore create a histogram with :math:`b^4` bins in a fully correlated space.
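As an illustration only (not necessarily how the library implements it), the correlated bin index can be formed by treating the four quantized feature values as the digits of a base-*b* number:

.. code-block:: cpp

    // Illustrative sketch: map four feature values, each normalized to [0, 1),
    // onto a single histogram bin out of b^4 fully correlated bins.
    #include <array>
    #include <cstddef>

    std::size_t
    correlatedBinIndex (const std::array<double, 4>& normalized_features, std::size_t b)
    {
      std::size_t index = 0;
      for (double f : normalized_features)
      {
        std::size_t sub = static_cast<std::size_t> (f * b); // which of the b subdivisions
        if (sub >= b) sub = b - 1;                          // guard the f == 1.0 edge case
        index = index * b + sub;                            // accumulate base-b digits
      }
      return index; // in [0, b^4)
    }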
// Compute the features
pfh.compute (*pfhs);
- // pfhs->points.size () should have the same size as the input cloud->points.size ()*
+  // pfhs->size () should be the same as the input cloud->size ()*
}
The actual **compute** call from the **PFHEstimation** class does nothing internally but::
.. note::
- For efficiency reasons, the **compute** method in **PFHEstimation** does not check if the normals contains NaN or infinite values.
+ For efficiency reasons, the **compute** method in **PFHEstimation** does not check if the normals contain NaN or infinite values.
Passing such values to **compute()** will result in undefined output.
It is advisable to check the normals, at least during the design of the processing chain or when setting the parameters.
This can be done by inserting the following code before the call to **compute()**:
.. code-block:: cpp
- for (int i = 0; i < normals->points.size(); i++)
+ for (int i = 0; i < normals->size(); i++)
{
- if (!pcl::isFinite<pcl::Normal>(normals->points[i]))
+ if (!pcl::isFinite<pcl::Normal>((*normals)[i]))
{
PCL_WARN("normals[%d] is not finite\n", i);
}
.. literalinclude:: sources/remove_outliers/remove_outliers.cpp
:language: cpp
- :lines: 29-37
+ :lines: 29-38
Basically, we create the RadiusOutlierRemoval filter object, set its parameters and apply it to our input cloud. The radius of search is set to 0.8, and a point must have a minimum of 2 neighbors in that radius to be kept as part of the PointCloud.
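For reference, a condensed sketch of that setup, with the same parameters as described above and assumed variable names, looks like this:

.. code-block:: cpp

    // requires <pcl/filters/radius_outlier_removal.h>
    // Keep only points that have at least 2 neighbors within a 0.8 search radius.
    pcl::RadiusOutlierRemoval<pcl::PointXYZ> outrem;
    outrem.setInputCloud (cloud);
    outrem.setRadiusSearch (0.8);
    outrem.setMinNeighborsInRadius (2);
    outrem.filter (*cloud_filtered);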
.. literalinclude:: sources/remove_outliers/remove_outliers.cpp
:language: cpp
- :lines: 38-53
+ :lines: 39-54
Basically, we create the condition that a given point must satisfy to remain in our PointCloud. In this example, we add two comparisons to the condition: greater than (GT) 0.0 and less than (LT) 0.8. This condition is then used to build the filter.
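A condensed sketch of building such a condition-based filter, assuming the two comparisons are made on the ``z`` field, could look like this:

.. code-block:: cpp

    // requires <pcl/filters/conditional_removal.h>
    // Keep only points whose z value is greater than 0.0 and less than 0.8.
    pcl::ConditionAnd<pcl::PointXYZ>::Ptr range_cond (new pcl::ConditionAnd<pcl::PointXYZ> ());
    range_cond->addComparison (pcl::FieldComparison<pcl::PointXYZ>::ConstPtr (
        new pcl::FieldComparison<pcl::PointXYZ> ("z", pcl::ComparisonOps::GT, 0.0)));
    range_cond->addComparison (pcl::FieldComparison<pcl::PointXYZ>::ConstPtr (
        new pcl::FieldComparison<pcl::PointXYZ> ("z", pcl::ComparisonOps::LT, 0.8)));
    pcl::ConditionalRemoval<pcl::PointXYZ> condrem;
    condrem.setCondition (range_cond);
    condrem.setInputCloud (cloud);
    condrem.filter (*cloud_filtered);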
.. literalinclude:: sources/remove_outliers/remove_outliers.cpp
:language: cpp
- :lines: 58-68
+ :lines: 59-69
Compiling and running remove_outliers.cpp
---------------------------------------------
pcl::PCDReader reader;
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>), cloud_f (new pcl::PointCloud<pcl::PointXYZ>);
reader.read ("table_scene_lms400.pcd", *cloud);
- std::cout << "PointCloud before filtering has: " << cloud->points.size () << " data points." << std::endl; //*
+ std::cout << "PointCloud before filtering has: " << cloud->size () << " data points." << std::endl; //*
// Create the filtering object: downsample the dataset using a leaf size of 1cm
pcl::VoxelGrid<pcl::PointXYZ> vg;
vg.setInputCloud (cloud);
vg.setLeafSize (0.01f, 0.01f, 0.01f);
vg.filter (*cloud_filtered);
- std::cout << "PointCloud after filtering has: " << cloud_filtered->points.size () << " data points." << std::endl; //*
+ std::cout << "PointCloud after filtering has: " << cloud_filtered->size () << " data points." << std::endl; //*
// Create the segmentation object for the planar model and set all the parameters
pcl::SACSegmentation<pcl::PointXYZ> seg;
seg.setMaxIterations (100);
seg.setDistanceThreshold (0.02);
- int i=0, nr_points = (int) cloud_filtered->points.size ();
- while (cloud_filtered->points.size () > 0.3 * nr_points)
+ int i=0, nr_points = (int) cloud_filtered->size ();
+ while (cloud_filtered->size () > 0.3 * nr_points)
{
// Segment the largest planar component from the remaining cloud
seg.setInputCloud (cloud_filtered);
// Get the points associated with the planar surface
extract.filter (*cloud_plane);
- std::cout << "PointCloud representing the planar component: " << cloud_plane->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the planar component: " << cloud_plane->size () << " data points." << std::endl;
// Remove the planar inliers, extract the rest
extract.setNegative (true);
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster->points.push_back (cloud_filtered->points[*pit]); //*
- cloud_cluster->width = cloud_cluster->points.size ();
+ cloud_cluster->push_back ((*cloud_filtered)[*pit]); //*
+ cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
- std::cout << "PointCloud representing the Cluster: " << cloud_cluster->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the Cluster: " << cloud_cluster->size () << " data points." << std::endl;
std::stringstream ss;
ss << "cloud_cluster_" << j << ".pcd";
writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false); //*
n_cloud_b.points.resize (n_cloud_b.width * n_cloud_b.height);
}
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- cloud_a.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
if (strcmp(argv[1], "-p") == 0)
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
{
- cloud_b.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
else
- for (std::size_t i = 0; i < n_cloud_b.points.size (); ++i)
+ for (std::size_t i = 0; i < n_cloud_b.size (); ++i)
{
- n_cloud_b.points[i].normal[0] = 1024 * rand () / (RAND_MAX + 1.0f);
- n_cloud_b.points[i].normal[1] = 1024 * rand () / (RAND_MAX + 1.0f);
- n_cloud_b.points[i].normal[2] = 1024 * rand () / (RAND_MAX + 1.0f);
+ n_cloud_b[i].normal[0] = 1024 * rand () / (RAND_MAX + 1.0f);
+ n_cloud_b[i].normal[1] = 1024 * rand () / (RAND_MAX + 1.0f);
+ n_cloud_b[i].normal[2] = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud A: " << std::endl;
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
- std::cerr << " " << cloud_a.points[i].x << " " << cloud_a.points[i].y << " " << cloud_a.points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
+ std::cerr << " " << cloud_a[i].x << " " << cloud_a[i].y << " " << cloud_a[i].z << std::endl;
std::cerr << "Cloud B: " << std::endl;
if (strcmp(argv[1], "-p") == 0)
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
- std::cerr << " " << cloud_b.points[i].x << " " << cloud_b.points[i].y << " " << cloud_b.points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
+ std::cerr << " " << cloud_b[i].x << " " << cloud_b[i].y << " " << cloud_b[i].z << std::endl;
else
- for (std::size_t i = 0; i < n_cloud_b.points.size (); ++i)
- std::cerr << " " << n_cloud_b.points[i].normal[0] << " " << n_cloud_b.points[i].normal[1] << " " << n_cloud_b.points[i].normal[2] << std::endl;
+ for (std::size_t i = 0; i < n_cloud_b.size (); ++i)
+ std::cerr << " " << n_cloud_b[i].normal[0] << " " << n_cloud_b[i].normal[1] << " " << n_cloud_b[i].normal[2] << std::endl;
// Copy the point cloud data
if (strcmp(argv[1], "-p") == 0)
cloud_c = cloud_a;
cloud_c += cloud_b;
std::cerr << "Cloud C: " << std::endl;
- for (std::size_t i = 0; i < cloud_c.points.size (); ++i)
- std::cerr << " " << cloud_c.points[i].x << " " << cloud_c.points[i].y << " " << cloud_c.points[i].z << " " << std::endl;
+ for (std::size_t i = 0; i < cloud_c.size (); ++i)
+ std::cerr << " " << cloud_c[i].x << " " << cloud_c[i].y << " " << cloud_c[i].z << " " << std::endl;
}
else
{
pcl::concatenateFields (cloud_a, n_cloud_b, p_n_cloud_c);
std::cerr << "Cloud C: " << std::endl;
- for (std::size_t i = 0; i < p_n_cloud_c.points.size (); ++i)
+ for (std::size_t i = 0; i < p_n_cloud_c.size (); ++i)
std::cerr << " " <<
- p_n_cloud_c.points[i].x << " " << p_n_cloud_c.points[i].y << " " << p_n_cloud_c.points[i].z << " " <<
- p_n_cloud_c.points[i].normal[0] << " " << p_n_cloud_c.points[i].normal[1] << " " << p_n_cloud_c.points[i].normal[2] << std::endl;
+ p_n_cloud_c[i].x << " " << p_n_cloud_c[i].y << " " << p_n_cloud_c[i].z << " " <<
+ p_n_cloud_c[i].normal[0] << " " << p_n_cloud_c[i].normal[1] << " " << p_n_cloud_c[i].normal[2] << std::endl;
}
return (0);
}
cloud_a.points.resize (cloud_a.width * cloud_a.height);
cloud_b.points.resize (cloud_b.width * cloud_b.height);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- cloud_a.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
{
- cloud_b.points[i].normal[0] = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].normal[1] = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].normal[2] = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].normal[0] = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].normal[1] = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].normal[2] = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud A: " << std::endl;
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
- std::cerr << " " << cloud_a.points[i].x << " " << cloud_a.points[i].y << " " << cloud_a.points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
+ std::cerr << " " << cloud_a[i].x << " " << cloud_a[i].y << " " << cloud_a[i].z << std::endl;
std::cerr << "Cloud B: " << std::endl;
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
- std::cerr << " " << cloud_b.points[i].normal[0] << " " << cloud_b.points[i].normal[1] << " " << cloud_b.points[i].normal[2] << std::endl;
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
+ std::cerr << " " << cloud_b[i].normal[0] << " " << cloud_b[i].normal[1] << " " << cloud_b[i].normal[2] << std::endl;
pcl::concatenateFields (cloud_a, cloud_b, cloud_c);
std::cerr << "Cloud C: " << std::endl;
- for (std::size_t i = 0; i < cloud_c.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_c.size (); ++i)
std::cerr << " " <<
- cloud_c.points[i].x << " " << cloud_c.points[i].y << " " << cloud_c.points[i].z << " " <<
- cloud_c.points[i].normal[0] << " " << cloud_c.points[i].normal[1] << " " << cloud_c.points[i].normal[2] << std::endl;
+ cloud_c[i].x << " " << cloud_c[i].y << " " << cloud_c[i].z << " " <<
+ cloud_c[i].normal[0] << " " << cloud_c[i].normal[1] << " " << cloud_c[i].normal[2] << std::endl;
return (0);
-}
\ No newline at end of file
+}
cloud_a.points.resize (cloud_a.width * cloud_a.height);
cloud_b.points.resize (cloud_b.width * cloud_b.height);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- cloud_a.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_a.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_a[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
{
- cloud_b.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_b.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ cloud_b[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud A: " << std::endl;
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
- std::cerr << " " << cloud_a.points[i].x << " " << cloud_a.points[i].y << " " << cloud_a.points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
+ std::cerr << " " << cloud_a[i].x << " " << cloud_a[i].y << " " << cloud_a[i].z << std::endl;
std::cerr << "Cloud B: " << std::endl;
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
- std::cerr << " " << cloud_b.points[i].x << " " << cloud_b.points[i].y << " " << cloud_b.points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_b.size (); ++i)
+ std::cerr << " " << cloud_b[i].x << " " << cloud_b[i].y << " " << cloud_b[i].z << std::endl;
// Copy the point cloud data
cloud_c = cloud_a;
cloud_c += cloud_b;
std::cerr << "Cloud C: " << std::endl;
- for (std::size_t i = 0; i < cloud_c.points.size (); ++i)
- std::cerr << " " << cloud_c.points[i].x << " " << cloud_c.points[i].y << " " << cloud_c.points[i].z << " " << std::endl;
+ for (std::size_t i = 0; i < cloud_c.size (); ++i)
+ std::cerr << " " << cloud_c[i].x << " " << cloud_c[i].y << " " << cloud_c[i].z << " " << std::endl;
return (0);
-}
\ No newline at end of file
+}
pass.setFilterLimits (0, 1.1);
pass.filter (*cloud_filtered);
std::cerr << "PointCloud after filtering has: "
- << cloud_filtered->points.size () << " data points." << std::endl;
+ << cloud_filtered->size () << " data points." << std::endl;
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
proj.setModelCoefficients (coefficients);
proj.filter (*cloud_projected);
std::cerr << "PointCloud after projection has: "
- << cloud_projected->points.size () << " data points." << std::endl;
+ << cloud_projected->size () << " data points." << std::endl;
// Create a Concave Hull representation of the projected inliers
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_hull (new pcl::PointCloud<pcl::PointXYZ>);
chull.setAlpha (0.1);
chull.reconstruct (*cloud_hull);
- std::cerr << "Concave hull has: " << cloud_hull->points.size ()
+ std::cerr << "Concave hull has: " << cloud_hull->size ()
<< " data points." << std::endl;
pcl::PCDWriter writer;
// Load the input point cloud
std::cerr << "Loading...\n", tt.tic ();
pcl::io::loadPCDFile ("Statues_4.pcd", *cloud_in);
- std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_in->points.size () << " points\n";
+ std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_in->size () << " points\n";
// Downsample the cloud using a Voxel Grid class
std::cerr << "Downsampling...\n", tt.tic ();
vg.setLeafSize (80.0, 80.0, 80.0);
vg.setDownsampleAllData (true);
vg.filter (*cloud_out);
- std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_out->points.size () << " points\n";
+ std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_out->size () << " points\n";
// Set up a Normal Estimation class and merge data in cloud_with_normals
std::cerr << "Computing normals...\n", tt.tic ();
cec.setInputCloud (cloud_with_normals);
cec.setConditionFunction (&customRegionGrowing);
cec.setClusterTolerance (500.0);
- cec.setMinClusterSize (cloud_with_normals->points.size () / 1000);
- cec.setMaxClusterSize (cloud_with_normals->points.size () / 5);
+ cec.setMinClusterSize (cloud_with_normals->size () / 1000);
+ cec.setMaxClusterSize (cloud_with_normals->size () / 5);
cec.segment (*clusters);
cec.getRemovedClusters (small_clusters, large_clusters);
std::cerr << ">> Done: " << tt.toc () << " ms\n";
// Using the intensity channel for lazy visualization of the output
for (int i = 0; i < small_clusters->size (); ++i)
for (int j = 0; j < (*small_clusters)[i].indices.size (); ++j)
- cloud_out->points[(*small_clusters)[i].indices[j]].intensity = -2.0;
+ (*cloud_out)[(*small_clusters)[i].indices[j]].intensity = -2.0;
for (int i = 0; i < large_clusters->size (); ++i)
for (int j = 0; j < (*large_clusters)[i].indices.size (); ++j)
- cloud_out->points[(*large_clusters)[i].indices[j]].intensity = +10.0;
+ (*cloud_out)[(*large_clusters)[i].indices[j]].intensity = +10.0;
for (int i = 0; i < clusters->size (); ++i)
{
int label = rand () % 8;
for (int j = 0; j < (*clusters)[i].indices.size (); ++j)
- cloud_out->points[(*clusters)[i].indices[j]].intensity = label;
+ (*cloud_out)[(*clusters)[i].indices[j]].intensity = label;
}
// Save the output point cloud
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud before filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud->size (); ++i)
+ std::cerr << " " << (*cloud)[i].x << " "
+ << (*cloud)[i].y << " "
+ << (*cloud)[i].z << std::endl;
// build the condition
pcl::ConditionAnd<pcl::PointXYZ>::Ptr range_cond (new
pcl::ConditionAnd<pcl::PointXYZ> ());
// display pointcloud after filtering
std::cerr << "Cloud after filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud_filtered->points.size (); ++i)
- std::cerr << " " << cloud_filtered->points[i].x << " "
- << cloud_filtered->points[i].y << " "
- << cloud_filtered->points[i].z << std::endl;
+ for (std::size_t i = 0; i < cloud_filtered->size (); ++i)
+ std::cerr << " " << (*cloud_filtered)[i].x << " "
+ << (*cloud_filtered)[i].y << " "
+ << (*cloud_filtered)[i].z << std::endl;
return (0);
}
pass.setFilterFieldName ("z");
pass.setFilterLimits (0, 1.1);
pass.filter (*cloud_filtered);
- std::cerr << "PointCloud after filtering has: " << cloud_filtered->points.size () << " data points." << std::endl;
+ std::cerr << "PointCloud after filtering has: " << cloud_filtered->size () << " data points." << std::endl;
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
chull.setInputCloud (cloud_projected);
chull.reconstruct (*cloud_hull);
- std::cerr << "Convex hull has: " << cloud_hull->points.size () << " data points." << std::endl;
+ std::cerr << "Convex hull has: " << cloud_hull->size () << " data points." << std::endl;
pcl::PCDWriter writer;
writer.write ("table_scene_mug_stereo_textured_hull.pcd", *cloud_hull, false);
// Read in the cloud data
reader.read ("table_scene_mug_stereo_textured.pcd", *cloud);
- std::cerr << "PointCloud has: " << cloud->points.size () << " data points." << std::endl;
+ std::cerr << "PointCloud has: " << cloud->size () << " data points." << std::endl;
// Build a passthrough filter to remove spurious NaNs
pass.setInputCloud (cloud);
pass.setFilterFieldName ("z");
pass.setFilterLimits (0, 1.5);
pass.filter (*cloud_filtered);
- std::cerr << "PointCloud after filtering has: " << cloud_filtered->points.size () << " data points." << std::endl;
+ std::cerr << "PointCloud after filtering has: " << cloud_filtered->size () << " data points." << std::endl;
// Estimate point normals
ne.setSearchMethod (tree);
// Write the planar inliers to disk
pcl::PointCloud<PointT>::Ptr cloud_plane (new pcl::PointCloud<PointT> ());
extract.filter (*cloud_plane);
- std::cerr << "PointCloud representing the planar component: " << cloud_plane->points.size () << " data points." << std::endl;
+ std::cerr << "PointCloud representing the planar component: " << cloud_plane->size () << " data points." << std::endl;
writer.write ("table_scene_mug_stereo_textured_plane.pcd", *cloud_plane, false);
// Remove the planar inliers, extract the rest
std::cerr << "Can't find the cylindrical component." << std::endl;
else
{
- std::cerr << "PointCloud representing the cylindrical component: " << cloud_cylinder->points.size () << " data points." << std::endl;
+ std::cerr << "PointCloud representing the cylindrical component: " << cloud_cylinder->size () << " data points." << std::endl;
writer.write ("table_scene_mug_stereo_textured_cylinder.pcd", *cloud_cylinder, false);
}
return (0);
#include <pcl/features/don.h>
using namespace pcl;
-using namespace std;
int
main (int argc, char *argv[])
}
/// the file to read from.
- string infile = argv[1];
+ std::string infile = argv[1];
/// small scale
- istringstream (argv[2]) >> scale1;
+ std::istringstream (argv[2]) >> scale1;
/// large scale
- istringstream (argv[3]) >> scale2;
- istringstream (argv[4]) >> threshold; // threshold for DoN magnitude
- istringstream (argv[5]) >> segradius; // threshold for radius segmentation
+ std::istringstream (argv[3]) >> scale2;
+ std::istringstream (argv[4]) >> threshold; // threshold for DoN magnitude
+ std::istringstream (argv[5]) >> segradius; // threshold for radius segmentation
// Load cloud in blob format
pcl::PCLPointCloud2 blob;
doncloud = doncloud_filtered;
// Save filtered output
- std::cout << "Filtered Pointcloud: " << doncloud->points.size () << " data points." << std::endl;
+ std::cout << "Filtered Pointcloud: " << doncloud->size () << " data points." << std::endl;
writer.write<pcl::PointNormal> ("don_filtered.pcd", *doncloud, false);
pcl::PointCloud<PointNormal>::Ptr cloud_cluster_don (new pcl::PointCloud<PointNormal>);
for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
{
- cloud_cluster_don->points.push_back (doncloud->points[*pit]);
+ cloud_cluster_don->points.push_back ((*doncloud)[*pit]);
}
- cloud_cluster_don->width = int (cloud_cluster_don->points.size ());
+ cloud_cluster_don->width = cloud_cluster_don->size ();
cloud_cluster_don->height = 1;
cloud_cluster_don->is_dense = true;
//Save cluster
- std::cout << "PointCloud representing the Cluster: " << cloud_cluster_don->points.size () << " data points." << std::endl;
- stringstream ss;
+ std::cout << "PointCloud representing the Cluster: " << cloud_cluster_don->size () << " data points." << std::endl;
+ std::stringstream ss;
ss << "don_cluster_" << j << ".pcd";
writer.write<pcl::PointNormal> (ss.str (), *cloud_cluster_don, false);
}
// Create the filtering object
pcl::ExtractIndices<pcl::PointXYZ> extract;
- int i = 0, nr_points = (int) cloud_filtered->points.size ();
+ int i = 0, nr_points = (int) cloud_filtered->size ();
// While 30% of the original cloud is still there
- while (cloud_filtered->points.size () > 0.3 * nr_points)
+ while (cloud_filtered->size () > 0.3 * nr_points)
{
// Segment the largest planar component from the remaining cloud
seg.setInputCloud (cloud_filtered);
using namespace pcl::visualization;
using namespace pcl::gpu;
using namespace pcl;
-using namespace std;
struct SampledScopeTime : public StopWatch
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-string
+std::string
make_name(int counter, const char* suffix)
{
char buf[4096];
people_detector_.depth_device1_.download(depth_host_.points, c);
}
- depth_view_.showShortImage(&depth_host_.points[0], depth_host_.width, depth_host_.height, 0, 5000, true);
+ depth_view_.showShortImage(&depth_host_[0], depth_host_.width, depth_host_.height, 0, 5000, true);
depth_view_.spinOnce(1, true);
if (write)
depth_host_.points.resize(w *h);
depth_host_.width = w;
depth_host_.height = h;
- std::copy(data, data + w * h, &depth_host_.points[0]);
+ std::copy(data, data + w * h, &depth_host_[0]);
//getting image
w = image_wrapper->getWidth();
for(int i = 0; i < rgba_host_.size(); ++i)
{
const unsigned char *pixel = &rgb_host_[i * 3];
- RGB& rgba = rgba_host_.points[i];
+ RGB& rgba = rgba_host_[i];
rgba.r = pixel[0];
rgba.g = pixel[1];
rgba.b = pixel[2];
}
- image_device_.upload(&rgba_host_.points[0], s, h, w);
+ image_device_.upload(&rgba_host_[0], s, h, w);
}
data_ready_cond_.notify_one();
}
pcl::Grabber::Ptr capture (new pcl::OpenNIGrabber());
//selecting tree files
- std::vector<string> tree_files;
+ std::vector<std::string> tree_files;
tree_files.push_back("Data/forest1/tree_20.txt");
tree_files.push_back("Data/forest2/tree_20.txt");
tree_files.push_back("Data/forest3/tree_20.txt");
Eigen::VectorXf ground_coeffs;
ground_coeffs.resize(4);
std::vector<int> clicked_points_indices;
- for (unsigned int i = 0; i < clicked_points_3d->points.size(); i++)
+ for (unsigned int i = 0; i < clicked_points_3d->size(); i++)
clicked_points_indices.push_back(i);
pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
model_plane.computeModelCoefficients(clicked_points_indices,ground_coeffs);
{
ObjectModel query_object;
constructObjectModel (query_cloud, query_object);
- const GlobalDescriptorT & query_descriptor = query_object.global_descriptor->points[0];
+ const GlobalDescriptorT & query_descriptor = (*query_object.global_descriptor)[0];
std::vector<int> nn_index (1);
std::vector<float> nn_sqr_distance (1);
{
ObjectModel query_object;
constructObjectModel (query_cloud, query_object);
- const GlobalDescriptorT & query_descriptor = query_object.global_descriptor->points[0];
+ const GlobalDescriptorT & query_descriptor = (*query_object.global_descriptor)[0];
std::vector<int> nn_index (1);
std::vector<float> nn_sqr_distance (1);
}
ObjectRecognitionParameters params;
- ifstream params_stream;
+ std::ifstream params_stream;
//Parse filter parameters
std::string filter_parameters_file;
pcl::console::print_info ("Loaded %s (%lu points)\n", argv[1], input->size ());
ObjectRecognitionParameters params;
- ifstream params_stream;
+ std::ifstream params_stream;
//Parse filter parameters
std::string filter_parameters_file;
}
// Get the pair of points
- const PointT & p_left = keypoints_left->points[i];
- const PointT & p_right = keypoints_right->points[correspondences[i]];
+ const PointT & p_left = (*keypoints_left)[i];
+ const PointT & p_right = (*keypoints_right)[correspondences[i]];
// Generate a random (bright) color
double r = (rand() % 100);
pcl::io::loadPCDFile (argv[1], *query);
pcl::console::print_info ("Loaded %s (%lu points)\n", argv[1], query->size ());
- ifstream input_stream;
+ std::ifstream input_stream;
ObjectRecognitionParameters params;
// Parse the exemplar files
keypoint_detector_->setInputCloud(input);
keypoint_detector_->setSearchSurface(input);
keypoint_detector_->compute(*keypoints);
- std::cout << "OK. keypoints found: " << keypoints->points.size() << std::endl;
+ std::cout << "OK. keypoints found: " << keypoints->size() << std::endl;
}
template<typename FeatureType>
void ICCVTutorial<FeatureType>::extractDescriptors (typename pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr input, typename pcl::PointCloud<pcl::PointXYZI>::Ptr keypoints, typename pcl::PointCloud<FeatureType>::Ptr features)
{
typename pcl::PointCloud<pcl::PointXYZRGB>::Ptr kpts(new pcl::PointCloud<pcl::PointXYZRGB>);
- kpts->points.resize(keypoints->points.size());
+ kpts->points.resize(keypoints->size());
pcl::copyPointCloud(*keypoints, *kpts);
point.g = 255;
point.b = 255;
- for (std::size_t i_point = 0; i_point < testing_cloud->points.size (); i_point++)
+ for (std::size_t i_point = 0; i_point < testing_cloud->size (); i_point++)
{
- point.x = testing_cloud->points[i_point].x;
- point.y = testing_cloud->points[i_point].y;
- point.z = testing_cloud->points[i_point].z;
+ point.x = (*testing_cloud)[i_point].x;
+ point.y = (*testing_cloud)[i_point].y;
+ point.z = (*testing_cloud)[i_point].z;
colored_cloud->points.push_back (point);
}
- colored_cloud->height += testing_cloud->points.size ();
+ colored_cloud->height += testing_cloud->size ();
point.r = 255;
point.g = 0;
{
ObjectModel query_object;
constructObjectModel (query_cloud, query_object);
- const GlobalDescriptorT & query_descriptor = query_object.global_descriptor->points[0];
+ const GlobalDescriptorT & query_descriptor = (*query_object.global_descriptor)[0];
std::vector<int> nn_index (1);
std::vector<float> nn_sqr_distance (1);
{
ObjectModel query_object;
constructObjectModel (query_cloud, query_object);
- const GlobalDescriptorT & query_descriptor = query_object.global_descriptor->points[0];
+ const GlobalDescriptorT & query_descriptor = (*query_object.global_descriptor)[0];
std::vector<int> nn_index (1);
std::vector<float> nn_sqr_distance (1);
pcl::console::print_info ("Loaded %s (%lu points)\n", argv[1], input->size ());
ObjectRecognitionParameters params;
- ifstream params_stream;
+ std::ifstream params_stream;
//Parse filter parameters
std::string filter_parameters_file;
}
// Get the pair of points
- const PointT & p_left = keypoints_left->points[i];
- const PointT & p_right = keypoints_right->points[correspondences[i]];
+ const PointT & p_left = (*keypoints_left)[i];
+ const PointT & p_right = (*keypoints_right)[correspondences[i]];
// Generate a random (bright) color
double r = (rand() % 100);
pcl::io::loadPCDFile (argv[1], *query);
pcl::console::print_info ("Loaded %s (%lu points)\n", argv[1], query->size ());
- ifstream input_stream;
+ std::ifstream input_stream;
ObjectRecognitionParameters params;
// Parse the exemplar files
point.z = 1024 * rand() / (RAND_MAX + 1.0f);
}
- std::cout << "Saved " << cloud_in->points.size () << " data points to input:" << std::endl;
+ std::cout << "Saved " << cloud_in->size () << " data points to input:" << std::endl;
for (auto& point : *cloud_in)
std::cout << point << std::endl;
*cloud_out = *cloud_in;
- std::cout << "size:" << cloud_out->points.size() << std::endl;
+ std::cout << "size:" << cloud_out->size() << std::endl;
for (auto& point : *cloud_out)
point.x += 0.7f;
- std::cout << "Transformed " << cloud_in->points.size () << " data points:" << std::endl;
+ std::cout << "Transformed " << cloud_in->size () << " data points:" << std::endl;
for (auto& point : *cloud_out)
std::cout << point << std::endl;
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- cloud->points[i].x = 1024.0f * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024.0f * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].x = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].y = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].z = 1024.0f * rand () / (RAND_MAX + 1.0f);
}
pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;
if ( kdtree.nearestKSearch (searchPoint, K, pointIdxNKNSearch, pointNKNSquaredDistance) > 0 )
{
for (std::size_t i = 0; i < pointIdxNKNSearch.size (); ++i)
- std::cout << " " << cloud->points[ pointIdxNKNSearch[i] ].x
- << " " << cloud->points[ pointIdxNKNSearch[i] ].y
- << " " << cloud->points[ pointIdxNKNSearch[i] ].z
+ std::cout << " " << (*cloud)[ pointIdxNKNSearch[i] ].x
+ << " " << (*cloud)[ pointIdxNKNSearch[i] ].y
+ << " " << (*cloud)[ pointIdxNKNSearch[i] ].z
<< " (squared distance: " << pointNKNSquaredDistance[i] << ")" << std::endl;
}
if ( kdtree.radiusSearch (searchPoint, radius, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 )
{
for (std::size_t i = 0; i < pointIdxRadiusSearch.size (); ++i)
- std::cout << " " << cloud->points[ pointIdxRadiusSearch[i] ].x
- << " " << cloud->points[ pointIdxRadiusSearch[i] ].y
- << " " << cloud->points[ pointIdxRadiusSearch[i] ].z
+ std::cout << " " << (*cloud)[ pointIdxRadiusSearch[i] ].x
+ << " " << (*cloud)[ pointIdxRadiusSearch[i] ].y
+ << " " << (*cloud)[ pointIdxRadiusSearch[i] ].z
<< " (squared distance: " << pointRadiusSquaredDistance[i] << ")" << std::endl;
}
// 1.1 Add noise
for (std::size_t i = 0; i < noise_size; ++i)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
// 1.2 Add sphere:
double rand_x1 = 1;
rand_x2 = (rand () % 100) / (50.0f) - 1;
}
double pre_calc = sqrt (1 - pow (rand_x1, 2) - pow (rand_x2, 2));
- cloud->points[i].x = 2 * rand_x1 * pre_calc;
- cloud->points[i].y = 2 * rand_x2 * pre_calc;
- cloud->points[i].z = 1 - 2 * (pow (rand_x1, 2) + pow (rand_x2, 2));
+ (*cloud)[i].x = 2 * rand_x1 * pre_calc;
+ (*cloud)[i].y = 2 * rand_x2 * pre_calc;
+ (*cloud)[i].z = 1 - 2 * (pow (rand_x1, 2) + pow (rand_x2, 2));
rand_x1 = 1;
rand_x2 = 1;
}
std::cerr << "Cloud before filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cout << " " << cloud->points[i].x << " " << cloud->points[i].y << " " << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cout << " " << point.x << " " << point.y << " " << point.z << std::endl;
// 2. filter sphere:
// 2.1 generate model:
sphere_filter.filter (*cloud_sphere_filtered);
std::cerr << "Sphere after filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud_sphere_filtered->points.size (); ++i)
- std::cout << " " << cloud_sphere_filtered->points[i].x << " " << cloud_sphere_filtered->points[i].y << " " << cloud_sphere_filtered->points[i].z
- << std::endl;
+ for (const auto& point: *cloud_sphere_filtered)
+ std::cout << " " << point.x << " " << point.y << " " << point.z << std::endl;
return (0);
}
#include <pcl/range_image/range_image.h>
#include <pcl/features/narf.h>
#include <pcl/console/parse.h>
+#include <pcl/common/file_io.h> // for getFilenameWithoutExtension
float angular_resolution = 0.5f;
int rotation_invariant = 0;
// Extract NARF features:
std::cout << "Now extracting NARFs in every image point.\n";
std::vector<std::vector<pcl::Narf*> > narfs;
- narfs.resize (range_image.points.size ());
+ narfs.resize (range_image.size ());
int last_percentage=-1;
for (unsigned int y=0; y<range_image.height; ++y)
{
for (unsigned int x=0; x<range_image.width; ++x)
{
- int index = y*range_image.width+x;
- int percentage = (int) ((100*index) / range_image.points.size ());
+      const auto index = y*range_image.width+x;
+      const int percentage = static_cast<int> ((100*index) / range_image.size ());
if (percentage > last_percentage)
{
std::cout << percentage<<"% "<<std::flush;
continue;
//descriptor_distances_widget.show (false);
- float* descriptor_distance_image = new float[range_image.points.size ()];
- for (unsigned int point_index=0; point_index<range_image.points.size (); ++point_index)
+ float* descriptor_distance_image = new float[range_image.size ()];
+ for (unsigned int point_index=0; point_index<range_image.size (); ++point_index)
{
float& descriptor_distance = descriptor_distance_image[point_index];
descriptor_distance = std::numeric_limits<float>::infinity ();
#include <pcl/keypoints/narf_keypoint.h>
#include <pcl/features/narf_descriptor.h>
#include <pcl/console/parse.h>
+#include <pcl/common/file_io.h> // for getFilenameWithoutExtension
typedef pcl::PointXYZ PointType;
point_cloud.points.push_back (point);
}
}
- point_cloud.width = (int) point_cloud.points.size (); point_cloud.height = 1;
+ point_cloud.width = point_cloud.size (); point_cloud.height = 1;
}
// -----------------------------------------------
pcl::PointCloud<int> keypoint_indices;
narf_keypoint_detector.compute (keypoint_indices);
- std::cout << "Found "<<keypoint_indices.points.size ()<<" key points.\n";
+ std::cout << "Found "<<keypoint_indices.size ()<<" key points.\n";
// ----------------------------------------------
// -----Show keypoints in range image widget-----
// ----------------------------------------------
- //for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- //range_image_widget.markPoint (keypoint_indices.points[i]%range_image.width,
- //keypoint_indices.points[i]/range_image.width);
+ //for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ //range_image_widget.markPoint (keypoint_indices[i]%range_image.width,
+ //keypoint_indices[i]/range_image.width);
// -------------------------------------
// -----Show keypoints in 3D viewer-----
// -------------------------------------
pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints_ptr (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>& keypoints = *keypoints_ptr;
- keypoints.points.resize (keypoint_indices.points.size ());
- for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- keypoints.points[i].getVector3fMap () = range_image.points[keypoint_indices.points[i]].getVector3fMap ();
+ keypoints.resize (keypoint_indices.size ());
+ for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ keypoints[i].getVector3fMap () = range_image[keypoint_indices[i]].getVector3fMap ();
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> keypoints_color_handler (keypoints_ptr, 0, 255, 0);
viewer.addPointCloud<pcl::PointXYZ> (keypoints_ptr, keypoints_color_handler, "keypoints");
viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "keypoints");
// -----Extract NARF descriptors for interest points-----
// ------------------------------------------------------
std::vector<int> keypoint_indices2;
- keypoint_indices2.resize (keypoint_indices.points.size ());
+ keypoint_indices2.resize (keypoint_indices.size ());
for (unsigned int i=0; i<keypoint_indices.size (); ++i) // This step is necessary to get the right vector type
- keypoint_indices2[i]=keypoint_indices.points[i];
+ keypoint_indices2[i]=keypoint_indices[i];
pcl::NarfDescriptor narf_descriptor (&range_image, &keypoint_indices2);
narf_descriptor.getParameters ().support_size = support_size;
narf_descriptor.getParameters ().rotation_invariant = rotation_invariant;
pcl::PointCloud<pcl::Narf36> narf_descriptors;
narf_descriptor.compute (narf_descriptors);
std::cout << "Extracted "<<narf_descriptors.size ()<<" descriptors for "
- <<keypoint_indices.points.size ()<< " keypoints.\n";
+ <<keypoint_indices.size ()<< " keypoints.\n";
//--------------------
// -----Main loop-----
#include <pcl/features/range_image_border_extractor.h>
#include <pcl/keypoints/narf_keypoint.h>
#include <pcl/console/parse.h>
+#include <pcl/common/file_io.h> // for getFilenameWithoutExtension
typedef pcl::PointXYZ PointType;
point_cloud.points.push_back (point);
}
}
- point_cloud.width = (int) point_cloud.points.size (); point_cloud.height = 1;
+ point_cloud.width = point_cloud.size (); point_cloud.height = 1;
}
// -----------------------------------------------
pcl::PointCloud<int> keypoint_indices;
narf_keypoint_detector.compute (keypoint_indices);
- std::cout << "Found "<<keypoint_indices.points.size ()<<" key points.\n";
+ std::cout << "Found "<<keypoint_indices.size ()<<" key points.\n";
// ----------------------------------------------
// -----Show keypoints in range image widget-----
// ----------------------------------------------
- //for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- //range_image_widget.markPoint (keypoint_indices.points[i]%range_image.width,
- //keypoint_indices.points[i]/range_image.width);
+ //for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ //range_image_widget.markPoint (keypoint_indices[i]%range_image.width,
+ //keypoint_indices[i]/range_image.width);
// -------------------------------------
// -----Show keypoints in 3D viewer-----
// -------------------------------------
pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints_ptr (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>& keypoints = *keypoints_ptr;
- keypoints.points.resize (keypoint_indices.points.size ());
- for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- keypoints.points[i].getVector3fMap () = range_image.points[keypoint_indices.points[i]].getVector3fMap ();
+ keypoints.resize (keypoint_indices.size ());
+ for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ keypoints[i].getVector3fMap () = range_image[keypoint_indices[i]].getVector3fMap ();
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> keypoints_color_handler (keypoints_ptr, 0, 255, 0);
viewer.addPointCloud<pcl::PointXYZ> (keypoints_ptr, keypoints_color_handler, "keypoints");
cloudA->height = 1;
cloudA->points.resize (cloudA->width * cloudA->height);
- for (std::size_t i = 0; i < cloudA->points.size (); ++i)
+ for (std::size_t i = 0; i < cloudA->size (); ++i)
{
- cloudA->points[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
- cloudA->points[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
- cloudA->points[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudA)[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudA)[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudA)[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
}
// Add points from cloudA to octree
cloudB->height = 1;
cloudB->points.resize (cloudB->width * cloudB->height);
- for (std::size_t i = 0; i < cloudB->points.size (); ++i)
+ for (std::size_t i = 0; i < cloudB->size (); ++i)
{
- cloudB->points[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
- cloudB->points[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
- cloudB->points[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudB)[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudB)[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloudB)[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
}
// Add points from cloudB to octree
std::cout << "Output from getPointIndicesFromNewVoxels:" << std::endl;
for (std::size_t i = 0; i < newPointIdxVector.size (); ++i)
std::cout << i << "# Index:" << newPointIdxVector[i]
- << " Point:" << cloudB->points[newPointIdxVector[i]].x << " "
- << cloudB->points[newPointIdxVector[i]].y << " "
- << cloudB->points[newPointIdxVector[i]].z << std::endl;
+ << " Point:" << (*cloudB)[newPointIdxVector[i]].x << " "
+ << (*cloudB)[newPointIdxVector[i]].y << " "
+ << (*cloudB)[newPointIdxVector[i]].z << std::endl;
}
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- cloud->points[i].x = 1024.0f * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024.0f * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].x = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].y = 1024.0f * rand () / (RAND_MAX + 1.0f);
+ (*cloud)[i].z = 1024.0f * rand () / (RAND_MAX + 1.0f);
}
float resolution = 128.0f;
<< std::endl;
for (std::size_t i = 0; i < pointIdxVec.size (); ++i)
- std::cout << " " << cloud->points[pointIdxVec[i]].x
- << " " << cloud->points[pointIdxVec[i]].y
- << " " << cloud->points[pointIdxVec[i]].z << std::endl;
+ std::cout << " " << (*cloud)[pointIdxVec[i]].x
+ << " " << (*cloud)[pointIdxVec[i]].y
+ << " " << (*cloud)[pointIdxVec[i]].z << std::endl;
}
// K nearest neighbor search
if (octree.nearestKSearch (searchPoint, K, pointIdxNKNSearch, pointNKNSquaredDistance) > 0)
{
for (std::size_t i = 0; i < pointIdxNKNSearch.size (); ++i)
- std::cout << " " << cloud->points[ pointIdxNKNSearch[i] ].x
- << " " << cloud->points[ pointIdxNKNSearch[i] ].y
- << " " << cloud->points[ pointIdxNKNSearch[i] ].z
+ std::cout << " " << (*cloud)[ pointIdxNKNSearch[i] ].x
+ << " " << (*cloud)[ pointIdxNKNSearch[i] ].y
+ << " " << (*cloud)[ pointIdxNKNSearch[i] ].z
<< " (squared distance: " << pointNKNSquaredDistance[i] << ")" << std::endl;
}
if (octree.radiusSearch (searchPoint, radius, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0)
{
for (std::size_t i = 0; i < pointIdxRadiusSearch.size (); ++i)
- std::cout << " " << cloud->points[ pointIdxRadiusSearch[i] ].x
- << " " << cloud->points[ pointIdxRadiusSearch[i] ].y
- << " " << cloud->points[ pointIdxRadiusSearch[i] ].z
+ std::cout << " " << (*cloud)[ pointIdxRadiusSearch[i] ].x
+ << " " << (*cloud)[ pointIdxRadiusSearch[i] ].y
+ << " " << (*cloud)[ pointIdxRadiusSearch[i] ].z
<< " (squared distance: " << pointRadiusSquaredDistance[i] << ")" << std::endl;
}
/* ---[ */
#include <iostream>
-using namespace std;
#include <pcl/io/openni_grabber.h>
#include <pcl/range_image/range_image_planar.h>
#include <pcl/common/common_headers.h>
double keypoint_extraction_start_time = pcl::getTime();
narf_keypoint_detector.compute (keypoint_indices);
double keypoint_extraction_time = pcl::getTime()-keypoint_extraction_start_time;
- std::cout << "Found "<<keypoint_indices.points.size ()<<" key points. "
+ std::cout << "Found "<<keypoint_indices.size ()<<" key points. "
<< "This took "<<1000.0*keypoint_extraction_time<<"ms.\n";
// ----------------------------------------------
// -----Show keypoints in range image widget-----
// ----------------------------------------------
range_image_widget.showRangeImage (range_image_planar, 0.5f, 10.0f);
- //for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- //range_image_widget.markPoint (keypoint_indices.points[i]%range_image_planar.width,
- //keypoint_indices.points[i]/range_image_planar.width,
+ //for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ //range_image_widget.markPoint (keypoint_indices[i]%range_image_planar.width,
+ //keypoint_indices[i]/range_image_planar.width,
//pcl::visualization::Vector3ub (0,255,0));
// -------------------------------------
if (!viewer.updatePointCloud<pcl::PointWithRange> (range_image_planar_ptr, color_handler_cloud, "range image"))
viewer.addPointCloud<pcl::PointWithRange> (range_image_planar_ptr, color_handler_cloud, "range image");
- keypoints_cloud.points.resize (keypoint_indices.points.size ());
- for (std::size_t i=0; i<keypoint_indices.points.size (); ++i)
- keypoints_cloud.points[i].getVector3fMap () =
- range_image_planar.points[keypoint_indices.points[i]].getVector3fMap ();
+ keypoints_cloud.resize (keypoint_indices.size ());
+ for (std::size_t i=0; i<keypoint_indices.size (); ++i)
+ keypoints_cloud[i].getVector3fMap () =
+ range_image_planar[keypoint_indices[i]].getVector3fMap ();
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> color_handler_keypoints
(keypoints_cloud_ptr, 0, 255, 0);
if (!viewer.updatePointCloud (keypoints_cloud_ptr, color_handler_keypoints, "keypoints"))
/* ---[ */
#include <iostream>
-using namespace std;
#include <pcl/io/openni_grabber.h>
#include <pcl/range_image/range_image_planar.h>
#include <pcl/common/common_headers.h>
showCloudsLeft(source, target);
PointCloud::Ptr temp (new PointCloud);
- PCL_INFO ("Aligning %s (%d) with %s (%d).\n", data[i-1].f_name.c_str (), source->points.size (), data[i].f_name.c_str (), target->points.size ());
+ PCL_INFO ("Aligning %s (%zu) with %s (%zu).\n", data[i-1].f_name.c_str (), static_cast<std::size_t>(source->size ()), data[i].f_name.c_str (), static_cast<std::size_t>(target->size ()));
pairAlign (source, target, temp, pairTransform, true);
//transform current pair into the global transform
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud before filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
// Create the filtering object
pcl::PassThrough<pcl::PointXYZ> pass;
pass.filter (*cloud_filtered);
std::cerr << "Cloud after filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud_filtered->points.size (); ++i)
- std::cerr << " " << cloud_filtered->points[i].x << " "
- << cloud_filtered->points[i].y << " "
- << cloud_filtered->points[i].z << std::endl;
+ for (const auto& point: *cloud_filtered)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
return (0);
}
<< cloud->width * cloud->height
<< " data points from test_pcd.pcd with the following fields: "
<< std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cout << " " << cloud->points[i].x
- << " " << cloud->points[i].y
- << " " << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cout << " " << point.x
+ << " " << point.y
+ << " " << point.z << std::endl;
return (0);
}
cloud.is_dense = false;
cloud.points.resize (cloud.width * cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (auto& point: cloud)
{
- cloud.points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud.points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud.points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
}
pcl::io::savePCDFileASCII ("test_pcd.pcd", cloud);
- std::cerr << "Saved " << cloud.points.size () << " data points to test_pcd.pcd." << std::endl;
+ std::cerr << "Saved " << cloud.size () << " data points to test_pcd.pcd." << std::endl;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
- std::cerr << " " << cloud.points[i].x << " " << cloud.points[i].y << " " << cloud.points[i].z << std::endl;
+ for (const auto& point: cloud)
+ std::cerr << " " << point.x << " " << point.y << " " << point.z << std::endl;
return (0);
-}
\ No newline at end of file
+}
#include<utility>
#include<cmath> //for std::abs()
-using namespace std;
using namespace pcl::visualization;
void
//------------------------------------
//-----Add shapes at cloud points-----
//------------------------------------
- viewer->addLine<pcl::PointXYZRGB> (cloud->points[0],
- cloud->points[cloud->size() - 1], "line");
- viewer->addSphere (cloud->points[0], 0.2, 0.5, 0.5, 0.0, "sphere");
+ viewer->addLine<pcl::PointXYZRGB> ((*cloud)[0],
+ (*cloud)[cloud->size() - 1], "line");
+ viewer->addSphere ((*cloud)[0], 0.2, 0.5, 0.5, 0.0, "sphere");
//---------------------------------------
//-----Add shapes at other locations-----
b += 12;
}
}
- basic_cloud_ptr->width = (int) basic_cloud_ptr->points.size ();
+ basic_cloud_ptr->width = basic_cloud_ptr->size ();
basic_cloud_ptr->height = 1;
- point_cloud_ptr->width = (int) point_cloud_ptr->points.size ();
+ point_cloud_ptr->width = point_cloud_ptr->size ();
point_cloud_ptr->height = 1;
// ----------------------------------------------------------------
cloud->points.resize (cloud->width * cloud->height);
// Generate the data
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1.0;
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1.0;
}
// Set a few outliers
- cloud->points[0].z = 2.0;
- cloud->points[3].z = -2.0;
- cloud->points[6].z = 4.0;
+ (*cloud)[0].z = 2.0;
+ (*cloud)[3].z = -2.0;
+ (*cloud)[6].z = 4.0;
- std::cerr << "Point cloud data: " << cloud->points.size () << " points" << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ std::cerr << "Point cloud data: " << cloud->size () << " points" << std::endl;
+ for (const auto& point: *cloud)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
std::cerr << "Model inliers: " << inliers->indices.size () << std::endl;
-  for (std::size_t i = 0; i < inliers->indices.size (); ++i)
- std::cerr << inliers->indices[i] << " " << cloud->points[inliers->indices[i]].x << " "
- << cloud->points[inliers->indices[i]].y << " "
- << cloud->points[inliers->indices[i]].z << std::endl;
+ for (const auto& idx: inliers->indices)
+ std::cerr << idx << " " << cloud->points[idx].x << " "
+ << cloud->points[idx].y << " "
+ << cloud->points[idx].z << std::endl;
return (0);
}
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud before projection: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
// Create a set of planar coefficients with X=Y=0,Z=1
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients ());
proj.filter (*cloud_projected);
std::cerr << "Cloud after projection: " << std::endl;
- for (std::size_t i = 0; i < cloud_projected->points.size (); ++i)
- std::cerr << " " << cloud_projected->points[i].x << " "
- << cloud_projected->points[i].y << " "
- << cloud_projected->points[i].z << std::endl;
+ for (const auto& point: *cloud_projected)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
return (0);
}
cloud_->resize (500);
// Fill the cloud with random points
- for (std::size_t i = 0; i < cloud_->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_->size (); ++i)
{
- cloud_->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud_->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud_)[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud_)[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
+ (*cloud_)[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
// Set up the QVTK window
switch (filtering_axis_)
{
case 0: // x
- min = cloud_->points[0].x;
- max = cloud_->points[0].x;
+ min = (*cloud_)[0].x;
+ max = (*cloud_)[0].x;
break;
case 1: // y
- min = cloud_->points[0].y;
- max = cloud_->points[0].y;
+ min = (*cloud_)[0].y;
+ max = (*cloud_)[0].y;
break;
default: // z
- min = cloud_->points[0].z;
- max = cloud_->points[0].z;
+ min = (*cloud_)[0].z;
+ max = (*cloud_)[0].z;
break;
}
blue = 128;
// Fill the cloud with some points
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].r = red;
- cloud->points[i].g = green;
- cloud->points[i].b = blue;
+ point.r = red;
+ point.g = green;
+ point.b = blue;
}
// Set up the QVTK window
printf ("Random button was pressed\n");
// Set the new color
- for (std::size_t i = 0; i < cloud->size(); i++)
+ for (auto& point: *cloud)
{
- cloud->points[i].r = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
- cloud->points[i].g = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
- cloud->points[i].b = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
+ point.r = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
+ point.g = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
+ point.b = 255 *(1024 * rand () / (RAND_MAX + 1.0f));
}
viewer->updatePointCloud (cloud, "cloud");
PCLViewer::RGBsliderReleased ()
{
// Set the new color
- for (std::size_t i = 0; i < cloud->size (); i++)
+ for (auto& point: *cloud)
{
- cloud->points[i].r = red;
- cloud->points[i].g = green;
- cloud->points[i].b = blue;
+ point.r = red;
+ point.g = green;
+ point.b = blue;
}
viewer->updatePointCloud (cloud, "cloud");
ui->qvtkWidget->update ();
cloud->height = 1;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cerr << "Cloud before filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
// build the filter
pcl::RadiusOutlierRemoval<pcl::PointXYZ> outrem;
outrem.setInputCloud(cloud);
// display pointcloud after filtering
std::cerr << "Cloud after filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud_filtered->points.size (); ++i)
- std::cerr << " " << cloud_filtered->points[i].x << " "
- << cloud_filtered->points[i].y << " "
- << cloud_filtered->points[i].z << std::endl;
+ for (const auto& point: *cloud_filtered)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
return (0);
}
cloud->height = 1;
cloud->is_dense = false;
cloud->points.resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+  for (pcl::index_t i = 0; i < static_cast<pcl::index_t> (cloud->size ()); ++i)
{
if (pcl::console::find_argument (argc, argv, "-s") >= 0 || pcl::console::find_argument (argc, argv, "-sf") >= 0)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].x = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].y = 1024 * rand () / (RAND_MAX + 1.0);
if (i % 5 == 0)
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].z = 1024 * rand () / (RAND_MAX + 1.0);
else if(i % 2 == 0)
- cloud->points[i].z = sqrt( 1 - (cloud->points[i].x * cloud->points[i].x)
- - (cloud->points[i].y * cloud->points[i].y));
+ (*cloud)[i].z = sqrt( 1 - ((*cloud)[i].x * (*cloud)[i].x)
+ - ((*cloud)[i].y * (*cloud)[i].y));
else
- cloud->points[i].z = - sqrt( 1 - (cloud->points[i].x * cloud->points[i].x)
- - (cloud->points[i].y * cloud->points[i].y));
+ (*cloud)[i].z = - sqrt( 1 - ((*cloud)[i].x * (*cloud)[i].x)
+ - ((*cloud)[i].y * (*cloud)[i].y));
}
else
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].x = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].y = 1024 * rand () / (RAND_MAX + 1.0);
if( i % 2 == 0)
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0);
+ (*cloud)[i].z = 1024 * rand () / (RAND_MAX + 1.0);
else
- cloud->points[i].z = -1 * (cloud->points[i].x + cloud->points[i].y);
+ (*cloud)[i].z = -1 * ((*cloud)[i].x + (*cloud)[i].y);
}
}
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/features/range_image_border_extractor.h>
#include <pcl/console/parse.h>
+#include <pcl/common/file_io.h> // for getFilenameWithoutExtension
typedef pcl::PointXYZ PointType;
point_cloud.points.push_back (point);
}
}
- point_cloud.width = (int) point_cloud.points.size (); point_cloud.height = 1;
+ point_cloud.width = point_cloud.size (); point_cloud.height = 1;
}
// -----------------------------------------------
{
for (int x=0; x< (int)range_image.width; ++x)
{
- if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER])
- border_points.points.push_back (range_image.points[y*range_image.width + x]);
- if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__VEIL_POINT])
- veil_points.points.push_back (range_image.points[y*range_image.width + x]);
- if (border_descriptions.points[y*range_image.width + x].traits[pcl::BORDER_TRAIT__SHADOW_BORDER])
- shadow_points.points.push_back (range_image.points[y*range_image.width + x]);
+ if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER])
+ border_points.points.push_back (range_image[y*range_image.width + x]);
+ if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__VEIL_POINT])
+ veil_points.points.push_back (range_image[y*range_image.width + x]);
+ if (border_descriptions[y*range_image.width + x].traits[pcl::BORDER_TRAIT__SHADOW_BORDER])
+ shadow_points.points.push_back (range_image[y*range_image.width + x]);
}
}
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> border_points_color_handler (border_points_ptr, 0, 255, 0);
pointCloud.points.push_back(point);
}
}
- pointCloud.width = (std::uint32_t) pointCloud.points.size();
+ pointCloud.width = pointCloud.size();
pointCloud.height = 1;
// We now want to create a range image from the above point cloud, with a 1deg angular resolution
point_cloud.points.push_back (point);
}
}
- point_cloud.width = (int) point_cloud.points.size (); point_cloud.height = 1;
+ point_cloud.width = point_cloud.size (); point_cloud.height = 1;
}
// -----------------------------------------------
#include <pcl/visualization/pcl_visualizer.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
saveTransform (const std::string &file, const Eigen::Matrix4d &transform)
{
-  ofstream ofs;
+  std::ofstream ofs;
- ofs.open (file.c_str (), ios::trunc | ios::binary);
+ ofs.open (file.c_str (), std::ios::trunc | std::ios::binary);
for (int i = 0; i < 4; ++i)
for (int j = 0; j < 4; ++j)
ofs.write (reinterpret_cast<const char*>(&transform (i, j)), sizeof (double));
#include <pcl/registration/correspondence_rejection_distance.h>
#include <pcl/registration/transformation_estimation_svd.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
keypoints_tgt (new PointCloud<PointXYZ>);
estimateKeypoints (src, tgt, *keypoints_src, *keypoints_tgt);
- print_info ("Found %lu and %lu keypoints for the source and target datasets.\n", keypoints_src->points.size (), keypoints_tgt->points.size ());
+ print_info ("Found %zu and %zu keypoints for the source and target datasets.\n", static_cast<std::size_t>(keypoints_src->size ()), static_cast<std::size_t>(keypoints_tgt->size ()));
// Compute normals for all points keypoint
PointCloud<Normal>::Ptr normals_src (new PointCloud<Normal>),
normals_tgt (new PointCloud<Normal>);
estimateNormals (src, tgt, *normals_src, *normals_tgt);
- print_info ("Estimated %lu and %lu normals for the source and target datasets.\n", normals_src->points.size (), normals_tgt->points.size ());
+ print_info ("Estimated %zu and %zu normals for the source and target datasets.\n", static_cast<std::size_t>(normals_src->size ()), static_cast<std::size_t>(normals_tgt->size ()));
// Compute FPFH features at each keypoint
PointCloud<FPFHSignature33>::Ptr fpfhs_src (new PointCloud<FPFHSignature33>),
// Fill in the cloud data
cloud->width = 5;
cloud->height = 1;
- cloud->points.resize (cloud->width * cloud->height);
+ cloud->resize (cloud->width * cloud->height);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (auto& point: *cloud)
{
- cloud->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
- cloud->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.x = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.y = 1024 * rand () / (RAND_MAX + 1.0f);
+ point.z = 1024 * rand () / (RAND_MAX + 1.0f);
}
if (strcmp(argv[1], "-r") == 0){
outrem.setInputCloud(cloud);
outrem.setRadiusSearch(0.8);
outrem.setMinNeighborsInRadius (2);
+ outrem.setKeepOrganized(true);
// apply filter
outrem.filter (*cloud_filtered);
}
exit(0);
}
std::cerr << "Cloud before filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
- std::cerr << " " << cloud->points[i].x << " "
- << cloud->points[i].y << " "
- << cloud->points[i].z << std::endl;
+ for (const auto& point: *cloud)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
// display pointcloud after filtering
std::cerr << "Cloud after filtering: " << std::endl;
- for (std::size_t i = 0; i < cloud_filtered->points.size (); ++i)
- std::cerr << " " << cloud_filtered->points[i].x << " "
- << cloud_filtered->points[i].y << " "
- << cloud_filtered->points[i].z << std::endl;
+ for (const auto& point: *cloud_filtered)
+ std::cerr << " " << point.x << " "
+ << point.y << " "
+ << point.z << std::endl;
return (0);
}
{
ParticleFilter::PointCloudStatePtr particles = tracker_->getParticles ();
if (particles && new_cloud_)
- {
+ {
//Set pointCloud with particle's points
pcl::PointCloud<pcl::PointXYZ>::Ptr particle_cloud (new pcl::PointCloud<pcl::PointXYZ> ());
- for (std::size_t i = 0; i < particles->points.size (); i++)
+ for (const auto& particle: *particles)
{
pcl::PointXYZ point;
- point.x = particles->points[i].x;
- point.y = particles->points[i].y;
- point.z = particles->points[i].z;
- particle_cloud->points.push_back (point);
+ point.x = particle.x;
+ point.y = particle.y;
+ point.z = particle.z;
+ particle_cloud->push_back (point);
}
//Draw red particles
for (std::size_t i = 0; i < fields[vfh_idx].count; ++i)
{
- vfh.second[i] = point.points[0].histogram[i];
+ vfh.second[i] = point[0].histogram[i];
}
vfh.first = path.string ();
return (true);
for (std::size_t i = 0; i < fields[vfh_idx].count; ++i)
{
- vfh.second[i] = point.points[0].histogram[i];
+ vfh.second[i] = point[0].histogram[i];
}
vfh.first = path.string ();
return (true);
bool
loadFileList (std::vector<vfh_model> &models, const std::string &filename)
{
- ifstream fs;
+ std::ifstream fs;
fs.open (filename.c_str ());
if (!fs.is_open () || fs.fail ())
return (false);
std::string line;
while (!fs.eof ())
{
- getline (fs, line);
+ std::getline (fs, line);
if (line.empty ())
continue;
vfh_model m;
pcl::PointCloud<pcl::PointXYZ> cloud_xyz;
pcl::fromPCLPointCloud2 (cloud, cloud_xyz);
- if (cloud_xyz.points.size () == 0)
+ if (cloud_xyz.size () == 0)
break;
pcl::console::print_info ("[done, ");
- pcl::console::print_value ("%d", (int)cloud_xyz.points.size ());
+ pcl::console::print_value ("%zu", static_cast<std::size_t>(cloud_xyz.size ()));
pcl::console::print_info (" points]\n");
pcl::console::print_info ("Available dimensions: ");
pcl::console::print_value ("%s\n", pcl::getFieldsList (cloud).c_str ());
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-This is mandatory for cmake, and since we are making very basic
+This is mandatory for cmake, and since we are making a very basic
project we don't need features from cmake 2.8 or higher.
.. code-block:: cmake
find_package(PCL 1.3 REQUIRED COMPONENTS common io)
We are requesting to find the PCL package at minimum version 1.3. We
-also says that it is ``REQUIRED`` meaning that cmake will fail
+also say that it is ``REQUIRED``, meaning that cmake will fail
gracefully if it can't be found. As PCL is modular one can request:
* only one component: find_package(PCL 1.3 REQUIRED COMPONENTS io)
target_link_libraries(pcd_write_test ${PCL_LIBRARIES})
-The executable we are building makes call to PCL functions. So far, we
-have only included the PCL headers so the compilers knows about the
-methods we are calling. We need also to make the linker knows about
-the libraries we are linking against. As said before the, PCL
+The executable we are building makes calls to PCL functions. So far, we
+have only included the PCL headers, so the compiler knows about the
+methods we are calling. We also need to let the linker know about
+the libraries we are linking against. As said before, the PCL
libraries that were found are referred to via the ``PCL_LIBRARIES`` variable; all
that remains is to trigger the link operation, which we do by calling the
``target_link_libraries()`` macro.
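For context, the fragments above come together in a single CMakeLists.txt
roughly as in the sketch below. This is a sketch, not the tutorial's verbatim
file: the project name and the ``pcd_write.cpp`` source name are assumptions,
and the include/definition lines simply use the standard ``PCL_*`` variables
set by ``find_package``.

.. code-block:: cmake

   # Minimal sketch: locate PCL, expose its headers and compile definitions,
   # then build and link the tutorial executable.
   cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
   project(pcd_write_tutorial)
   find_package(PCL 1.3 REQUIRED COMPONENTS common io)
   include_directories(${PCL_INCLUDE_DIRS})
   link_directories(${PCL_LIBRARY_DIRS})
   add_definitions(${PCL_DEFINITIONS})
   add_executable(pcd_write_test pcd_write.cpp)
   target_link_libraries(pcd_write_test ${PCL_LIBRARIES})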
PCLConfig.cmake uses a special CMake feature named `EXPORT`, which
allows using other projects' targets as if you had built them
yourself. When you are using such targets they are called `imported
-targets` and acts just like any other target.
+targets` and act just like any other target.
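For instance, an imported target is consumed with the same
``target_link_libraries()`` call you would use for a target defined in your
own project; the dependency and target names below are purely hypothetical,
for illustration only.

.. code-block:: cmake

   # SomeDep::SomeDep is a hypothetical imported target created by its package's
   # config file; linking it looks identical to linking a locally built target.
   find_package(SomeDep REQUIRED)
   target_link_libraries(pcd_write_test SomeDep::SomeDep)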
Compiling and running the project
---------------------------------
- ``Where to build the binaries`` : this is where the Visual Studio project files will be generated
Then, click ``Configure``. You will be prompted for a generator/compiler. Then click the ``Generate``
-button. If there is no errors, the project files will be generated into the ``Where to build the binaries``
+button. If there are no errors, the project files will be generated into the ``Where to build the binaries``
folder.
Open the sln file, and build your project!
Weird installations
-------------------
CMake has a list of default paths where it searches for
-FindXXX.cmake or XXXConfig.cmake. If you happen to install in some non
-obvious repository (let us say in `Documents` for evils) then you can
+FindXXX.cmake or XXXConfig.cmake. If you happen to install in some
+non-obvious repository (let us say in `Documents` for evils) then you can
help cmake find PCLConfig.cmake by adding this line:
.. code-block:: cmake
// Compute the features
vfh.compute (*vfhs);
- // vfhs->points.size () should be of size 1*
+ // vfhs->size () should be of size 1*
}
Visualizing VFH signatures
{
float id = k_indices.at (n_id);
float dist = sqrt (k_distances.at (n_id));
- float intensity_dist = std::abs (cloud->points[point_id].intensity - cloud->points[id].intensity);
+ float intensity_dist = std::abs ((*cloud)[point_id].intensity - (*cloud)[id].intensity);
float w_a = G (dist, sigma_s);
float w_b = G (intensity_dist, sigma_r);
float weight = w_a * w_b;
- BF += weight * cloud->points[id].intensity;
+ BF += weight * (*cloud)[id].intensity;
W += weight;
}
- outcloud.points[point_id].intensity = BF / W;
+ outcloud[point_id].intensity = BF / W;
}
// Save filtered output
:pcl:`PCL_XYZ_POINT_TYPES<PCL_XYZ_POINT_TYPES>` for more information).
By looking closer at the code presented in :ref:`bilateral_filter_example`, we
-notice constructs such as `cloud->points[point_id].intensity`. This indicates
+notice constructs such as `(*cloud)[point_id].intensity`. This indicates
that our filter expects the presence of an **intensity** field in the point
type. Because of this, using **PCL_XYZ_POINT_TYPES** won't work, as not all the
types defined there have intensity data present. In fact, it's easy to notice
{
double id = indices[n_id];
double dist = std::sqrt (distances[n_id]);
- double intensity_dist = std::abs (input_->points[pid].intensity - input_->points[id].intensity);
+ double intensity_dist = std::abs ((*input_)[pid].intensity - (*input_)[id].intensity);
double weight = kernel (dist, sigma_s_) * kernel (intensity_dist, sigma_r_);
- BF += weight * input_->points[id].intensity;
+ BF += weight * (*input_)[id].intensity;
W += weight;
}
return (BF / W);
output = *input_;
- for (std::size_t point_id = 0; point_id < input_->points.size (); ++point_id)
+ for (std::size_t point_id = 0; point_id < input_->size (); ++point_id)
{
tree_->radiusSearch (point_id, sigma_s_ * 2, k_indices, k_distances);
- output.points[point_id].intensity = computePointWeight (point_id, k_indices, k_distances);
+ output[point_id].intensity = computePointWeight (point_id, k_indices, k_distances);
}
}
{
double id = indices[n_id];
double dist = std::sqrt (distances[n_id]);
- double intensity_dist = std::abs (input_->points[pid].intensity - input_->points[id].intensity);
+ double intensity_dist = std::abs ((*input_)[pid].intensity - (*input_)[id].intensity);
double weight = kernel (dist, sigma_s_) * kernel (intensity_dist, sigma_r_);
- BF += weight * input_->points[id].intensity;
+ BF += weight * (*input_)[id].intensity;
W += weight;
}
return (BF / W);
output = *input_;
- for (std::size_t point_id = 0; point_id < input_->points.size (); ++point_id)
+ for (std::size_t point_id = 0; point_id < input_->size (); ++point_id)
{
tree_->radiusSearch (point_id, sigma_s_ * 2, k_indices, k_distances);
- output.points[point_id].intensity = computePointWeight (point_id, k_indices, k_distances);
+ output[point_id].intensity = computePointWeight (point_id, k_indices, k_distances);
}
}
{
double id = indices[n_id];
double dist = std::sqrt (distances[n_id]);
- double intensity_dist = std::abs (input_->points[pid].intensity - input_->points[id].intensity);
+ double intensity_dist = std::abs ((*input_)[pid].intensity - (*input_)[id].intensity);
double weight = kernel (dist, sigma_s_) * kernel (intensity_dist, sigma_r_);
- BF += weight * input_->points[id].intensity;
+ BF += weight * (*input_)[id].intensity;
W += weight;
}
return (BF / W);
{
tree_->radiusSearch ((*indices_)[i], sigma_s_ * 2, k_indices, k_distances);
- output.points[(*indices_)[i]].intensity = computePointWeight ((*indices_)[i], k_indices, k_distances);
+ output[(*indices_)[i]].intensity = computePointWeight ((*indices_)[i], k_indices, k_distances);
}
}
{
double id = indices[n_id];
// Compute the difference in intensity
- double intensity_dist = std::abs (input_->points[pid].intensity - input_->points[id].intensity);
+ double intensity_dist = std::abs ((*input_)[pid].intensity - (*input_)[id].intensity);
// Compute the Gaussian intensity weights both in Euclidean and in intensity space
double dist = std::sqrt (distances[n_id]);
double weight = kernel (dist, sigma_s_) * kernel (intensity_dist, sigma_r_);
// Calculate the bilateral filter response
- BF += weight * input_->points[id].intensity;
+ BF += weight * (*input_)[id].intensity;
W += weight;
}
return (BF / W);
tree_->radiusSearch ((*indices_)[i], sigma_s_ * 2, k_indices, k_distances);
// Overwrite the intensity value with the computed average
- output.points[(*indices_)[i]].intensity = computePointWeight ((*indices_)[i], k_indices, k_distances);
+ output[(*indices_)[i]].intensity = computePointWeight ((*indices_)[i], k_indices, k_distances);
}
}
CloudType::Ptr cloud2(new CloudType);
copyPointCloud(*cloud, *cloud2);
- CloudType::PointType p_retrieved = cloud2->points[0];
+ CloudType::PointType p_retrieved = (*cloud2)[0];
std::cout << p_retrieved.x << " " << p_retrieved.y << " " << p_retrieved.z << std::endl;
}
CloudType2::Ptr cloud2(new CloudType2);
copyPointCloud(*cloud, *cloud2);
- CloudType2::PointType p_retrieved = cloud2->points[0];
+ CloudType2::PointType p_retrieved = (*cloud2)[0];
std::cout << p_retrieved.x << " " << p_retrieved.y << " " << p_retrieved.z << std::endl;
}
CloudType2::Ptr cloud2(new CloudType2);
copyPointCloud(*cloud, *cloud2);
- CloudType2::PointType p_retrieved = cloud2->points[0];
+ CloudType2::PointType p_retrieved = (*cloud2)[0];
std::cout << p_retrieved.x << " " << p_retrieved.y << " " << p_retrieved.z << std::endl;
}
#endif
using namespace pcl;
-using namespace std;
using PointT = pcl::PointXYZRGB;
using PointNT = pcl::PointNormal;
}
///The file to read from.
- string infile = argv[1];
+ std::string infile = argv[1];
///The file to output to.
- string outfile = argv[2];
+ std::string outfile = argv[2];
// Load cloud in blob format
pcl::PCLPointCloud2 blob;
doncloud = doncloud_filtered;
// Save filtered output
- std::cout << "Filtered Pointcloud: " << doncloud->points.size () << " data points." << std::endl;
+ std::cout << "Filtered Pointcloud: " << doncloud->size () << " data points." << std::endl;
std::stringstream ss;
ss << outfile.substr(0,outfile.length()-4) << "_threshold_"<< threshold << "_.pcd";
writer.write<PointOutT> (ss.str (), *doncloud, false);
{
pcl::PointCloud<PointOutT>::Ptr cloud_cluster_don (new pcl::PointCloud<PointOutT>);
for (const int &index : it->indices){
- cloud_cluster_don->points.push_back (doncloud->points[index]);
+ cloud_cluster_don->points.push_back ((*doncloud)[index]);
}
- cloud_cluster_don->width = int (cloud_cluster_don->points.size ());
+ cloud_cluster_don->width = cloud_cluster_don->size ();
cloud_cluster_don->height = 1;
cloud_cluster_don->is_dense = true;
- std::cout << "PointCloud representing the Cluster: " << cloud_cluster_don->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the Cluster: " << cloud_cluster_don->size () << " data points." << std::endl;
std::stringstream ss;
ss << outfile.substr(0,outfile.length()-4) << "_threshold_"<< threshold << "_cluster_" << j << ".pcd";
writer.write<PointOutT> (ss.str (), *cloud_cluster_don, false);
return (-1);
}
- std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Loaded " << cloud->size () << " points." << std::endl;
// Compute the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
// Actually compute the spin images
fpfh_estimation.compute (*pfh_features);
- std::cout << "output points.size (): " << pfh_features->points.size () << std::endl;
+ std::cout << "output size (): " << pfh_features->size () << std::endl;
// Display and retrieve the shape context descriptor vector for the 0th point.
- pcl::FPFHSignature33 descriptor = pfh_features->points[0];
+ pcl::FPFHSignature33 descriptor = (*pfh_features)[0];
std::cout << descriptor << std::endl;
return 0;
return -1;
}
- std::cout << "points: " << cloud->points.size () << std::endl;
+ std::cout << "points: " << cloud->size () << std::endl;
// Create the normal estimation class, and pass the input dataset to it
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
// Compute the features
normal_estimation.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input cloud->points.size ()
- std::cout << "cloud_normals->points.size (): " << cloud_normals->points.size () << std::endl;
+ // cloud_normals->size () should have the same size as the input cloud->size ()
+ std::cout << "cloud_normals->size (): " << cloud_normals->size () << std::endl;
return 0;
}
return (-1);
}
- std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Loaded " << cloud->size () << " points." << std::endl;
// Compute the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
// Actually compute the spin images
pfh_estimation.compute (*pfh_features);
- std::cout << "output points.size (): " << pfh_features->points.size () << std::endl;
+ std::cout << "output size (): " << pfh_features->size () << std::endl;
// Display and retrieve the shape context descriptor vector for the 0th point.
- pcl::PFHSignature125 descriptor = pfh_features->points[0];
+ pcl::PFHSignature125 descriptor = (*pfh_features)[0];
std::cout << descriptor << std::endl;
return 0;
return (-1);
}
- std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Loaded " << cloud->size () << " points." << std::endl;
// Compute the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
pcl::PointCloud<pcl::PrincipalCurvatures>::Ptr principal_curvatures (new pcl::PointCloud<pcl::PrincipalCurvatures> ());
principal_curvatures_estimation.compute (*principal_curvatures);
- std::cout << "output points.size (): " << principal_curvatures->points.size () << std::endl;
+ std::cout << "output size (): " << principal_curvatures->size () << std::endl;
// Display and retrieve the shape context descriptor vector for the 0th point.
- pcl::PrincipalCurvatures descriptor = principal_curvatures->points[0];
+ pcl::PrincipalCurvatures descriptor = (*principal_curvatures)[0];
std::cout << descriptor << std::endl;
return 0;
return -1;
}
- std::cout << "points: " << cloud->points.size () << std::endl;
+ std::cout << "points: " << cloud->size () << std::endl;
// Estimate the surface normals
pcl::PointCloud<pcl::Normal>::Ptr cloud_n (new pcl::PointCloud<pcl::Normal>);
norm_est.compute(*cloud_n);
std::cout<<" Surface normals estimated";
- std::cout<<" with size "<< cloud_n->points.size() <<std::endl;
+ std::cout<<" with size "<< cloud_n->size() <<std::endl;
// Estimate the Intensity Gradient
pcl::PointCloud<pcl::IntensityGradient>::Ptr cloud_ig (new pcl::PointCloud<pcl::IntensityGradient>);
gradient_est.setRadiusSearch(0.25);
gradient_est.compute(*cloud_ig);
std::cout<<" Intensity Gradient estimated";
- std::cout<<" with size "<< cloud_ig->points.size() <<std::endl;
+ std::cout<<" with size "<< cloud_ig->size() <<std::endl;
// Estimate the RIFT feature
rift_est.compute(rift_output);
std::cout<<" RIFT feature estimated";
- std::cout<<" with size "<<rift_output.points.size()<<std::endl;
+ std::cout<<" with size "<<rift_output.size()<<std::endl;
// Display and retrieve the rift descriptor vector for the first point
- pcl::Histogram<32> first_descriptor = rift_output.points[0];
- std::cout << first_descriptor << std::endl;
+ std::cout << rift_output.front() << std::endl;
return 0;
}
PCL_ERROR ("Couldn't read file");
return (-1);
}
- std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Loaded " << cloud->size () << " points." << std::endl;
// Compute the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
// Actually compute the shape contexts
shape_context.compute (*shape_context_features);
- std::cout << "3DSC output points.size (): " << shape_context_features->points.size () << std::endl;
+ std::cout << "3DSC output size (): " << shape_context_features->size () << std::endl;
// Display and retrieve the shape context descriptor vector for the 0th point.
- std::cout << shape_context_features->points[0] << std::endl;
- //float* first_descriptor = shape_context_features->points[0].descriptor; // 1980 elements
+ std::cout << (*shape_context_features)[0] << std::endl;
+ //float* first_descriptor = (*shape_context_features)[0].descriptor; // 1980 elements
return 0;
}
PCL_ERROR ("Couldn't read file");
return (-1);
}
- std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Loaded " << cloud->size () << " points." << std::endl;
// Compute the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
// Actually compute the spin images
spin_image_descriptor.compute (*spin_images);
- std::cout << "SI output points.size (): " << spin_images->points.size () << std::endl;
+ std::cout << "SI output size (): " << spin_images->size () << std::endl;
// Display and retrieve the spin image descriptor vector for the first point.
- pcl::Histogram<153> first_descriptor = spin_images->points[0];
+ pcl::Histogram<153> first_descriptor = (*spin_images)[0];
std::cout << first_descriptor << std::endl;
return 0;
cloud->push_back (p);
}
- std::cout << "Cloud has " << cloud->points.size () << " points." << std::endl;
+ std::cout << "Cloud has " << cloud->size () << " points." << std::endl;
pcl::PointIndices indices;
indices.indices.push_back (0);
pcl::PointCloud<pcl::PointXYZ>::Ptr output (new pcl::PointCloud<pcl::PointXYZ>);
extract_indices.filter (*output);
- std::cout << "Output has " << output->points.size () << " points." << std::endl;
+ std::cout << "Output has " << output->size () << " points." << std::endl;
return (0);
}
p_valid.x = 1.0f;
cloud->push_back(p_valid);
- std::cout << "size: " << cloud->points.size () << std::endl;
+ std::cout << "size: " << cloud->size () << std::endl;
std::vector<int> indices;
pcl::removeNaNFromPointCloud(*cloud, *output_cloud, indices);
- std::cout << "size: " << output_cloud->points.size () << std::endl;
+ std::cout << "size: " << output_cloud->size () << std::endl;
return 0;
}
*
*/
-#include <iostream>
-
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/harris_3d.h>
PCL_ERROR ("Couldn't read file");
return -1;
}
- std::cout << "points: " << cloud->points.size () <<std::endl;
+ std::cout << "points: " << cloud->size () <<std::endl;
// Parameters for sift computation
const float min_scale = 0.1f;
copyPointCloud(result, *cloud_temp);
// Saving the resultant cloud
- std::cout << "Resulting sift points are of size: " << cloud_temp->points.size () <<std::endl;
+ std::cout << "Resulting sift points are of size: " << cloud_temp->size () <<std::endl;
pcl::io::savePCDFileASCII("sift_points.pcd", *cloud_temp);
PCL_ERROR ("Couldn't read file");
return -1;
}
- std::cout << "points: " << cloud_xyz->points.size () <<std::endl;
+ std::cout << "points: " << cloud_xyz->size () <<std::endl;
// Parameters for sift computation
const float min_scale = 0.01f;
ne.compute(*cloud_normals);
// Copy the xyz info from cloud_xyz and add it to cloud_normals as the xyz field in PointNormals estimation is zero
- for(std::size_t i = 0; i<cloud_normals->points.size(); ++i)
+ for(std::size_t i = 0; i<cloud_normals->size(); ++i)
{
- cloud_normals->points[i].x = cloud_xyz->points[i].x;
- cloud_normals->points[i].y = cloud_xyz->points[i].y;
- cloud_normals->points[i].z = cloud_xyz->points[i].z;
+ (*cloud_normals)[i].x = (*cloud_xyz)[i].x;
+ (*cloud_normals)[i].y = (*cloud_xyz)[i].y;
+ (*cloud_normals)[i].z = (*cloud_xyz)[i].z;
}
// Estimate the sift interest points using normals values from xyz as the Intensity variants
sift.setInputCloud(cloud_normals);
sift.compute(result);
- std::cout << "No of SIFT points in the result are " << result.points.size () << std::endl;
+ std::cout << "No of SIFT points in the result are " << result.size () << std::endl;
/*
// Copying the pointwithscale to pointxyz so as visualize the cloud
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_temp (new pcl::PointCloud<pcl::PointXYZ>);
copyPointCloud(result, *cloud_temp);
- std::cout << "SIFT points in the cloud_temp are " << cloud_temp->points.size () << std::endl;
+ std::cout << "SIFT points in the cloud_temp are " << cloud_temp->size () << std::endl;
// Visualization of keypoints along with the original cloud
PCL_ERROR ("Couldn't read file");
return -1;
}
- std::cout << "points: " << cloud_xyz->points.size () <<std::endl;
+ std::cout << "points: " << cloud_xyz->size () <<std::endl;
// Parameters for sift computation
const float min_scale = 0.005f;
sift.setInputCloud(cloud_xyz);
sift.compute(result);
- std::cout << "No of SIFT points in the result are " << result.points.size () << std::endl;
+ std::cout << "No of SIFT points in the result are " << result.size () << std::endl;
/*
// Copying the pointwithscale to pointxyz so as visualize the cloud
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_temp (new pcl::PointCloud<pcl::PointXYZ>);
copyPointCloud(result, *cloud_temp);
- std::cout << "SIFT points in the result are " << cloud_temp->points.size () << std::endl;
+ std::cout << "SIFT points in the result are " << cloud_temp->size () << std::endl;
// Visualization of keypoints along with the original cloud
pcl::visualization::PCLVisualizer viewer("PCL Viewer");
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> keypoints_color_handler (cloud_temp, 0, 255, 0);
*
*/
-#include <cstdlib>
#include <thread>
#include <boost/format.hpp>
std::cout<<"Couldn't read the file "<<argv[1]<<std::endl;
return (-1);
}
- std::cout << "Loaded pcd file " << argv[1] << " with " << cloud_ptr->points.size () << std::endl;
+ std::cout << "Loaded pcd file " << argv[1] << " with " << cloud_ptr->size () << std::endl;
// Normal estimation
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
for (const int &index : it->indices)
- cloud_cluster->points.push_back (cloud_ptr->points[index]);
- cloud_cluster->width = static_cast<std::uint32_t> (cloud_cluster->points.size ());
+ cloud_cluster->push_back ((*cloud_ptr)[index]);
+ cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
- std::cout << "PointCloud representing the Cluster using xyzn: " << cloud_cluster->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the Cluster using xyzn: " << cloud_cluster->size () << " data " << std::endl;
std::stringstream ss;
ss << "./cloud_cluster_" << j << ".pcd";
writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false);
*
*/
-#include <cstdlib>
#include <thread>
#include <boost/format.hpp>
return -1;
}
- pcl::console::print_highlight ("Loaded cloud %s of size %lu\n", av[1], cloud_ptr->points.size ());
+ pcl::console::print_highlight("Loaded cloud %s of size %zu\n",
+ av[1],
+ static_cast<std::size_t>(cloud_ptr->size()));
// Remove the nans
cloud_ptr->is_dense = false;
cloud_no_nans->is_dense = false;
std::vector<int> indices;
pcl::removeNaNFromPointCloud (*cloud_ptr, *cloud_no_nans, indices);
- pcl::console::print_highlight ("Removed nans from %lu to %lu\n", cloud_ptr->points.size (), cloud_no_nans->points.size ());
+ pcl::console::print_highlight("Removed nans from %zu to %zu\n",
+ static_cast<std::size_t>(cloud_ptr->size()),
+ static_cast<std::size_t>(cloud_no_nans->size()));
// Estimate the normals
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
ne.setSearchMethod (tree_n);
ne.setRadiusSearch (0.03);
ne.compute (*cloud_normals);
- pcl::console::print_highlight ("Normals are computed and size is %lu\n", cloud_normals->points.size ());
+ pcl::console::print_highlight("Normals are computed and size is %zu\n",
+ static_cast<std::size_t>(cloud_normals->size()));
// Region growing
pcl::RegionGrowing<pcl::PointXYZ, pcl::Normal> rg;
cloud_segmented = rg.getColoredCloud ();
// Writing the resulting cloud into a pcd file
- pcl::console::print_highlight ("Number of segments done is %lu\n", clusters.size ());
+ pcl::console::print_highlight("Number of segments done is %zu\n",
+ static_cast<std::size_t>(clusters.size()));
writer.write<pcl::PointXYZRGB> ("segment_result.pcd", *cloud_segmented, false);
if (pcl::console::find_switch (argc, av, "-dump"))
{
viewer->removePointCloud (ss.str ());
viewer->addPointCloudNormals<PointT,Normal> ((sv_itr->second)->voxels_,(sv_itr->second)->normals_,10,0.02f,ss.str ());
- // std::cout << (sv_itr->second)->normals_->points[0]<<"\n";
+      // std::cout << (*(sv_itr->second)->normals_)[0]<<"\n";
}
template <typename PointT, typename PointNT> inline void
computeApproximateNormals(const pcl::PointCloud<PointT>& cloud, const std::vector<pcl::Vertices>& polygons, pcl::PointCloud<PointNT>& normals)
{
- const auto nr_points = cloud.points.size();
+ const auto nr_points = cloud.size();
normals.header = cloud.header;
normals.width = cloud.width;
if (polygon.vertices.size() < 3) continue;
// compute normal for triangle
- Eigen::Vector3f vec_a_b = cloud.points[polygon.vertices[0]].getVector3fMap() - cloud.points[polygon.vertices[1]].getVector3fMap();
- Eigen::Vector3f vec_a_c = cloud.points[polygon.vertices[0]].getVector3fMap() - cloud.points[polygon.vertices[2]].getVector3fMap();
+ Eigen::Vector3f vec_a_b = cloud[polygon.vertices[0]].getVector3fMap() - cloud[polygon.vertices[1]].getVector3fMap();
+ Eigen::Vector3f vec_a_c = cloud[polygon.vertices[0]].getVector3fMap() - cloud[polygon.vertices[2]].getVector3fMap();
Eigen::Vector3f normal = vec_a_b.cross(vec_a_c);
- pcl::flipNormalTowardsViewpoint(cloud.points[polygon.vertices[0]], 0.0f, 0.0f, 0.0f, normal(0), normal(1), normal(2));
+ pcl::flipNormalTowardsViewpoint(cloud[polygon.vertices[0]], 0.0f, 0.0f, 0.0f, normal(0), normal(1), normal(2));
// add normal to all points in polygon
for (const auto& vertex: polygon.vertices)
- normals.points[vertex].getNormalVector3fMap() += normal;
+ normals[vertex].getNormalVector3fMap() += normal;
}
for (std::size_t i = 0; i < nr_points; ++i)
{
- normals.points[i].getNormalVector3fMap().normalize();
- pcl::flipNormalTowardsViewpoint(cloud.points[i], 0.0f, 0.0f, 0.0f, normals.points[i].normal_x, normals.points[i].normal_y, normals.points[i].normal_z);
+ normals[i].getNormalVector3fMap().normalize();
+ pcl::flipNormalTowardsViewpoint(cloud[i], 0.0f, 0.0f, 0.0f, normals[i].normal_x, normals[i].normal_y, normals[i].normal_z);
}
}
std::vector<Eigen::Matrix3d, Eigen::aligned_allocator<Eigen::Matrix3d> >& covariances,
double epsilon = 0.001)
{
- assert(cloud.points.size() == normals.points.size());
+ assert(cloud.size() == normals.size());
- const auto nr_points = cloud.points.size();
+ const auto nr_points = cloud.size();
covariances.clear ();
covariances.reserve (nr_points);
for (const auto& point: normals.points)
const auto minIndex = nn_indices[std::distance (nn_dists.begin (), minDistanceIt)];
// Get origin point
- Vector3fMapConst origin = input_->points[(*indices_)[index]].getVector3fMap ();
+ Vector3fMapConst origin = (*input_)[(*indices_)[index]].getVector3fMap ();
// Get origin normal
// Use pre-computed normals
normal = normals[minIndex].getNormalVector3fMap ();
if (pcl::utils::equal (nn_dists[ne], 0.0f))
continue;
// Get neighbours coordinates
- Eigen::Vector3f neighbour = surface_->points[nn_indices[ne]].getVector3fMap ();
+ Eigen::Vector3f neighbour = (*surface_)[nn_indices[ne]].getVector3fMap ();
/// ----- Compute current neighbour polar coordinates -----
/// Get distance between the neighbour and the origin
#include <pcl/features/board.h>
#include <utility>
-#include <pcl/common/transforms.h>
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT> void
const Eigen::Vector4f &u, const Eigen::Vector4f &v,
const float angle_threshold)
{
- return (isBoundaryPoint (cloud, cloud.points[q_idx], indices, u, v, angle_threshold));
+ return (isBoundaryPoint (cloud, cloud[q_idx], indices, u, v, angle_threshold));
}
//////////////////////////////////////////////////////////////////////////////////////////////
for (const int &index : indices)
{
- if (!std::isfinite (cloud.points[index].x) ||
- !std::isfinite (cloud.points[index].y) ||
- !std::isfinite (cloud.points[index].z))
+ if (!std::isfinite (cloud[index].x) ||
+ !std::isfinite (cloud[index].y) ||
+ !std::isfinite (cloud[index].z))
continue;
- Eigen::Vector4f delta = cloud.points[index].getVector4fMap () - q_point.getVector4fMap ();
+ Eigen::Vector4f delta = cloud[index].getVector4fMap () - q_point.getVector4fMap ();
if (delta == Eigen::Vector4f::Zero())
continue;
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].boundary_point = std::numeric_limits<std::uint8_t>::quiet_NaN ();
+ output[idx].boundary_point = std::numeric_limits<std::uint8_t>::quiet_NaN ();
output.is_dense = false;
continue;
}
// Obtain a coordinate system on the least-squares plane
- //v = normals_->points[(*indices_)[idx]].getNormalVector4fMap ().unitOrthogonal ();
- //u = normals_->points[(*indices_)[idx]].getNormalVector4fMap ().cross3 (v);
- getCoordinateSystemOnPlane (normals_->points[(*indices_)[idx]], u, v);
+ //v = (*normals_)[(*indices_)[idx]].getNormalVector4fMap ().unitOrthogonal ();
+ //u = (*normals_)[(*indices_)[idx]].getNormalVector4fMap ().cross3 (v);
+ getCoordinateSystemOnPlane ((*normals_)[(*indices_)[idx]], u, v);
// Estimate whether the point is lying on a boundary surface or not
- output.points[idx].boundary_point = isBoundaryPoint (*surface_, input_->points[(*indices_)[idx]], nn_indices, u, v, angle_threshold_);
+ output[idx].boundary_point = isBoundaryPoint (*surface_, (*input_)[(*indices_)[idx]], nn_indices, u, v, angle_threshold_);
}
}
else
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].boundary_point = std::numeric_limits<std::uint8_t>::quiet_NaN ();
+ output[idx].boundary_point = std::numeric_limits<std::uint8_t>::quiet_NaN ();
output.is_dense = false;
continue;
}
// Obtain a coordinate system on the least-squares plane
- //v = normals_->points[(*indices_)[idx]].getNormalVector4fMap ().unitOrthogonal ();
- //u = normals_->points[(*indices_)[idx]].getNormalVector4fMap ().cross3 (v);
- getCoordinateSystemOnPlane (normals_->points[(*indices_)[idx]], u, v);
+ //v = (*normals_)[(*indices_)[idx]].getNormalVector4fMap ().unitOrthogonal ();
+ //u = (*normals_)[(*indices_)[idx]].getNormalVector4fMap ().cross3 (v);
+ getCoordinateSystemOnPlane ((*normals_)[(*indices_)[idx]], u, v);
// Estimate whether the point is lying on a boundary surface or not
- output.points[idx].boundary_point = isBoundaryPoint (*surface_, input_->points[(*indices_)[idx]], nn_indices, u, v, angle_threshold_);
+ output[idx].boundary_point = isBoundaryPoint (*surface_, (*input_)[(*indices_)[idx]], nn_indices, u, v, angle_threshold_);
}
}
}
const int r_x_1 = (1024 - r_x);
const int r_y_1 = (1024 - r_y);
- //+const unsigned char* ptr = static_cast<const unsigned char*> (&image.points[0].r) + x + y * imagecols;
+ //+const unsigned char* ptr = static_cast<const unsigned char*> (&image[0].r) + x + y * imagecols;
const unsigned char* ptr = static_cast<const unsigned char*>(&image[0]) + x + y * imagecols;
// just interpolate:
{
// now the calculation:
- //+const unsigned char* ptr = static_cast<const unsigned char*> (&image.points[0].r) + x_left + imagecols * y_top;
+ //+const unsigned char* ptr = static_cast<const unsigned char*> (&image[0].r) + x_left + imagecols * y_top;
const unsigned char* ptr = static_cast<const unsigned char*>(&image[0]) + x_left + imagecols * y_top;
// first the corners:
// now the calculation:
- //const unsigned char* ptr = static_cast<const unsigned char*> (&image.points[0].r) + x_left + imagecols * y_top;
+ //const unsigned char* ptr = static_cast<const unsigned char*> (&image[0].r) + x_left + imagecols * y_top;
const unsigned char* ptr = static_cast<const unsigned char*>(&image[0]) + x_left + imagecols * y_top;
// first row:
image_data[i] = static_cast<unsigned char> (intensity_ ((*input_cloud_)[i]));
// Remove keypoints very close to the border
- std::size_t ksize = keypoints_->points.size ();
+ auto ksize = keypoints_->size ();
std::vector<int> kscales; // remember the scale per keypoint
kscales.resize (ksize);
unsigned int scale;
if (scale_invariance_enabled_)
{
- scale = std::max (static_cast<int> (float (scales_) / lb_scalerange * (std::log2 (keypoints_->points[k].size / (basic_size_06))) + 0.5f), 0);
+ scale = std::max (static_cast<int> (float (scales_) / lb_scalerange * (std::log2 ((*keypoints_)[k].size / (basic_size_06))) + 0.5f), 0);
// saturate
if (scale >= scales_) scale = scales_ - 1;
kscales[k] = scale;
const int border_x = width - border;
const int border_y = height - border;
- if (RoiPredicate (float (border), float (border), float (border_x), float (border_y), keypoints_->points[k]))
+ if (RoiPredicate (float (border), float (border), float (border_x), float (border_y), (*keypoints_)[k]))
{
//std::cerr << "remove keypoint" << std::endl;
keypoints_->points.erase (beginning + k);
}
}
- keypoints_->width = std::uint32_t (keypoints_->size ());
+ keypoints_->width = keypoints_->size ();
keypoints_->height = 1;
// first, calculate the integral image over the whole image:
//output.height = 1;
for (std::size_t k = 0; k < ksize; k++)
{
- unsigned char* ptr = &output.points[k].descriptor[0];
+ unsigned char* ptr = &output[k].descriptor[0];
int theta;
- KeypointT &kp = keypoints_->points[k];
+ KeypointT &kp = (*keypoints_)[k];
const int& scale = kscales[k];
int shifter = 0;
int* pvalues = values;
//ptr += strings_;
//// Account for the scale + orientation;
- //ptr += sizeof (output.points[0].scale);
- //ptr += sizeof (output.points[0].orientation);
+ //ptr += sizeof (output[0].scale);
+ //ptr += sizeof (output[0].orientation);
}
// we do not change the denseness
- output.width = int (output.points.size ());
+ output.width = output.size ();
output.height = 1;
output.is_dense = true;
{
// Initialize output container
output.points.clear ();
- output.points.reserve (indices_->size () * input_->points.size ());
+ output.points.reserve (indices_->size () * input_->size ());
output.is_dense = true;
// Compute point pair features for every pair of points in the cloud
for (const auto& i: *indices_)
{
- for (std::size_t j = 0 ; j < input_->points.size (); ++j)
+ for (std::size_t j = 0 ; j < input_->size (); ++j)
{
PointOutT p;
// No need to calculate feature for identity pair (i, j) as they aren't used in future calculations
if (static_cast<std::size_t>(i) != j)
{
if (
- pcl::computeCPPFPairFeature (input_->points[i].getVector4fMap (),
- normals_->points[i].getNormalVector4fMap (),
- input_->points[i].getRGBVector4i (),
- input_->points[j].getVector4fMap (),
- normals_->points[j].getNormalVector4fMap (),
- input_->points[j].getRGBVector4i (),
+ pcl::computeCPPFPairFeature ((*input_)[i].getVector4fMap (),
+ (*normals_)[i].getNormalVector4fMap (),
+ (*input_)[i].getRGBVector4i (),
+ (*input_)[j].getVector4fMap (),
+ (*normals_)[j].getNormalVector4fMap (),
+ (*input_)[j].getRGBVector4i (),
p.f1, p.f2, p.f3, p.f4, p.f5, p.f6, p.f7, p.f8, p.f9, p.f10))
{
// Calculate alpha_m angle
- Eigen::Vector3f model_reference_point = input_->points[i].getVector3fMap (),
- model_reference_normal = normals_->points[i].getNormalVector3fMap (),
- model_point = input_->points[j].getVector3fMap ();
+ Eigen::Vector3f model_reference_point = (*input_)[i].getVector3fMap (),
+ model_reference_normal = (*normals_)[i].getNormalVector3fMap (),
+ model_point = (*input_)[j].getVector3fMap ();
Eigen::AngleAxisf rotation_mg (std::acos (model_reference_normal.dot (Eigen::Vector3f::UnitX ())),
model_reference_normal.cross (Eigen::Vector3f::UnitX ()).normalized ());
Eigen::Affine3f transform_mg = Eigen::Translation3f ( rotation_mg * ((-1) * model_reference_point)) * rotation_mg;
}
// overwrite the sizes done by Feature::initCompute ()
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
#define PCL_INSTANTIATE_CPPFEstimation(T,NT,OutT) template class PCL_EXPORTS pcl::CPPFEstimation<T,NT,OutT>;
#include <pcl/features/crh.h>
#include <pcl/common/fft/kiss_fftr.h>
-#include <pcl/common/common.h>
#include <pcl/common/transforms.h>
//////////////////////////////////////////////////////////////////////////////////////////////
return;
}
- if (normals_->points.size () != surface_->points.size ())
+ if (normals_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
for (std::size_t i = 0; i < indices_->size (); i++)
{
- grid.points[i].getVector4fMap () = surface_->points[(*indices_)[i]].getVector4fMap ();
- grid.points[i].getNormalVector4fMap () = normals_->points[(*indices_)[i]].getNormalVector4fMap ();
+ grid[i].getVector4fMap () = (*surface_)[(*indices_)[i]].getVector4fMap ();
+ grid[i].getNormalVector4fMap () = (*normals_)[(*indices_)[i]].getNormalVector4fMap ();
}
pcl::transformPointCloudWithNormals (grid, grid, transformPC);
output.points.resize (1);
output.width = output.height = 1;
- output.points[0].histogram[0] = freq_data[0].r; //dc
+ output[0].histogram[0] = freq_data[0].r; //dc
int k = 1;
for (int i = 1; i < (nbins / 2); i++, k += 2)
{
- output.points[0].histogram[k] = freq_data[i].r;
- output.points[0].histogram[k + 1] = freq_data[i].i;
+ output[0].histogram[k] = freq_data[i].r;
+ output[0].histogram[k + 1] = freq_data[i].i;
}
- output.points[0].histogram[nbins - 1] = freq_data[nbins / 2].r; //nyquist
+ output[0].histogram[nbins - 1] = freq_data[nbins / 2].r; //nyquist
}
#define PCL_INSTANTIATE_CRHEstimation(T,NT,OutT) template class PCL_EXPORTS pcl::CRHEstimation<T,NT,OutT>;
#include <pcl/features/cvfh.h>
#include <pcl/features/normal_3d.h>
-#include <pcl/features/pfh_tools.h>
#include <pcl/common/centroid.h>
//////////////////////////////////////////////////////////////////////////////////////////////
unsigned int min_pts_per_cluster,
unsigned int max_pts_per_cluster)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
- if (cloud.points.size () != normals.points.size ())
+ if (cloud.size () != normals.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Number of points in the input point cloud (%lu) different than normals (%lu)!\n", cloud.points.size (), normals.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point "
+ "cloud (%zu) different than normals (%zu)!\n",
+ static_cast<std::size_t>(cloud.size()),
+ static_cast<std::size_t>(normals.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
if (processed[i])
continue;
//processed[nn_indices[j]] = true;
// [-1;1]
- const double dot_p = normals.points[seed_queue[idx]].getNormalVector3fMap().dot(
- normals.points[nn_indices[j]].getNormalVector3fMap());
+ const double dot_p = normals[seed_queue[idx]].getNormalVector3fMap().dot(
+ normals[nn_indices[j]].getNormalVector3fMap());
if (std::acos (dot_p) < eps_angle)
{
std::vector<int> &indices_in,
float threshold)
{
- indices_out.resize (cloud.points.size ());
- indices_in.resize (cloud.points.size ());
+ indices_out.resize (cloud.size ());
+ indices_in.resize (cloud.size ());
std::size_t in, out;
in = out = 0;
for (const int &index : indices_to_use)
{
- if (cloud.points[index].curvature > threshold)
+ if (cloud[index].curvature > threshold)
{
indices_out[out] = index;
out++;
output.points.clear ();
return;
}
- if (normals_->points.size () != surface_->points.size ())
+ if (normals_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
filterNormalsWithHighCurvature (*normals_, *indices_, indices_out, indices_in, curv_threshold_);
pcl::PointCloud<pcl::PointNormal>::Ptr normals_filtered_cloud (new pcl::PointCloud<pcl::PointNormal> ());
- normals_filtered_cloud->width = static_cast<std::uint32_t> (indices_in.size ());
+ normals_filtered_cloud->width = indices_in.size ();
normals_filtered_cloud->height = 1;
normals_filtered_cloud->points.resize (normals_filtered_cloud->width);
for (std::size_t i = 0; i < indices_in.size (); ++i)
{
- normals_filtered_cloud->points[i].x = surface_->points[indices_in[i]].x;
- normals_filtered_cloud->points[i].y = surface_->points[indices_in[i]].y;
- normals_filtered_cloud->points[i].z = surface_->points[indices_in[i]].z;
+ (*normals_filtered_cloud)[i].x = (*surface_)[indices_in[i]].x;
+ (*normals_filtered_cloud)[i].y = (*surface_)[indices_in[i]].y;
+ (*normals_filtered_cloud)[i].z = (*surface_)[indices_in[i]].z;
}
std::vector<pcl::PointIndices> clusters;
- if(normals_filtered_cloud->points.size() >= min_points_)
+ if(normals_filtered_cloud->size() >= min_points_)
{
//recompute normals and use them for clustering
KdTreePtr normals_tree_filtered (new pcl::search::KdTree<pcl::PointNormal> (false));
for (const auto &index : cluster.indices)
{
- avg_normal += normals_filtered_cloud->points[index].getNormalVector4fMap ();
- avg_centroid += normals_filtered_cloud->points[index].getVector4fMap ();
+ avg_normal += (*normals_filtered_cloud)[index].getNormalVector4fMap ();
+ avg_centroid += (*normals_filtered_cloud)[index].getVector4fMap ();
}
avg_normal /= static_cast<float> (cluster.indices.size ());
//compute modified VFH for all dominant clusters and add them to the list!
output.points.resize (dominant_normals_.size ());
- output.width = static_cast<std::uint32_t> (dominant_normals_.size ());
+ output.width = dominant_normals_.size ();
for (std::size_t i = 0; i < dominant_normals_.size (); ++i)
{
vfh.setCentroidToUse (centroids_dominant_orientations_[i]);
pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
vfh.compute (vfh_signature);
- output.points[i] = vfh_signature.points[0];
+ output[i] = vfh_signature[0];
}
}
else
output.points.resize (1);
output.width = 1;
- output.points[0] = vfh_signature.points[0];
+ output[0] = vfh_signature[0];
}
}
}
// Check if the size of normals is the same as the size of the surface
- if (input_normals_small_->points.size () != input_->points.size ())
+ if (input_normals_small_->size () != input_->size ())
{
PCL_ERROR ("[pcl::%s::initCompute] ", getClassName().c_str ());
PCL_ERROR ("The number of points in the input dataset differs from ");
return (false);
}
- if (input_normals_large_->points.size () != input_->points.size ())
+ if (input_normals_large_->size () != input_->size ())
{
PCL_ERROR ("[pcl::%s::initCompute] ", getClassName().c_str ());
PCL_ERROR ("The number of points in the input dataset differs from ");
pcl::DifferenceOfNormalsEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
//perform DoN subtraction and return results
- for (std::size_t point_id = 0; point_id < input_->points.size (); ++point_id)
+ for (std::size_t point_id = 0; point_id < input_->size (); ++point_id)
{
- output.points[point_id].getNormalVector3fMap () = (input_normals_small_->points[point_id].getNormalVector3fMap ()
- - input_normals_large_->points[point_id].getNormalVector3fMap ()) / 2.0;
- if(!std::isfinite (output.points[point_id].normal_x) ||
- !std::isfinite (output.points[point_id].normal_y) ||
- !std::isfinite (output.points[point_id].normal_z)){
- output.points[point_id].getNormalVector3fMap () = Eigen::Vector3f(0,0,0);
+ output[point_id].getNormalVector3fMap () = ((*input_normals_small_)[point_id].getNormalVector3fMap ()
+ - (*input_normals_large_)[point_id].getNormalVector3fMap ()) / 2.0;
+ if(!std::isfinite (output[point_id].normal_x) ||
+ !std::isfinite (output[point_id].normal_y) ||
+ !std::isfinite (output[point_id].normal_z)){
+ output[point_id].getNormalVector3fMap () = Eigen::Vector3f(0,0,0);
}
- output.points[point_id].curvature = output.points[point_id].getNormalVector3fMap ().norm();
+ output[point_id].curvature = output[point_id].getNormalVector3fMap ().norm();
}
}
#define PCL_FEATURES_IMPL_ESF_H_
#include <pcl/features/esf.h>
-#include <pcl/common/common.h>
#include <pcl/common/distances.h>
#include <pcl/common/transforms.h>
#include <vector>
unsigned int sample_size = 20000;
// @TODO: Replace with c++ stdlib uniform_random_generator
srand (static_cast<unsigned int> (time (nullptr)));
- int maxindex = static_cast<int> (pc.points.size ());
+ const auto maxindex = pc.size ();
std::vector<float> d2v, d1v, d3v, wt_d3;
std::vector<int> wt_d2;
continue;
}
- Eigen::Vector4f p1 = pc.points[index1].getVector4fMap ();
- Eigen::Vector4f p2 = pc.points[index2].getVector4fMap ();
- Eigen::Vector4f p3 = pc.points[index3].getVector4fMap ();
+ Eigen::Vector4f p1 = pc[index1].getVector4fMap ();
+ Eigen::Vector4f p2 = pc[index2].getVector4fMap ();
+ Eigen::Vector4f p3 = pc[index3].getVector4fMap ();
// A3
Eigen::Vector4f v21 (p2 - p1);
}
// D2
- d2v.push_back (pcl::euclideanDistance (pc.points[index1], pc.points[index2]));
- d2v.push_back (pcl::euclideanDistance (pc.points[index1], pc.points[index3]));
- d2v.push_back (pcl::euclideanDistance (pc.points[index2], pc.points[index3]));
+ d2v.push_back (pcl::euclideanDistance (pc[index1], pc[index2]));
+ d2v.push_back (pcl::euclideanDistance (pc[index1], pc[index3]));
+ d2v.push_back (pcl::euclideanDistance (pc[index2], pc[index3]));
int vxlcnt_sum = 0;
int p_cnt = 0;
template <typename PointInT, typename PointOutT> void
pcl::ESFEstimation<PointInT, PointOutT>::voxelize9 (PointCloudIn &cluster)
{
- for (std::size_t i = 0; i < cluster.points.size (); ++i)
+ for (const auto& point: cluster)
{
- int xx = cluster.points[i].x<0.0? static_cast<int>(std::floor(cluster.points[i].x)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].x)+GRIDSIZE_H-1);
- int yy = cluster.points[i].y<0.0? static_cast<int>(std::floor(cluster.points[i].y)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].y)+GRIDSIZE_H-1);
- int zz = cluster.points[i].z<0.0? static_cast<int>(std::floor(cluster.points[i].z)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].z)+GRIDSIZE_H-1);
+ int xx = point.x<0.0? static_cast<int>(std::floor(point.x)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.x)+GRIDSIZE_H-1);
+ int yy = point.y<0.0? static_cast<int>(std::floor(point.y)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.y)+GRIDSIZE_H-1);
+ int zz = point.z<0.0? static_cast<int>(std::floor(point.z)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.z)+GRIDSIZE_H-1);
for (int x = -1; x < 2; x++)
for (int y = -1; y < 2; y++)
template <typename PointInT, typename PointOutT> void
pcl::ESFEstimation<PointInT, PointOutT>::cleanup9 (PointCloudIn &cluster)
{
- for (std::size_t i = 0; i < cluster.points.size (); ++i)
+ for (const auto& point: cluster)
{
- int xx = cluster.points[i].x<0.0? static_cast<int>(std::floor(cluster.points[i].x)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].x)+GRIDSIZE_H-1);
- int yy = cluster.points[i].y<0.0? static_cast<int>(std::floor(cluster.points[i].y)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].y)+GRIDSIZE_H-1);
- int zz = cluster.points[i].z<0.0? static_cast<int>(std::floor(cluster.points[i].z)+GRIDSIZE_H) : static_cast<int>(std::ceil(cluster.points[i].z)+GRIDSIZE_H-1);
+ int xx = point.x<0.0? static_cast<int>(std::floor(point.x)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.x)+GRIDSIZE_H-1);
+ int yy = point.y<0.0? static_cast<int>(std::floor(point.y)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.y)+GRIDSIZE_H-1);
+ int zz = point.z<0.0? static_cast<int>(std::floor(point.z)+GRIDSIZE_H) : static_cast<int>(std::ceil(point.z)+GRIDSIZE_H-1);
for (int x = -1; x < 2; x++)
for (int y = -1; y < 2; y++)
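// The xx/yy/zz lines in voxelize9/cleanup9 above map a (pre-scaled) coordinate to a
// voxel index. Assuming GRIDSIZE_H is half of a 64-cell grid, as in esf.h, the
// mapping sends [-GRIDSIZE_H, GRIDSIZE_H] onto cell indices 0..63, splitting at 0:

#include <cmath>
#include <cstdio>

constexpr int GRIDSIZE_H = 32;   // assumption: GRIDSIZE / 2 with GRIDSIZE == 64

inline int to_cell (float c)
{
  return c < 0.0f ? static_cast<int> (std::floor (c)) + GRIDSIZE_H
                  : static_cast<int> (std::ceil (c)) + GRIDSIZE_H - 1;
}

int main ()
{
  std::printf ("%d %d %d %d\n",
               to_cell (-31.9f), to_cell (-0.3f), to_cell (0.3f), to_cell (31.9f));
  // prints: 0 31 32 63
  return 0;
}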
float max_distance = 0;
pcl::PointXYZ cog (0, 0, 0);
- for (std::size_t i = 0; i < local_cloud_.points.size (); ++i)
+ for (const auto& point: local_cloud_)
{
- float d = pcl::euclideanDistance(cog,local_cloud_.points[i]);
+ float d = pcl::euclideanDistance(cog,point);
if (d > max_distance)
max_distance = d;
}
output.height = 1;
for (std::size_t d = 0; d < hist.size (); ++d)
- output.points[0].histogram[d] = hist[d];
+ output[0].histogram[d] = hist[d];
}
#define PCL_INSTANTIATE_ESFEstimation(T,OutT) template class PCL_EXPORTS pcl::ESFEstimation<T,OutT>;
// If the input width or height are not set, set output width as size
if (indices_->size () != input_->points.size () || input_->width * input_->height == 0)
{
- output.width = static_cast<std::uint32_t> (indices_->size ());
+ output.width = indices_->size ();
output.height = 1;
}
else
if (normals_->points.size () != surface_->points.size ())
{
PCL_ERROR ("[pcl::%s::initCompute] ", getClassName ().c_str ());
- PCL_ERROR ("The number of points in the input dataset (%u) differs from ", surface_->points.size ());
- PCL_ERROR ("the number of points in the dataset containing the normals (%u)!\n", normals_->points.size ());
+ PCL_ERROR("The number of points in the input dataset (%zu) differs from ",
+ static_cast<std::size_t>(surface_->points.size()));
+ PCL_ERROR("the number of points in the dataset containing the normals (%zu)!\n",
+ static_cast<std::size_t>(normals_->points.size()));
Feature<PointInT, PointOutT>::deinitCompute ();
return (false);
}
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
int p_idx, int q_idx, float &f1, float &f2, float &f3, float &f4)
{
- pcl::computePairFeatures (cloud.points[p_idx].getVector4fMap (), normals.points[p_idx].getNormalVector4fMap (),
- cloud.points[q_idx].getVector4fMap (), normals.points[q_idx].getNormalVector4fMap (),
+ pcl::computePairFeatures (cloud[p_idx].getVector4fMap (), normals[p_idx].getNormalVector4fMap (),
+ cloud[q_idx].getVector4fMap (), normals[q_idx].getNormalVector4fMap (),
f1, f2, f3, f4);
return (true);
}
std::vector<float> nn_dists (k_);
std::set<int> spfh_indices;
- spfh_hist_lookup.resize (surface_->points.size ());
+ spfh_hist_lookup.resize (surface_->size ());
// Build a list of (unique) indices for which we will need to compute SPFH signatures
// (We need an SPFH signature for every point that is a neighbor of any point in input_[indices_])
if (surface_ != input_ ||
- indices_->size () != surface_->points.size ())
+ indices_->size () != surface_->size ())
{
for (const auto& p_idx: *indices_)
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
for (Eigen::Index d = 0; d < fpfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
weightPointSPFHSignature (hist_f1_, hist_f2_, hist_f3_, nn_indices, nn_dists, fpfh_histogram_);
// ...and copy it into the output cloud
- std::copy_n(fpfh_histogram_.data (), fpfh_histogram_.size (), output.points[idx].histogram);
+ std::copy_n(fpfh_histogram_.data (), fpfh_histogram_.size (), output[idx].histogram);
}
}
else
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
for (Eigen::Index d = 0; d < fpfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
weightPointSPFHSignature (hist_f1_, hist_f2_, hist_f3_, nn_indices, nn_dists, fpfh_histogram_);
// ...and copy it into the output cloud
- std::copy_n(fpfh_histogram_.data (), fpfh_histogram_.size (), output.points[idx].histogram);
+ std::copy_n(fpfh_histogram_.data (), fpfh_histogram_.size (), output[idx].histogram);
}
}
}
pcl::FPFHEstimationOMP<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
std::vector<int> spfh_indices_vec;
- std::vector<int> spfh_hist_lookup (surface_->points.size ());
+ std::vector<int> spfh_hist_lookup (surface_->size ());
// Build a list of (unique) indices for which we will need to compute SPFH signatures
// (We need an SPFH signature for every point that is a neighbor of any point in input_[indices_])
if (surface_ != input_ ||
- indices_->size () != surface_->points.size ())
+ indices_->size () != surface_->size ())
{
std::vector<int> nn_indices (k_); // \note These resizes are irrelevant for a radiusSearch ().
std::vector<float> nn_dists (k_);
#pragma omp parallel for \
default(none) \
shared(spfh_hist_lookup, spfh_indices_vec) \
- private(nn_indices, nn_dists) \
+ firstprivate(nn_indices, nn_dists) \
num_threads(threads_)
for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t> (spfh_indices_vec.size ()); ++i)
{
#pragma omp parallel for \
default(none) \
shared(nr_bins, output, spfh_hist_lookup) \
- private(nn_dists, nn_indices) \
+ firstprivate(nn_dists, nn_indices) \
num_threads(threads_)
for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t> (indices_->size ()); ++idx)
{
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
for (int d = 0; d < nr_bins; ++d)
- output.points[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// ...and copy it into the output cloud
for (int d = 0; d < nr_bins; ++d)
- output.points[idx].histogram[d] = fpfh_histogram[d];
+ output[idx].histogram[d] = fpfh_histogram[d];
}
}
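// The private -> firstprivate changes in the OpenMP pragmas above (and in the later
// intensity-gradient hunks) preserve the pre-sizing of nn_indices / nn_dists:
// firstprivate gives every thread a copy of the already-resized vectors, while
// private would hand each thread a default-constructed, empty vector. A small
// self-contained illustration (variable names are placeholders):

#include <cstdio>
#include <vector>

int main ()
{
  std::vector<int> nn_indices (10);   // sized once, before the parallel region

#pragma omp parallel for default(none) firstprivate(nn_indices)
  for (int i = 0; i < 4; ++i)
  {
    // Each thread starts from a copy that already has 10 slots; with
    // private(nn_indices) it would start from an empty vector instead.
    std::printf ("iteration %d sees %zu pre-allocated slots\n", i, nn_indices.size ());
  }
  return 0;
}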
#include <pcl/features/gasd.h>
#include <pcl/common/transforms.h>
-#include <pcl/point_types_conversion.h>
#include <vector>
{
const std::size_t idx = ( (i + 1) * (grid_size + 2) + (j + 1)) * (grid_size + 2) + (k + 1);
- std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output.points[0].histogram + pos);
+ std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output[0].histogram + pos);
pos += hists_size;
}
}
copyShapeHistogramsToOutput (shape_grid_size, shape_hists_size_, shape_hists, output, pos_);
// set remaining values of the descriptor to zero (if any)
- std::fill (output.points[0].histogram + pos_, output.points[0].histogram + output.points[0].descriptorSize (), 0.0f);
+ std::fill (output[0].histogram + pos_, output[0].histogram + output[0].descriptorSize (), 0.0f);
}
//////////////////////////////////////////////////////////////////////////////////////////////
hists[idx][1] += hists[idx][hists_size + 1];
hists[idx][hists_size] += hists[idx][0];
- std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output.points[0].histogram + pos);
+ std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output[0].histogram + pos);
pos += hists_size;
}
}
copyColorHistogramsToOutput (color_grid_size, color_hists_size_, color_hists, output, pos_);
// set remaining values of the descriptor to zero (if any)
- std::fill (output.points[0].histogram + pos_, output.points[0].histogram + output.points[0].descriptorSize (), 0.0f);
+ std::fill (output[0].histogram + pos_, output[0].histogram + output[0].descriptorSize (), 0.0f);
}
#define PCL_INSTANTIATE_GASDEstimation(InT, OutT) template class PCL_EXPORTS pcl::GASDEstimation<InT, OutT>;
#include <pcl/features/gfpfh.h>
#include <pcl/octree/octree_search.h>
-#include <pcl/common/eigen.h>
+#include <Eigen/Core> // for Vector3f
#include <algorithm>
#include <fstream>
output.width = 1;
output.height = 1;
output.points.resize (1);
- std::copy (gfpfh_histogram.cbegin (), gfpfh_histogram.cend (), output.points[0].histogram);
+ std::copy (gfpfh_histogram.cbegin (), gfpfh_histogram.cend (), output[0].histogram);
}
//////////////////////////////////////////////////////////////////////////////////////////////
std::vector<std::uint32_t> counts (getNumberOfClasses () + 1, 0);
for (const int &nn_index : indices)
{
- std::uint32_t label = labels_->points[nn_index].label;
+ std::uint32_t label = (*labels_)[nn_index].label;
counts[label] += 1;
}
// Save the type of each point
int NR_CLASS = 5; // TODO make this nicer
- std::vector<int> types (radii->points.size ());
+ std::vector<int> types (radii->size ());
std::transform(radii->points.cbegin (), radii->points.cend (), types.begin (),
[](const auto& point) {
// GCC 5.4 can't find unqualified getSimpleType
// Get the transitions between surface types between neighbors of occupied cells
Eigen::MatrixXi transition_matrix = Eigen::MatrixXi::Zero (NR_CLASS + 1, NR_CLASS + 1);
- for (std::size_t idx = 0; idx < cloud_downsampled->points.size (); ++idx)
+ for (std::size_t idx = 0; idx < cloud_downsampled->size (); ++idx)
{
const int source_type = types[idx];
- std::vector<int> neighbors = grid.getNeighborCentroidIndices (cloud_downsampled->points[idx], relative_coordinates_all_);
+ std::vector<int> neighbors = grid.getNeighborCentroidIndices ((*cloud_downsampled)[idx], relative_coordinates_all_);
for (const int &neighbor : neighbors)
{
int neighbor_type = NR_CLASS;
int nrf = 0;
for (int i = 0; i < NR_CLASS + 1; i++)
for (int j = i; j < NR_CLASS + 1; j++)
- output.points[0].histogram[nrf++] = transition_matrix (i, j) + transition_matrix (j, i);
+ output[0].histogram[nrf++] = transition_matrix (i, j) + transition_matrix (j, i);
}
#define PCL_INSTANTIATE_GRSDEstimation(T,NT,OutT) template class PCL_EXPORTS pcl::GRSDEstimation<T,NT,OutT>;
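// Sanity check for the histogram flattening just above: symmetrizing the
// (NR_CLASS + 1) x (NR_CLASS + 1) transition matrix and keeping only entries with
// i <= j yields (N + 1)(N + 2) / 2 bins, i.e. 21 for NR_CLASS = 5, which matches the
// 21-bin GRSD signature:

#include <cstdio>

int main ()
{
  const int NR_CLASS = 5;
  int nrf = 0;
  for (int i = 0; i < NR_CLASS + 1; i++)
    for (int j = i; j < NR_CLASS + 1; j++)
      ++nrf;
  std::printf ("%d\n", nrf);   // 21
  return 0;
}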
// number of DataType entries per row (equal or bigger than element_stride number of elements per row)
int row_stride = element_stride * input_->width;
- const float *data_ = reinterpret_cast<const float*> (&input_->points[0]);
+ const float *data_ = reinterpret_cast<const float*> (&(*input_)[0]);
integral_image_XYZ_.setSecondOrderComputation (false);
integral_image_XYZ_.setInput (data_, input_->width, input_->height, element_stride, row_stride);
// number of DataType entries per row (equal or bigger than element_stride number of elements per row)
int row_stride = element_stride * input_->width;
- const float *data_ = reinterpret_cast<const float*> (&input_->points[0]);
+ const float *data_ = reinterpret_cast<const float*> (&(*input_)[0]);
integral_image_XYZ_.setSecondOrderComputation (true);
integral_image_XYZ_.setInput (data_, input_->width, input_->height, element_stride, row_stride);
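// The reinterpret_cast lines above view the organized cloud's point array as a flat
// float buffer for the integral images. The stride arithmetic, sketched here with an
// explicit point type (a PointXYZ occupies four floats: x, y, z plus padding):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void stride_sketch (const pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  if (cloud.empty ())
    return;
  const int element_stride = sizeof (pcl::PointXYZ) / sizeof (float);   // 4 floats per point
  const int row_stride     = element_stride * cloud.width;              // floats per image row
  const float *data        = reinterpret_cast<const float *> (&cloud[0]);
  (void) row_stride;
  (void) data;   // would be handed to setInput (data, width, height, element_stride, row_stride)
}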
template <typename PointInT, typename PointOutT> void
pcl::IntegralImageNormalEstimation<PointInT, PointOutT>::initAverage3DGradientMethod ()
{
- std::size_t data_size = (input_->points.size () << 2);
+ std::size_t data_size = (input_->size () << 2);
diff_x_ = new float[data_size];
diff_y_ = new float[data_size];
// number of DataType entries per row (equal or bigger than element_stride number of elements per row)
int row_stride = element_stride * input_->width;
- const float *data_ = reinterpret_cast<const float*> (&input_->points[0]);
+ const float *data_ = reinterpret_cast<const float*> (&(*input_)[0]);
// integral image over the z - value
integral_image_depth_.setInput (&(data_[2]), input_->width, input_->height, element_stride, row_stride);
float eigen_value;
Eigen::Vector3f eigen_vector;
pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
normal.getNormalVector3fMap () = eigen_vector;
// Compute the curvature surface change
float ny = static_cast<float> (normal_vector [1]);
float nz = static_cast<float> (normal_vector [2]);
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
normal.normal_x = nx;
normal.normal_y = ny;
float mean_U_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x - rect_width_4_, pos_y - rect_height_2_, rect_width_2_, rect_height_2_) / count_U_z);
float mean_D_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x - rect_width_4_, pos_y + 1 , rect_width_2_, rect_height_2_) / count_D_z);
- PointInT pointL = input_->points[point_index - rect_width_4_ - 1];
- PointInT pointR = input_->points[point_index + rect_width_4_ + 1];
- PointInT pointU = input_->points[point_index - rect_height_4_ * input_->width - 1];
- PointInT pointD = input_->points[point_index + rect_height_4_ * input_->width + 1];
+ PointInT pointL = (*input_)[point_index - rect_width_4_ - 1];
+ PointInT pointR = (*input_)[point_index + rect_width_4_ + 1];
+ PointInT pointU = (*input_)[point_index - rect_height_4_ * input_->width - 1];
+ PointInT pointD = (*input_)[point_index + rect_height_4_ * input_->width + 1];
const float mean_x_z = mean_R_z - mean_L_z;
const float mean_y_z = mean_D_z - mean_U_z;
return;
}
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
const float scale = 1.0f / std::sqrt (normal_length);
float ny = static_cast<float> (normal_vector [1]);
float nz = static_cast<float> (normal_vector [2]);
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
normal.normal_x = nx;
normal.normal_y = ny;
float eigen_value;
Eigen::Vector3f eigen_vector;
pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
normal.getNormalVector3fMap () = eigen_vector;
// Compute the curvature surface change
float ny = static_cast<float> (normal_vector [1]);
float nz = static_cast<float> (normal_vector [2]);
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, nx, ny, nz);
normal.normal_x = nx;
normal.normal_y = ny;
mean_D_z /= float (count_D_z);
- PointInT pointL = input_->points[point_index_L_y*width + point_index_L_x];
- PointInT pointR = input_->points[point_index_R_y*width + point_index_R_x];
- PointInT pointU = input_->points[point_index_U_y*width + point_index_U_x];
- PointInT pointD = input_->points[point_index_D_y*width + point_index_D_x];
+ PointInT pointL = (*input_)[point_index_L_y*width + point_index_L_x];
+ PointInT pointR = (*input_)[point_index_R_y*width + point_index_R_x];
+ PointInT pointU = (*input_)[point_index_U_y*width + point_index_U_x];
+ PointInT pointD = (*input_)[point_index_D_y*width + point_index_D_x];
const float mean_x_z = mean_R_z - mean_L_z;
const float mean_y_z = mean_D_z - mean_U_z;
return;
}
- flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
+ flipNormalTowardsViewpoint ((*input_)[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);
const float scale = 1.0f / std::sqrt (normal_length);
float bad_point = std::numeric_limits<float>::quiet_NaN ();
// compute depth-change map
- unsigned char * depthChangeMap = new unsigned char[input_->points.size ()];
- memset (depthChangeMap, 255, input_->points.size ());
+ unsigned char * depthChangeMap = new unsigned char[input_->size ()];
+ memset (depthChangeMap, 255, input_->size ());
unsigned index = 0;
for (unsigned int ri = 0; ri < input_->height-1; ++ri)
}
// compute distance map
- //float *distanceMap = new float[input_->points.size ()];
+ //float *distanceMap = new float[input_->size ()];
delete[] distance_map_;
- distance_map_ = new float[input_->points.size ()];
+ distance_map_ = new float[input_->size ()];
float *distanceMap = distance_map_;
- for (std::size_t index = 0; index < input_->points.size (); ++index)
+ for (std::size_t index = 0; index < input_->size (); ++index)
{
if (depthChangeMap[index] == 0)
distanceMap[index] = 0.0f;
{
index = ri * input_->width + ci;
- const float depth = input_->points[index].z;
+ const float depth = (*input_)[index].z;
if (!std::isfinite (depth))
{
output[index].getNormalVector3fMap ().setConstant (bad_point);
{
index = ri * input_->width + ci;
- if (!std::isfinite (input_->points[index].z))
+ if (!std::isfinite ((*input_)[index].z))
{
output [index].getNormalVector3fMap ().setConstant (bad_point);
output [index].curvature = bad_point;
{
index = ri * input_->width + ci;
- const float depth = input_->points[index].z;
+ const float depth = (*input_)[index].z;
if (!std::isfinite (depth))
{
output[index].getNormalVector3fMap ().setConstant (bad_point);
{
index = ri * input_->width + ci;
- if (!std::isfinite (input_->points[index].z))
+ if (!std::isfinite ((*input_)[index].z))
{
output [index].getNormalVector3fMap ().setConstant (bad_point);
output [index].curvature = bad_point;
unsigned v = pt_index / input_->width;
if (v < border || v > bottom)
{
- output.points[idx].getNormalVector3fMap ().setConstant (bad_point);
- output.points[idx].curvature = bad_point;
+ output[idx].getNormalVector3fMap ().setConstant (bad_point);
+ output[idx].curvature = bad_point;
continue;
}
if (u < border || u > right)
{
- output.points[idx].getNormalVector3fMap ().setConstant (bad_point);
- output.points[idx].curvature = bad_point;
+ output[idx].getNormalVector3fMap ().setConstant (bad_point);
+ output[idx].curvature = bad_point;
continue;
}
- const float depth = input_->points[pt_index].z;
+ const float depth = (*input_)[pt_index].z;
if (!std::isfinite (depth))
{
- output.points[idx].getNormalVector3fMap ().setConstant (bad_point);
- output.points[idx].curvature = bad_point;
+ output[idx].getNormalVector3fMap ().setConstant (bad_point);
+ output[idx].curvature = bad_point;
continue;
}
unsigned v = pt_index / input_->width;
if (v < border || v > bottom)
{
- output.points[idx].getNormalVector3fMap ().setConstant (bad_point);
- output.points[idx].curvature = bad_point;
+ output[idx].getNormalVector3fMap ().setConstant (bad_point);
+ output[idx].curvature = bad_point;
continue;
}
if (u < border || u > right)
{
- output.points[idx].getNormalVector3fMap ().setConstant (bad_point);
- output.points[idx].curvature = bad_point;
+ output[idx].getNormalVector3fMap ().setConstant (bad_point);
+ output[idx].curvature = bad_point;
continue;
}
- if (!std::isfinite (input_->points[pt_index].z))
+ if (!std::isfinite ((*input_)[pt_index].z))
{
output [idx].getNormalVector3fMap ().setConstant (bad_point);
output [idx].curvature = bad_point;
unsigned u = pt_index % input_->width;
unsigned v = pt_index / input_->width;
- const float depth = input_->points[pt_index].z;
+ const float depth = (*input_)[pt_index].z;
if (!std::isfinite (depth))
{
output[idx].getNormalVector3fMap ().setConstant (bad_point);
unsigned u = pt_index % input_->width;
unsigned v = pt_index / input_->width;
- if (!std::isfinite (input_->points[pt_index].z))
+ if (!std::isfinite ((*input_)[pt_index].z))
{
output [idx].getNormalVector3fMap ().setConstant (bad_point);
output [idx].curvature = bad_point;
for (const int &nn_index : indices)
{
- PointInT p = cloud.points[nn_index];
+ PointInT p = cloud[nn_index];
if (!std::isfinite (p.x) ||
!std::isfinite (p.y) ||
!std::isfinite (p.z) ||
#pragma omp parallel for \
default(none) \
shared(output) \
- private(nn_indices, nn_dists) \
+ firstprivate(nn_indices, nn_dists) \
num_threads(threads_)
// Iterating over the entire index vector
for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t> (indices_->size ()); ++idx)
{
- PointOutT &p_out = output.points[idx];
+ PointOutT &p_out = output[idx];
if (!this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists))
{
centroid.setZero ();
for (const int &nn_index : nn_indices)
{
- centroid += surface_->points[nn_index].getVector3fMap ();
- mean_intensity += intensity_ (surface_->points[nn_index]);
+ centroid += (*surface_)[nn_index].getVector3fMap ();
+ mean_intensity += intensity_ ((*surface_)[nn_index]);
}
centroid /= static_cast<float> (nn_indices.size ());
mean_intensity /= static_cast<float> (nn_indices.size ());
- Eigen::Vector3f normal = Eigen::Vector3f::Map (normals_->points[(*indices_) [idx]].normal);
+ Eigen::Vector3f normal = Eigen::Vector3f::Map ((*normals_)[(*indices_) [idx]].normal);
Eigen::Vector3f gradient;
computePointIntensityGradient (*surface_, nn_indices, centroid, mean_intensity, normal, gradient);
#pragma omp parallel for \
default(none) \
shared(output) \
- private(nn_indices, nn_dists) \
+ firstprivate(nn_indices, nn_dists) \
num_threads(threads_)
// Iterating over the entire index vector
for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t> (indices_->size ()); ++idx)
{
- PointOutT &p_out = output.points[idx];
+ PointOutT &p_out = output[idx];
if (!isFinite ((*surface_) [(*indices_)[idx]]) ||
!this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists))
{
}
centroid /= static_cast<float> (cp);
mean_intensity /= static_cast<float> (cp);
- Eigen::Vector3f normal = Eigen::Vector3f::Map (normals_->points[(*indices_) [idx]].normal);
+ Eigen::Vector3f normal = Eigen::Vector3f::Map ((*normals_)[(*indices_) [idx]].normal);
Eigen::Vector3f gradient;
computePointIntensityGradient (*surface_, nn_indices, centroid, mean_intensity, normal, gradient);
float max_intensity = -std::numeric_limits<float>::max ();
for (int idx = 0; idx < k; ++idx)
{
- min_intensity = (std::min) (min_intensity, cloud.points[indices[idx]].intensity);
- max_intensity = (std::max) (max_intensity, cloud.points[indices[idx]].intensity);
+ min_intensity = (std::min) (min_intensity, cloud[indices[idx]].intensity);
+ max_intensity = (std::max) (max_intensity, cloud[indices[idx]].intensity);
}
float constant = 1.0f / (2.0f * sigma_ * sigma_);
const float eps = std::numeric_limits<float>::epsilon ();
float d = static_cast<float> (nr_distance_bins) * std::sqrt (squared_distances[idx]) / (radius + eps);
float i = static_cast<float> (nr_intensity_bins) *
- (cloud.points[indices[idx]].intensity - min_intensity) / (max_intensity - min_intensity + eps);
+ (cloud[indices[idx]].intensity - min_intensity) / (max_intensity - min_intensity + eps);
if (sigma == 0)
{
Eigen::MatrixXf intensity_spin_image (nr_intensity_bins_, nr_distance_bins_);
// Allocate enough space to hold the radiusSearch results
- std::vector<int> nn_indices (surface_->points.size ());
- std::vector<float> nn_dist_sqr (surface_->points.size ());
+ std::vector<int> nn_indices (surface_->size ());
+ std::vector<float> nn_dist_sqr (surface_->size ());
output.is_dense = true;
// Iterating over the entire index vector
if (k == 0)
{
for (int bin = 0; bin < nr_intensity_bins_ * nr_distance_bins_; ++bin)
- output.points[idx].histogram[bin] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[bin] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
std::size_t bin = 0;
for (Eigen::Index bin_j = 0; bin_j < intensity_spin_image.cols (); ++bin_j)
for (Eigen::Index bin_i = 0; bin_i < intensity_spin_image.rows (); ++bin_i)
- output.points[idx].histogram[bin++] = intensity_spin_image (bin_i, bin_j);
+ output[idx].histogram[bin++] = intensity_spin_image (bin_i, bin_j);
}
}
#define EIGEN_II_METHOD 1
#include <pcl/features/linear_least_squares_normal.h>
-#include <pcl/common/time.h>
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT>
pcl::LinearLeastSquaresNormalEstimation<PointInT, PointOutT>::~LinearLeastSquaresNormalEstimation ()
const int index = y * width + x;
- const float px = input_->points[index].x;
- const float py = input_->points[index].y;
- const float pz = input_->points[index].z;
+ const float px = (*input_)[index].x;
+ const float py = (*input_)[index].y;
+ const float pz = (*input_)[index].z;
if (std::isnan (px))
{
const int index2 = v * width + u;
- const float qx = input_->points[index2].x;
- const float qy = input_->points[index2].y;
- const float qz = input_->points[index2].z;
+ const float qx = (*input_)[index2].x;
+ const float qy = (*input_)[index2].y;
+ const float qz = (*input_)[index2].z;
if (std::isnan (qx)) continue;
{
const int index = y * width + x;
- const float px = input_->points[index].x;
- const float py = input_->points[index].y;
- const float pz = input_->points[index].z;
+ const float px = (*input_)[index].x;
+ const float py = (*input_)[index].y;
+ const float pz = (*input_)[index].z;
if (std::isnan(px)) continue;
const int index2 = v * width + u;
- const float qx = input_->points[index2].x;
- const float qy = input_->points[index2].y;
- const float qz = input_->points[index2].z;
+ const float qx = (*input_)[index2].x;
+ const float qy = (*input_)[index2].y;
+ const float qz = (*input_)[index2].z;
if (std::isnan(qx)) continue;
if (length <= 0.0f)
{
- output.points[index].normal_x = bad_point;
- output.points[index].normal_y = bad_point;
- output.points[index].normal_z = bad_point;
- output.points[index].curvature = bad_point;
+ output[index].normal_x = bad_point;
+ output[index].normal_y = bad_point;
+ output[index].normal_z = bad_point;
+ output[index].curvature = bad_point;
}
else
{
const float normInv = 1.0f / std::sqrt (length);
- output.points[index].normal_x = nx * normInv;
- output.points[index].normal_y = ny * normInv;
- output.points[index].normal_z = nz * normInv;
- output.points[index].curvature = bad_point;
+ output[index].normal_x = nx * normInv;
+ output[index].normal_y = ny * normInv;
+ output[index].normal_z = nz * normInv;
+ output[index].curvature = bad_point;
}
}
}
for (const int &index : indices)
{
// Demean the points
- temp_pt_[0] = cloud.points[index].x - xyz_centroid_[0];
- temp_pt_[1] = cloud.points[index].y - xyz_centroid_[1];
- temp_pt_[2] = cloud.points[index].z - xyz_centroid_[2];
+ temp_pt_[0] = cloud[index].x - xyz_centroid_[0];
+ temp_pt_[1] = cloud[index].y - xyz_centroid_[1];
+ temp_pt_[2] = cloud[index].z - xyz_centroid_[2];
mu200 += temp_pt_[0] * temp_pt_[0];
mu020 += temp_pt_[1] * temp_pt_[1];
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].j1 = output.points[idx].j2 = output.points[idx].j3 = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].j1 = output[idx].j2 = output[idx].j3 = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
computePointMomentInvariants (*surface_, nn_indices,
- output.points[idx].j1, output.points[idx].j2, output.points[idx].j3);
+ output[idx].j1, output[idx].j2, output[idx].j3);
}
}
else
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].j1 = output.points[idx].j2 = output.points[idx].j3 = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].j1 = output[idx].j2 = output[idx].j3 = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
computePointMomentInvariants (*surface_, nn_indices,
- output.points[idx].j1, output.points[idx].j2, output.points[idx].j3);
+ output[idx].j1, output[idx].j2, output[idx].j3);
}
}
}
#define PCL_MOMENT_OF_INERTIA_ESTIMATION_HPP_
#include <pcl/features/moment_of_inertia_estimation.h>
+#include <pcl/features/feature.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT>
unsigned int number_of_points = static_cast <unsigned int> (indices_->size ());
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
- float x = (input_->points[(*indices_)[i_point]].x - mean_value_ (0)) * major_axis_ (0) +
- (input_->points[(*indices_)[i_point]].y - mean_value_ (1)) * major_axis_ (1) +
- (input_->points[(*indices_)[i_point]].z - mean_value_ (2)) * major_axis_ (2);
- float y = (input_->points[(*indices_)[i_point]].x - mean_value_ (0)) * middle_axis_ (0) +
- (input_->points[(*indices_)[i_point]].y - mean_value_ (1)) * middle_axis_ (1) +
- (input_->points[(*indices_)[i_point]].z - mean_value_ (2)) * middle_axis_ (2);
- float z = (input_->points[(*indices_)[i_point]].x - mean_value_ (0)) * minor_axis_ (0) +
- (input_->points[(*indices_)[i_point]].y - mean_value_ (1)) * minor_axis_ (1) +
- (input_->points[(*indices_)[i_point]].z - mean_value_ (2)) * minor_axis_ (2);
+ float x = ((*input_)[(*indices_)[i_point]].x - mean_value_ (0)) * major_axis_ (0) +
+ ((*input_)[(*indices_)[i_point]].y - mean_value_ (1)) * major_axis_ (1) +
+ ((*input_)[(*indices_)[i_point]].z - mean_value_ (2)) * major_axis_ (2);
+ float y = ((*input_)[(*indices_)[i_point]].x - mean_value_ (0)) * middle_axis_ (0) +
+ ((*input_)[(*indices_)[i_point]].y - mean_value_ (1)) * middle_axis_ (1) +
+ ((*input_)[(*indices_)[i_point]].z - mean_value_ (2)) * middle_axis_ (2);
+ float z = ((*input_)[(*indices_)[i_point]].x - mean_value_ (0)) * minor_axis_ (0) +
+ ((*input_)[(*indices_)[i_point]].y - mean_value_ (1)) * minor_axis_ (1) +
+ ((*input_)[(*indices_)[i_point]].z - mean_value_ (2)) * minor_axis_ (2);
if (x <= obb_min_point_.x) obb_min_point_.x = x;
if (y <= obb_min_point_.y) obb_min_point_.y = y;
unsigned int number_of_points = static_cast <unsigned int> (indices_->size ());
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
- mean_value_ (0) += input_->points[(*indices_)[i_point]].x;
- mean_value_ (1) += input_->points[(*indices_)[i_point]].y;
- mean_value_ (2) += input_->points[(*indices_)[i_point]].z;
+ mean_value_ (0) += (*input_)[(*indices_)[i_point]].x;
+ mean_value_ (1) += (*input_)[(*indices_)[i_point]].y;
+ mean_value_ (2) += (*input_)[(*indices_)[i_point]].z;
- if (input_->points[(*indices_)[i_point]].x <= aabb_min_point_.x) aabb_min_point_.x = input_->points[(*indices_)[i_point]].x;
- if (input_->points[(*indices_)[i_point]].y <= aabb_min_point_.y) aabb_min_point_.y = input_->points[(*indices_)[i_point]].y;
- if (input_->points[(*indices_)[i_point]].z <= aabb_min_point_.z) aabb_min_point_.z = input_->points[(*indices_)[i_point]].z;
+ if ((*input_)[(*indices_)[i_point]].x <= aabb_min_point_.x) aabb_min_point_.x = (*input_)[(*indices_)[i_point]].x;
+ if ((*input_)[(*indices_)[i_point]].y <= aabb_min_point_.y) aabb_min_point_.y = (*input_)[(*indices_)[i_point]].y;
+ if ((*input_)[(*indices_)[i_point]].z <= aabb_min_point_.z) aabb_min_point_.z = (*input_)[(*indices_)[i_point]].z;
- if (input_->points[(*indices_)[i_point]].x >= aabb_max_point_.x) aabb_max_point_.x = input_->points[(*indices_)[i_point]].x;
- if (input_->points[(*indices_)[i_point]].y >= aabb_max_point_.y) aabb_max_point_.y = input_->points[(*indices_)[i_point]].y;
- if (input_->points[(*indices_)[i_point]].z >= aabb_max_point_.z) aabb_max_point_.z = input_->points[(*indices_)[i_point]].z;
+ if ((*input_)[(*indices_)[i_point]].x >= aabb_max_point_.x) aabb_max_point_.x = (*input_)[(*indices_)[i_point]].x;
+ if ((*input_)[(*indices_)[i_point]].y >= aabb_max_point_.y) aabb_max_point_.y = (*input_)[(*indices_)[i_point]].y;
+ if ((*input_)[(*indices_)[i_point]].z >= aabb_max_point_.z) aabb_max_point_.z = (*input_)[(*indices_)[i_point]].z;
}
if (number_of_points == 0)
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
Eigen::Vector3f current_point (0.0f, 0.0f, 0.0f);
- current_point (0) = input_->points[(*indices_)[i_point]].x - mean_value_ (0);
- current_point (1) = input_->points[(*indices_)[i_point]].y - mean_value_ (1);
- current_point (2) = input_->points[(*indices_)[i_point]].z - mean_value_ (2);
+ current_point (0) = (*input_)[(*indices_)[i_point]].x - mean_value_ (0);
+ current_point (1) = (*input_)[(*indices_)[i_point]].y - mean_value_ (1);
+ current_point (2) = (*input_)[(*indices_)[i_point]].z - mean_value_ (2);
covariance_matrix += current_point * current_point.transpose ();
}
{
covariance_matrix.setZero ();
- unsigned int number_of_points = static_cast <unsigned int> (cloud->points.size ());
+ const auto number_of_points = cloud->size ();
float factor = 1.0f / static_cast <float> ((number_of_points - 1 > 0)?(number_of_points - 1):1);
Eigen::Vector3f current_point;
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
- current_point (0) = cloud->points[i_point].x - mean_value_ (0);
- current_point (1) = cloud->points[i_point].y - mean_value_ (1);
- current_point (2) = cloud->points[i_point].z - mean_value_ (2);
+ current_point (0) = (*cloud)[i_point].x - mean_value_ (0);
+ current_point (1) = (*cloud)[i_point].y - mean_value_ (1);
+ current_point (2) = (*cloud)[i_point].z - mean_value_ (2);
covariance_matrix += current_point * current_point.transpose ();
}
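// The loop above accumulates outer products of demeaned points; combined with the
// (number_of_points - 1 > 0) ? ... : 1 factor this is the usual unbiased covariance
// estimate. The same computation in isolation, guarded against fewer than two points:

#include <Eigen/Core>
#include <cstddef>
#include <vector>

inline Eigen::Matrix3f covariance_of_demeaned (const std::vector<Eigen::Vector3f> &demeaned)
{
  Eigen::Matrix3f cov = Eigen::Matrix3f::Zero ();
  for (const auto &p : demeaned)
    cov += p * p.transpose ();
  const std::size_t n = demeaned.size ();
  return cov / static_cast<float> (n > 1 ? n - 1 : 1);
}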
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
Eigen::Vector3f vector;
- vector (0) = mean_value (0) - input_->points[(*indices_)[i_point]].x;
- vector (1) = mean_value (1) - input_->points[(*indices_)[i_point]].y;
- vector (2) = mean_value (2) - input_->points[(*indices_)[i_point]].z;
+ vector (0) = mean_value (0) - (*input_)[(*indices_)[i_point]].x;
+ vector (1) = mean_value (1) - (*input_)[(*indices_)[i_point]].y;
+ vector (2) = mean_value (2) - (*input_)[(*indices_)[i_point]].z;
Eigen::Vector3f product = vector.cross (current_axis);
for (unsigned int i_point = 0; i_point < number_of_points; i_point++)
{
const unsigned int index = (*indices_)[i_point];
- float K = - (D + normal_vector (0) * input_->points[index].x + normal_vector (1) * input_->points[index].y + normal_vector (2) * input_->points[index].z);
+ float K = - (D + normal_vector (0) * (*input_)[index].x + normal_vector (1) * (*input_)[index].y + normal_vector (2) * (*input_)[index].z);
PointT projected_point;
- projected_point.x = input_->points[index].x + K * normal_vector (0);
- projected_point.y = input_->points[index].y + K * normal_vector (1);
- projected_point.z = input_->points[index].z + K * normal_vector (2);
- projected_cloud->points[i_point] = projected_point;
+ projected_point.x = (*input_)[index].x + K * normal_vector (0);
+ projected_point.y = (*input_)[index].y + K * normal_vector (1);
+ projected_point.z = (*input_)[index].z + K * normal_vector (2);
+ (*projected_cloud)[i_point] = projected_point;
}
projected_cloud->width = number_of_points;
projected_cloud->height = 1;
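// The projection loop above in vector form: for a plane n . x + D = 0 with unit
// normal n, K = -(D + n . p) is the signed distance from p to the plane, so
// p + K * n is the orthogonal projection of p onto it. A compact Eigen sketch:

#include <Eigen/Core>

inline Eigen::Vector3f project_onto_plane (const Eigen::Vector3f &p,
                                           const Eigen::Vector3f &n,   // unit length
                                           float D)
{
  const float K = - (D + n.dot (p));
  return p + K * n;
}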
#ifndef PCL_FEATURES_IMPL_MULTISCALE_FEATURE_PERSISTENCE_H_
#define PCL_FEATURES_IMPL_MULTISCALE_FEATURE_PERSISTENCE_H_
-#include <numeric>
#include <pcl/features/multiscale_feature_persistence.h>
//////////////////////////////////////////////////////////////////////////////////////////////
// Vectorize each feature and insert it into the vectorized feature storage
std::vector<std::vector<float> > feature_cloud_vectorized;
- feature_cloud_vectorized.reserve (feature_cloud->points.size ());
+ feature_cloud_vectorized.reserve (feature_cloud->size ());
for (const auto& feature: feature_cloud->points)
{
// Select only points outside (mean +/- alpha * standard_dev)
std::list<std::size_t> indices_per_scale;
- std::vector<bool> indices_table_per_scale (features_at_scale_[scale_i]->points.size (), false);
- for (std::size_t point_i = 0; point_i < features_at_scale_[scale_i]->points.size (); ++point_i)
+ std::vector<bool> indices_table_per_scale (features_at_scale_[scale_i]->size (), false);
+ for (std::size_t point_i = 0; point_i < features_at_scale_[scale_i]->size (); ++point_i)
{
if (diff_vector[point_i] > alpha_ * standard_dev)
{
{
if (unique_features_table_[scale_i][*feature_it] == true)
{
- output_features.points.push_back (features_at_scale[scale_i]->points[*feature_it]);
+ output_features.points.push_back ((*features_at_scale_[scale_i])[*feature_it]);
output_indices->push_back (feature_estimator_->getIndices ()->at (*feature_it));
}
}
if (present_in_all)
{
- output_features.points.emplace_back (features_at_scale_.front ()->points[feature]);
+ output_features.points.emplace_back ((*features_at_scale_.front ())[feature]);
output_indices->emplace_back (feature_estimator_->getIndices ()->at (feature));
}
}
// Consider that output cloud is unorganized
output_features.header = feature_estimator_->getInputCloud ()->header;
output_features.is_dense = feature_estimator_->getInputCloud ()->is_dense;
- output_features.width = static_cast<std::uint32_t> (output_features.points.size ());
+ output_features.width = output_features.size ();
output_features.height = 1;
}
#include <iostream>
#include <map>
+#include <pcl/common/norms.h> // for L1_Norm
+#include <pcl/common/eigen.h>
namespace pcl {
for (std::size_t idx = 0; idx < indices_->size (); ++idx)
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
- !computePointNormal (*surface_, nn_indices, output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2], output.points[idx].curvature))
+ !computePointNormal (*surface_, nn_indices, output[idx].normal[0], output[idx].normal[1], output[idx].normal[2], output[idx].curvature))
{
- output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].normal[0] = output[idx].normal[1] = output[idx].normal[2] = output[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
- flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
- output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+ flipNormalTowardsViewpoint ((*input_)[(*indices_)[idx]], vpx_, vpy_, vpz_,
+ output[idx].normal[0], output[idx].normal[1], output[idx].normal[2]);
}
}
{
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
- !computePointNormal (*surface_, nn_indices, output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2], output.points[idx].curvature))
+ !computePointNormal (*surface_, nn_indices, output[idx].normal[0], output[idx].normal[1], output[idx].normal[2], output[idx].curvature))
{
- output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].normal[0] = output[idx].normal[1] = output[idx].normal[2] = output[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
- flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
- output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+ flipNormalTowardsViewpoint ((*input_)[(*indices_)[idx]], vpx_, vpy_, vpz_,
+ output[idx].normal[0], output[idx].normal[1], output[idx].normal[2]);
}
}
{
Eigen::Vector4f n;
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
- !pcl::computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
+ !pcl::computePointNormal (*surface_, nn_indices, n, output[idx].curvature))
{
- output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].normal[0] = output[idx].normal[1] = output[idx].normal[2] = output[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
- output.points[idx].normal_x = n[0];
- output.points[idx].normal_y = n[1];
- output.points[idx].normal_z = n[2];
+ output[idx].normal_x = n[0];
+ output[idx].normal_y = n[1];
+ output[idx].normal_z = n[2];
- flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
- output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+ flipNormalTowardsViewpoint ((*input_)[(*indices_)[idx]], vpx_, vpy_, vpz_,
+ output[idx].normal[0], output[idx].normal[1], output[idx].normal[2]);
}
}
Eigen::Vector4f n;
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
- !pcl::computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
+ !pcl::computePointNormal (*surface_, nn_indices, n, output[idx].curvature))
{
- output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].normal[0] = output[idx].normal[1] = output[idx].normal[2] = output[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
- output.points[idx].normal_x = n[0];
- output.points[idx].normal_y = n[1];
- output.points[idx].normal_z = n[2];
+ output[idx].normal_x = n[0];
+ output[idx].normal_y = n[1];
+ output[idx].normal_z = n[2];
- flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
- output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+ flipNormalTowardsViewpoint ((*input_)[(*indices_)[idx]], vpx_, vpy_, vpz_,
+ output[idx].normal[0], output[idx].normal[1], output[idx].normal[2]);
}
}
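// flipNormalTowardsViewpoint, called throughout the normal-estimation hunks above,
// boils down to one sign test: if the normal points away from the viewpoint, negate
// it so every normal faces the sensor. A sketch of that semantics (not the PCL
// implementation itself):

#include <Eigen/Core>

inline void flip_towards_viewpoint (const Eigen::Vector3f &point,
                                    const Eigen::Vector3f &viewpoint,
                                    Eigen::Vector3f &normal)
{
  if ((viewpoint - point).dot (normal) < 0.0f)
    normal = -normal;
}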
// do a few checks before starting the computations
PointFeature test_feature;
- (void)test_feature;
if (N_prime_ * M_prime_ != sizeof (test_feature.values) / sizeof (float))
{
PCL_ERROR ("NormalBasedSignatureEstimation: not using the proper signature size: %u vs %u\n", N_prime_ * M_prime_, sizeof (test_feature.values) / sizeof (float));
std::size_t point_i = (*indices_)[index_i];
Eigen::MatrixXf s_matrix (N_, M_);
- Eigen::Vector4f center_point = input_->points[point_i].getVector4fMap ();
+ Eigen::Vector4f center_point = (*input_)[point_i].getVector4fMap ();
for (std::size_t k = 0; k < N_; ++k)
{
for (std::size_t l = 0; l < M_; ++l)
{
- Eigen::Vector4f normal = normals_->points[point_i].getNormalVector4fMap ();
+ Eigen::Vector4f normal = (*normals_)[point_i].getNormalVector4fMap ();
Eigen::Vector4f normal_u = Eigen::Vector4f::Zero ();
Eigen::Vector4f normal_v = Eigen::Vector4f::Zero ();
{
if (k_sqr_distances[nn_i] < 1e-7f)
{
- average_normal = normals_->points[k_indices[nn_i]].getNormalVector4fMap ();
+ average_normal = (*normals_)[k_indices[nn_i]].getNormalVector4fMap ();
average_normalization_factor = 1.0f;
break;
}
- average_normal += normals_->points[k_indices[nn_i]].getNormalVector4fMap () / k_sqr_distances[nn_i];
+ average_normal += (*normals_)[k_indices[nn_i]].getNormalVector4fMap () / k_sqr_distances[nn_i];
average_normalization_factor += 1.0f / k_sqr_distances[nn_i];
}
average_normal /= average_normalization_factor;
for (std::size_t j = 0; j < M_prime_; ++j)
feature_point.values[i*M_prime_ + j] = final_matrix (i, j);
- output.points[index_i] = feature_point;
+ output[index_i] = feature_point;
}
}
#include <pcl/2d/edge.h>
#include <pcl/features/organized_edge_detection.h>
-#include <pcl/console/print.h>
/**
* Directions: 1 2 3
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->points.size (), invalid_pt);
+ labels.points.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
{
const unsigned invalid_label = unsigned (0);
label_indices.resize (num_of_edgetype_);
- for (std::size_t idx = 0; idx < input_->points.size (); idx++)
+ for (std::size_t idx = 0; idx < input_->size (); idx++)
{
if (labels[idx].label != invalid_label)
{
for (int col = 1; col < int(input_->width) - 1; col++)
{
int curr_idx = row*int(input_->width) + col;
- if (!std::isfinite (input_->points[curr_idx].z))
+ if (!std::isfinite ((*input_)[curr_idx].z))
continue;
- float curr_depth = std::abs (input_->points[curr_idx].z);
+ float curr_depth = std::abs ((*input_)[curr_idx].z);
// Calculate depth distances between current point and neighboring points
std::vector<float> nghr_dist;
for (int d_idx = 0; d_idx < num_of_ngbr; d_idx++)
{
int nghr_idx = curr_idx + directions[d_idx].d_index;
- assert (nghr_idx >= 0 && nghr_idx < input_->points.size ());
- if (!std::isfinite (input_->points[nghr_idx].z))
+ assert (nghr_idx >= 0 && nghr_idx < input_->size ());
+ if (!std::isfinite ((*input_)[nghr_idx].z))
{
found_invalid_neighbor = true;
break;
}
- nghr_dist[d_idx] = curr_depth - std::abs (input_->points[nghr_idx].z);
+ nghr_dist[d_idx] = curr_depth - std::abs ((*input_)[nghr_idx].z);
}
if (!found_invalid_neighbor)
std::tie (min_itr, max_itr) = std::minmax_element (nghr_dist.cbegin (), nghr_dist.cend ());
float nghr_dist_min = *min_itr;
float nghr_dist_max = *max_itr;
- float dist_dominant = std::max(std::abs (nghr_dist_min),std::abs (nghr_dist_max));
+ float dist_dominant = std::abs (nghr_dist_min) > std::abs (nghr_dist_max) ? nghr_dist_min : nghr_dist_max;
if (std::abs (dist_dominant) > th_depth_discon_*std::abs (curr_depth))
{
// Found a depth discontinuity
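// The dist_dominant change above is behavioral, not cosmetic: taking the max of the
// absolute values discarded the sign of the dominant depth jump, and that sign is
// what the subsequent labeling uses to tell occluding from occluded edge points.
// Side-by-side, with illustrative values:

#include <algorithm>
#include <cmath>
#include <cstdio>

inline float dominant_signed (float nghr_dist_min, float nghr_dist_max)
{
  return std::abs (nghr_dist_min) > std::abs (nghr_dist_max) ? nghr_dist_min
                                                             : nghr_dist_max;
}

int main ()
{
  std::printf ("%g vs %g\n",
               dominant_signed (-0.5f, 0.2f),                      // -0.5: sign preserved
               std::max (std::abs (-0.5f), std::abs (0.2f)));      //  0.5: sign lost
  return 0;
}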
for (const auto &direction : directions)
{
int nghr_idx = curr_idx + direction.d_index;
- assert (nghr_idx >= 0 && nghr_idx < input_->points.size ());
- if (!std::isfinite (input_->points[nghr_idx].z))
+ assert (nghr_idx >= 0 && nghr_idx < input_->size ());
+ if (!std::isfinite ((*input_)[nghr_idx].z))
{
dx += direction.d_x;
dy += direction.d_y;
if (s_row < 0 || s_row >= int(input_->height) || s_col < 0 || s_col >= int(input_->width))
break;
- if (std::isfinite (input_->points[s_row*int(input_->width)+s_col].z))
+ if (std::isfinite ((*input_)[s_row*int(input_->width)+s_col].z))
{
- corr_depth = std::abs (input_->points[s_row*int(input_->width)+s_col].z);
+ corr_depth = std::abs ((*input_)[s_row*int(input_->width)+s_col].z);
break;
}
}
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->points.size (), invalid_pt);
+ labels.points.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->points.size (), invalid_pt);
+ labels.points.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
{
for (std::uint32_t col=0; col<normals_->width; col++)
{
- nx (col, row).intensity = normals_->points[row*normals_->width + col].normal_x;
- ny (col, row).intensity = normals_->points[row*normals_->width + col].normal_y;
+ nx (col, row).intensity = (*normals_)[row*normals_->width + col].normal_x;
+ ny (col, row).intensity = (*normals_)[row*normals_->width + col].normal_y;
}
}
{
pcl::Label invalid_pt;
invalid_pt.label = unsigned (0);
- labels.points.resize (input_->points.size (), invalid_pt);
+ labels.points.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
#include <pcl/features/our_cvfh.h>
#include <pcl/features/vfh.h>
#include <pcl/features/normal_3d.h>
-#include <pcl/features/pfh_tools.h>
#include <pcl/common/transforms.h>
//////////////////////////////////////////////////////////////////////////////////////////////
unsigned int min_pts_per_cluster,
unsigned int max_pts_per_cluster)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
- if (cloud.points.size () != normals.points.size ())
+ if (cloud.size () != normals.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Number of points in the input point cloud (%lu) different than normals (%lu)!\n", cloud.points.size (), normals.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point "
+ "cloud (%zu) different than normals (%zu)!\n",
+ static_cast<std::size_t>(cloud.size()),
+ static_cast<std::size_t>(normals.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
if (processed[i])
continue;
//processed[nn_indices[j]] = true;
// [-1;1]
- double dot_p = normals.points[seed_queue[sq_idx]].normal[0] * normals.points[nn_indices[j]].normal[0]
- + normals.points[seed_queue[sq_idx]].normal[1] * normals.points[nn_indices[j]].normal[1] + normals.points[seed_queue[sq_idx]].normal[2]
- * normals.points[nn_indices[j]].normal[2];
+ double dot_p = normals[seed_queue[sq_idx]].normal[0] * normals[nn_indices[j]].normal[0]
+ + normals[seed_queue[sq_idx]].normal[1] * normals[nn_indices[j]].normal[1] + normals[seed_queue[sq_idx]].normal[2]
+ * normals[nn_indices[j]].normal[2];
if (std::abs (std::acos (dot_p)) < eps_angle)
{
std::vector<int> &indices_out, std::vector<int> &indices_in,
float threshold)
{
- indices_out.resize (cloud.points.size ());
- indices_in.resize (cloud.points.size ());
+ indices_out.resize (cloud.size ());
+ indices_in.resize (cloud.size ());
std::size_t in, out;
in = out = 0;
for (const int &index : indices_to_use)
{
- if (cloud.points[index].curvature > threshold)
+ if (cloud[index].curvature > threshold)
{
indices_out[out] = index;
out++;
Eigen::Affine3f transformPC (Eigen::AngleAxisf (static_cast<float> (rotation), axis));
- grid->points.resize (processed->points.size ());
- for (std::size_t k = 0; k < processed->points.size (); k++)
- grid->points[k].getVector4fMap () = processed->points[k].getVector4fMap ();
+ grid->resize (processed->size ());
+ for (std::size_t k = 0; k < processed->size (); k++)
+ (*grid)[k].getVector4fMap () = (*processed)[k].getVector4fMap ();
pcl::transformPointCloud (*grid, *grid, transformPC);
for (const int &index : indices.indices)
{
- Eigen::Vector3f pvector = grid->points[index].getVector3fMap ();
+ Eigen::Vector3f pvector = (*grid)[index].getVector3fMap ();
float d_k = (pvector).norm ();
float w = (max_dist - d_k);
Eigen::Vector3f diff = (pvector);
float hist_incr;
if (normalize_bins_)
- hist_incr = 100.0f / static_cast<float> (grid->points.size () - 1);
+ hist_incr = 100.0f / static_cast<float> (grid->size () - 1);
else
hist_incr = 1.0f;
vfh_signature.points.resize (1);
vfh_signature.width = vfh_signature.height = 1;
for (int d = 0; d < 308; ++d)
- vfh_signature.points[0].histogram[d] = output.points[i].histogram[d];
+ vfh_signature[0].histogram[d] = output[i].histogram[d];
int pos = 45 * 3;
for (int k = 0; k < num_hists; k++)
{
for (int ii = 0; ii < size_hists; ii++, pos++)
{
- vfh_signature.points[0].histogram[pos] = quadrants[k][ii];
+ vfh_signature[0].histogram[pos] = quadrants[k][ii];
}
}
- ourcvfh_output.points.push_back (vfh_signature.points[0]);
- ourcvfh_output.width = ourcvfh_output.points.size ();
+ ourcvfh_output.push_back (vfh_signature[0]);
+ ourcvfh_output.width = ourcvfh_output.size ();
delete[] weights;
}
}
output.points.clear ();
return;
}
- if (normals_->points.size () != surface_->points.size ())
+ if (normals_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] The number of points in the input dataset differs from the number of points in the dataset containing the normals!\n", getClassName ().c_str ());
output.width = output.height = 0;
filterNormalsWithHighCurvature (*normals_, *indices_, indices_out, indices_in, curv_threshold_);
pcl::PointCloud<pcl::PointNormal>::Ptr normals_filtered_cloud (new pcl::PointCloud<pcl::PointNormal> ());
- normals_filtered_cloud->width = static_cast<std::uint32_t> (indices_in.size ());
+ normals_filtered_cloud->width = indices_in.size ();
normals_filtered_cloud->height = 1;
normals_filtered_cloud->points.resize (normals_filtered_cloud->width);
for (std::size_t i = 0; i < indices_in.size (); ++i)
{
- normals_filtered_cloud->points[i].x = surface_->points[indices_in[i]].x;
- normals_filtered_cloud->points[i].y = surface_->points[indices_in[i]].y;
- normals_filtered_cloud->points[i].z = surface_->points[indices_in[i]].z;
- //normals_filtered_cloud->points[i].getNormalVector4fMap() = normals_->points[indices_in[i]].getNormalVector4fMap();
+ (*normals_filtered_cloud)[i].x = (*surface_)[indices_in[i]].x;
+ (*normals_filtered_cloud)[i].y = (*surface_)[indices_in[i]].y;
+ (*normals_filtered_cloud)[i].z = (*surface_)[indices_in[i]].z;
+ //(*normals_filtered_cloud)[i].getNormalVector4fMap() = (*normals_)[indices_in[i]].getNormalVector4fMap();
indices_from_nfc_to_indices[i] = indices_in[i];
}
std::vector<pcl::PointIndices> clusters;
- if (normals_filtered_cloud->points.size () >= min_points_)
+ if (normals_filtered_cloud->size () >= min_points_)
{
//recompute normals and use them for clustering
{
for (const auto &index : cluster.indices)
{
- avg_normal += normals_filtered_cloud->points[index].getNormalVector4fMap ();
- avg_centroid += normals_filtered_cloud->points[index].getVector4fMap ();
+ avg_normal += (*normals_filtered_cloud)[index].getNormalVector4fMap ();
+ avg_centroid += (*normals_filtered_cloud)[index].getVector4fMap ();
}
avg_normal /= static_cast<float> (cluster.indices.size ());
for (const auto &index : cluster.indices)
{
//decide if normal should be added
- double dot_p = avg_normal.dot (normals_filtered_cloud->points[index].getNormalVector4fMap ());
+ double dot_p = avg_normal.dot ((*normals_filtered_cloud)[index].getNormalVector4fMap ());
if (std::abs (std::acos (dot_p)) < (eps_angle_threshold_ * refine_clusters_))
{
clusters_[cluster_filtered_idx].indices.push_back (indices_from_nfc_to_indices[index]);
for (const auto &index : cluster.indices)
{
- avg_normal += normals_filtered_cloud->points[index].getNormalVector4fMap ();
- avg_centroid += normals_filtered_cloud->points[index].getVector4fMap ();
+ avg_normal += (*normals_filtered_cloud)[index].getNormalVector4fMap ();
+ avg_centroid += (*normals_filtered_cloud)[index].getVector4fMap ();
}
avg_normal /= static_cast<float> (cluster.indices.size ());
//compute modified VFH for all dominant clusters and add them to the list!
output.points.resize (dominant_normals_.size ());
- output.width = static_cast<std::uint32_t> (dominant_normals_.size ());
+ output.width = dominant_normals_.size ();
for (std::size_t i = 0; i < dominant_normals_.size (); ++i)
{
vfh.setCentroidToUse (centroids_dominant_orientations_[i]);
pcl::PointCloud<pcl::VFHSignature308> vfh_signature;
vfh.compute (vfh_signature);
- output.points[i] = vfh_signature.points[0];
+ output[i] = vfh_signature[0];
}
//finish filling the descriptor with the shape distribution
output.points.resize (1);
output.width = 1;
- output.points[0] = vfh_signature.points[0];
+ output[0] = vfh_signature[0];
Eigen::Matrix4f id = Eigen::Matrix4f::Identity ();
transforms_.push_back (id);
valid_transforms_.push_back (false);
const pcl::PointCloud<PointInT> &cloud, const pcl::PointCloud<PointNT> &normals,
int p_idx, int q_idx, float &f1, float &f2, float &f3, float &f4)
{
- pcl::computePairFeatures (cloud.points[p_idx].getVector4fMap (), normals.points[p_idx].getNormalVector4fMap (),
- cloud.points[q_idx].getVector4fMap (), normals.points[q_idx].getNormalVector4fMap (),
+ pcl::computePairFeatures (cloud[p_idx].getVector4fMap (), normals[p_idx].getNormalVector4fMap (),
+ cloud[q_idx].getVector4fMap (), normals[q_idx].getNormalVector4fMap (),
f1, f2, f3, f4);
return (true);
}
for (std::size_t j_idx = 0; j_idx < i_idx; ++j_idx)
{
// If the 3D points are invalid, don't bother estimating, just continue
- if (!isFinite (cloud.points[indices[i_idx]]) || !isFinite (cloud.points[indices[j_idx]]))
+ if (!isFinite (cloud[indices[i_idx]]) || !isFinite (cloud[indices[j_idx]]))
continue;
if (use_cache_)
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
for (Eigen::Index d = 0; d < pfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < pfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = pfh_histogram_[d];
+ output[idx].histogram[d] = pfh_histogram_[d];
}
}
else
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
for (Eigen::Index d = 0; d < pfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].histogram[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < pfh_histogram_.size (); ++d)
- output.points[idx].histogram[d] = pfh_histogram_[d];
+ output[idx].histogram[d] = pfh_histogram_[d];
}
}
}
int p_idx, int q_idx,
float &f1, float &f2, float &f3, float &f4, float &f5, float &f6, float &f7)
{
- Eigen::Vector4i colors1 (cloud.points[p_idx].r, cloud.points[p_idx].g, cloud.points[p_idx].b, 0),
- colors2 (cloud.points[q_idx].r, cloud.points[q_idx].g, cloud.points[q_idx].b, 0);
- pcl::computeRGBPairFeatures (cloud.points[p_idx].getVector4fMap (), normals.points[p_idx].getNormalVector4fMap (),
+ Eigen::Vector4i colors1 (cloud[p_idx].r, cloud[p_idx].g, cloud[p_idx].b, 0),
+ colors2 (cloud[q_idx].r, cloud[q_idx].g, cloud[q_idx].b, 0);
+ pcl::computeRGBPairFeatures (cloud[p_idx].getVector4fMap (), normals[p_idx].getNormalVector4fMap (),
colors1,
- cloud.points[q_idx].getVector4fMap (), normals.points[q_idx].getNormalVector4fMap (),
+ cloud[q_idx].getVector4fMap (), normals[q_idx].getNormalVector4fMap (),
colors2,
f1, f2, f3, f4, f5, f6, f7);
return (true);
computePointPFHRGBSignature (*surface_, *normals_, nn_indices, nr_subdiv_, pfhrgb_histogram_);
std::copy_n (pfhrgb_histogram_.data (), pfhrgb_histogram_.size (),
- output.points[idx].histogram);
+ output[idx].histogram);
}
}
pcl::PPFEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
// Initialize output container - overwrite the sizes done by Feature::initCompute ()
- output.points.resize (indices_->size () * input_->points.size ());
+ output.points.resize (indices_->size () * input_->size ());
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.is_dense = true;
// Compute point pair features for every pair of points in the cloud
for (std::size_t index_i = 0; index_i < indices_->size (); ++index_i)
{
std::size_t i = (*indices_)[index_i];
- for (std::size_t j = 0 ; j < input_->points.size (); ++j)
+ for (std::size_t j = 0 ; j < input_->size (); ++j)
{
PointOutT p;
if (i != j)
{
if (//pcl::computePPFPairFeature
- pcl::computePairFeatures (input_->points[i].getVector4fMap (),
- normals_->points[i].getNormalVector4fMap (),
- input_->points[j].getVector4fMap (),
- normals_->points[j].getNormalVector4fMap (),
+ pcl::computePairFeatures ((*input_)[i].getVector4fMap (),
+ (*normals_)[i].getNormalVector4fMap (),
+ (*input_)[j].getVector4fMap (),
+ (*normals_)[j].getNormalVector4fMap (),
p.f1, p.f2, p.f3, p.f4))
{
// Calculate alpha_m angle
- Eigen::Vector3f model_reference_point = input_->points[i].getVector3fMap (),
- model_reference_normal = normals_->points[i].getNormalVector3fMap (),
- model_point = input_->points[j].getVector3fMap ();
+ Eigen::Vector3f model_reference_point = (*input_)[i].getVector3fMap (),
+ model_reference_normal = (*normals_)[i].getNormalVector3fMap (),
+ model_point = (*input_)[j].getVector3fMap ();
float rotation_angle = std::acos (model_reference_normal.dot (Eigen::Vector3f::UnitX ()));
bool parallel_to_x = (model_reference_normal.y() == 0.0f && model_reference_normal.z() == 0.0f);
Eigen::Vector3f rotation_axis = (parallel_to_x)?(Eigen::Vector3f::UnitY ()):(model_reference_normal.cross (Eigen::Vector3f::UnitX ()). normalized());
output.is_dense = false;
}
- output.points[index_i*input_->points.size () + j] = p;
+ output[index_i*input_->size () + j] = p;
}
}
}
pcl::PPFRGBEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
// Initialize output container - overwrite the sizes done by Feature::initCompute ()
- output.points.resize (indices_->size () * input_->points.size ());
+ output.points.resize (indices_->size () * input_->size ());
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
// Compute point pair features for every pair of points in the cloud
for (std::size_t index_i = 0; index_i < indices_->size (); ++index_i)
{
std::size_t i = (*indices_)[index_i];
- for (std::size_t j = 0 ; j < input_->points.size (); ++j)
+ for (std::size_t j = 0 ; j < input_->size (); ++j)
{
PointOutT p;
if (i != j)
{
if (pcl::computeRGBPairFeatures
- (input_->points[i].getVector4fMap (), normals_->points[i].getNormalVector4fMap (), input_->points[i].getRGBVector4i (),
- input_->points[j].getVector4fMap (), normals_->points[j].getNormalVector4fMap (), input_->points[j].getRGBVector4i (),
+ ((*input_)[i].getVector4fMap (), (*normals_)[i].getNormalVector4fMap (), (*input_)[i].getRGBVector4i (),
+ (*input_)[j].getVector4fMap (), (*normals_)[j].getNormalVector4fMap (), (*input_)[j].getRGBVector4i (),
p.f1, p.f2, p.f3, p.f4, p.r_ratio, p.g_ratio, p.b_ratio))
{
// Calculate alpha_m angle
- Eigen::Vector3f model_reference_point = input_->points[i].getVector3fMap (),
- model_reference_normal = normals_->points[i].getNormalVector3fMap (),
- model_point = input_->points[j].getVector3fMap ();
+ Eigen::Vector3f model_reference_point = (*input_)[i].getVector3fMap (),
+ model_reference_normal = (*normals_)[i].getNormalVector3fMap (),
+ model_point = (*input_)[j].getVector3fMap ();
Eigen::AngleAxisf rotation_mg (std::acos (model_reference_normal.dot (Eigen::Vector3f::UnitX ())),
model_reference_normal.cross (Eigen::Vector3f::UnitX ()).normalized ());
Eigen::Affine3f transform_mg = Eigen::Translation3f ( rotation_mg * ((-1) * model_reference_point)) * rotation_mg;
else
p.f1 = p.f2 = p.f3 = p.f4 = p.alpha_m = p.r_ratio = p.g_ratio = p.b_ratio = 0.f;
- output.points[index_i*input_->points.size () + j] = p;
+ output[index_i*input_->size () + j] = p;
}
}
}
{
float f1, f2, f3, f4, r_ratio, g_ratio, b_ratio;
if (pcl::computeRGBPairFeatures
- (input_->points[i].getVector4fMap (), normals_->points[i].getNormalVector4fMap (), input_->points[i].getRGBVector4i (),
- input_->points[j].getVector4fMap (), normals_->points[j].getNormalVector4fMap (), input_->points[j].getRGBVector4i (),
+ ((*input_)[i].getVector4fMap (), (*normals_)[i].getNormalVector4fMap (), (*input_)[i].getRGBVector4i (),
+ (*input_)[j].getVector4fMap (), (*normals_)[j].getNormalVector4fMap (), (*input_)[j].getRGBVector4i (),
f1, f2, f3, f4, r_ratio, g_ratio, b_ratio))
{
average_feature_nn.f1 += f1;
average_feature_nn.r_ratio /= normalization_factor;
average_feature_nn.g_ratio /= normalization_factor;
average_feature_nn.b_ratio /= normalization_factor;
- output.points[index_i] = average_feature_nn;
+ output[index_i] = average_feature_nn;
}
- PCL_INFO ("Output size: %u\n", output.points.size ());
+ PCL_INFO ("Output size: %zu\n", static_cast<std::size_t>(output.size ()));
}
float &pcx, float &pcy, float &pcz, float &pc1, float &pc2)
{
EIGEN_ALIGN16 Eigen::Matrix3f I = Eigen::Matrix3f::Identity ();
- Eigen::Vector3f n_idx (normals.points[p_idx].normal[0], normals.points[p_idx].normal[1], normals.points[p_idx].normal[2]);
+ Eigen::Vector3f n_idx (normals[p_idx].normal[0], normals[p_idx].normal[1], normals[p_idx].normal[2]);
EIGEN_ALIGN16 Eigen::Matrix3f M = I - n_idx * n_idx.transpose (); // projection matrix (into tangent plane)
// Project normals into the tangent plane
xyz_centroid_.setZero ();
for (std::size_t idx = 0; idx < indices.size(); ++idx)
{
- normal[0] = normals.points[indices[idx]].normal[0];
- normal[1] = normals.points[indices[idx]].normal[1];
- normal[2] = normals.points[indices[idx]].normal[2];
+ normal[0] = normals[indices[idx]].normal[0];
+ normal[1] = normals[indices[idx]].normal[1];
+ normal[2] = normals[indices[idx]].normal[2];
projected_normals_[idx] = M * normal;
xyz_centroid_ += projected_normals_[idx];
{
if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].principal_curvature[0] = output.points[idx].principal_curvature[1] = output.points[idx].principal_curvature[2] =
- output.points[idx].pc1 = output.points[idx].pc2 = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].principal_curvature[0] = output[idx].principal_curvature[1] = output[idx].principal_curvature[2] =
+ output[idx].pc1 = output[idx].pc2 = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
// Estimate the principal curvatures at each patch
computePointPrincipalCurvatures (*normals_, (*indices_)[idx], nn_indices,
- output.points[idx].principal_curvature[0], output.points[idx].principal_curvature[1], output.points[idx].principal_curvature[2],
- output.points[idx].pc1, output.points[idx].pc2);
+ output[idx].principal_curvature[0], output[idx].principal_curvature[1], output[idx].principal_curvature[2],
+ output[idx].pc1, output[idx].pc2);
}
}
else
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
{
- output.points[idx].principal_curvature[0] = output.points[idx].principal_curvature[1] = output.points[idx].principal_curvature[2] =
- output.points[idx].pc1 = output.points[idx].pc2 = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].principal_curvature[0] = output[idx].principal_curvature[1] = output[idx].principal_curvature[2] =
+ output[idx].pc1 = output[idx].pc2 = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
}
// Estimate the principal curvatures at each patch
computePointPrincipalCurvatures (*normals_, (*indices_)[idx], nn_indices,
- output.points[idx].principal_curvature[0], output.points[idx].principal_curvature[1], output.points[idx].principal_curvature[2],
- output.points[idx].pc1, output.points[idx].pc2);
+ output[idx].principal_curvature[0], output[idx].principal_curvature[1], output[idx].principal_curvature[2],
+ output[idx].pc1, output[idx].pc2);
}
}
}
int index = y*range_image_->width + x;
Eigen::Vector3f*& border_direction = border_directions_[index];
border_direction = nullptr;
- const BorderDescription& border_description = border_descriptions_->points[index];
+ const BorderDescription& border_description = (*border_descriptions_)[index];
const BorderTraits& border_traits = border_description.traits;
if (!border_traits[BORDER_TRAIT__OBSTACLE_BORDER])
return;
int index2 = y2*range_image_->width + x2;
- const BorderTraits& border_traits = border_descriptions_->points[index2].traits;
+ const BorderTraits& border_traits = (*border_descriptions_)[index2].traits;
if (border_traits[BORDER_TRAIT__VEIL_POINT] || border_traits[BORDER_TRAIT__SHADOW_BORDER])
{
beam_valid = false;
int nr_gradient_bins = static_cast<int> (rift_descriptor.cols ());
// Get the center point
- pcl::Vector3fMapConst p0 = cloud.points[p_idx].getVector3fMap ();
+ pcl::Vector3fMapConst p0 = cloud[p_idx].getVector3fMap ();
// Compute the RIFT descriptor
rift_descriptor.setZero ();
for (std::size_t idx = 0; idx < indices.size (); ++idx)
{
// Compute the gradient magnitude and orientation (relative to the center point)
- pcl::Vector3fMapConst point = cloud.points[indices[idx]].getVector3fMap ();
- Eigen::Map<const Eigen::Vector3f> gradient_vector (& (gradient.points[indices[idx]].gradient[0]));
+ pcl::Vector3fMapConst point = cloud[indices[idx]].getVector3fMap ();
+ Eigen::Map<const Eigen::Vector3f> gradient_vector (& (gradient[indices[idx]].gradient[0]));
float gradient_magnitude = gradient_vector.norm ();
float gradient_angle_from_center = std::acos (gradient_vector.dot ((point - p0).normalized ()) / gradient_magnitude);
output.points.clear ();
return;
}
- if (gradient_->points.size () != surface_->points.size ())
+ if (gradient_->size () != surface_->size ())
{
PCL_ERROR ("[pcl::%s::computeFeature] ", getClassName ().c_str ());
PCL_ERROR ("The number of points in the input dataset differs from the number of points in the gradient!\n");
computeRIFT (*surface_, *gradient_, (*indices_)[idx], static_cast<float> (search_radius_), nn_indices, nn_dist_sqr, rift_descriptor);
// Default layout is column major, copy elementwise
- std::copy_n (rift_descriptor.data (), rift_descriptor.size (), output.points[idx].histogram);
+ std::copy_n (rift_descriptor.data (), rift_descriptor.size (), output[idx].histogram);
}
}
{
std::set <unsigned int> local_triangles;
std::vector <int> local_points;
- getLocalSurface (input_->points[idx], local_triangles, local_points);
+ getLocalSurface ((*input_)[idx], local_triangles, local_points);
Eigen::Matrix3f lrf_matrix;
- computeLRF (input_->points[idx], local_triangles, lrf_matrix);
+ computeLRF ((*input_)[idx], local_triangles, lrf_matrix);
PointCloudIn transformed_cloud;
- transformCloud (input_->points[idx], lrf_matrix, local_points, transformed_cloud);
+ transformCloud ((*input_)[idx], lrf_matrix, local_points, transformed_cloud);
std::array<PointInT, 3> axes;
axes[0].x = 1.0f; axes[0].y = 0.0f; axes[0].z = 0.0f;
for (unsigned int i_vertex = 0; i_vertex < 3; i_vertex++)
{
const unsigned int index = triangles_[triangle].vertices[i_vertex];
- pt[i_vertex] (0) = surface_->points[index].x;
- pt[i_vertex] (1) = surface_->points[index].y;
- pt[i_vertex] (2) = surface_->points[index].z;
+ pt[i_vertex] (0) = (*surface_)[index].x;
+ pt[i_vertex] (1) = (*surface_)[index].y;
+ pt[i_vertex] (2) = (*surface_)[index].z;
}
const float curr_area = ((pt[1] - pt[0]).cross (pt[2] - pt[0])).norm ();
for (unsigned int i_vertex = 0; i_vertex < 3; i_vertex++)
{
const unsigned int index = triangles_[triangle].vertices[i_vertex];
- pt[i_vertex] (0) = surface_->points[index].x;
- pt[i_vertex] (1) = surface_->points[index].y;
- pt[i_vertex] (2) = surface_->points[index].z;
+ pt[i_vertex] (0) = (*surface_)[index].x;
+ pt[i_vertex] (1) = (*surface_)[index].y;
+ pt[i_vertex] (2) = (*surface_)[index].z;
}
float factor1 = 0.0f;
for (const auto& idx: local_points)
{
- Eigen::Vector3f transformed_point (surface_->points[idx].x - point.x,
- surface_->points[idx].y - point.y,
- surface_->points[idx].z - point.z);
+ Eigen::Vector3f transformed_point ((*surface_)[idx].x - point.x,
+ (*surface_)[idx].y - point.y,
+ (*surface_)[idx].z - point.z);
transformed_point = matrix * transformed_point;
(1 - cosine) * y * x + sine * z, cosine + (1 - cosine) * y * y, (1 - cosine) * y * z - sine * x,
(1 - cosine) * z * x - sine * y, (1 - cosine) * z * y + sine * x, cosine + (1 - cosine) * z * z;
- const auto number_of_points = cloud.points.size ();
+ const auto number_of_points = cloud.size ();
rotated_cloud.header = cloud.header;
rotated_cloud.width = number_of_points;
matrix (row, col) += 1.0f;
}
- matrix /= std::max<float> (1, cloud.points.size ());
+ matrix /= std::max<float> (1, cloud.size ());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
for (i = begin+1; i != end; ++i)
{
// compute angle between the two lines going through normals (disregard orientation!)
- double cosine = normals.points[*i].normal[0] * normals.points[*begin].normal[0] +
- normals.points[*i].normal[1] * normals.points[*begin].normal[1] +
- normals.points[*i].normal[2] * normals.points[*begin].normal[2];
+ double cosine = normals[*i].normal[0] * normals[*begin].normal[0] +
+ normals[*i].normal[1] * normals[*begin].normal[1] +
+ normals[*i].normal[2] * normals[*begin].normal[2];
if (cosine > 1) cosine = 1;
if (cosine < -1) cosine = -1;
double angle = std::acos (cosine);
if (angle > M_PI/2) angle = M_PI - angle; /// \note: orientation is neglected!
// Compute point to point distance
- double dist = sqrt ((surface.points[*i].x - surface.points[*begin].x) * (surface.points[*i].x - surface.points[*begin].x) +
- (surface.points[*i].y - surface.points[*begin].y) * (surface.points[*i].y - surface.points[*begin].y) +
- (surface.points[*i].z - surface.points[*begin].z) * (surface.points[*i].z - surface.points[*begin].z));
+ double dist = sqrt ((surface[*i].x - surface[*begin].x) * (surface[*i].x - surface[*begin].x) +
+ (surface[*i].y - surface[*begin].y) * (surface[*i].y - surface[*begin].y) +
+ (surface[*i].z - surface[*begin].z) * (surface[*i].z - surface[*begin].z));
if (dist > max_dist)
continue; /// \note: we neglect points that are outside the specified interval!
for (i = begin+1; i != end; ++i)
{
// compute angle between the two lines going through normals (disregard orientation!)
- double cosine = normals.points[*i].normal[0] * normals.points[*begin].normal[0] +
- normals.points[*i].normal[1] * normals.points[*begin].normal[1] +
- normals.points[*i].normal[2] * normals.points[*begin].normal[2];
+ double cosine = normals[*i].normal[0] * normals[*begin].normal[0] +
+ normals[*i].normal[1] * normals[*begin].normal[1] +
+ normals[*i].normal[2] * normals[*begin].normal[2];
if (cosine > 1) cosine = 1;
if (cosine < -1) cosine = -1;
double angle = std::acos (cosine);
{
// Reserve space for the output histogram dataset
histograms_.reset (new std::vector<Eigen::MatrixXf, Eigen::aligned_allocator<Eigen::MatrixXf> >);
- histograms_->reserve (output.points.size ());
+ histograms_->reserve (output.size ());
// Iterating over the entire index vector
for (std::size_t idx = 0; idx < indices_->size (); ++idx)
{
// Compute and store r_min and r_max in the output cloud
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_sqr_dists);
- //histograms_->push_back (computeRSD (*surface_, *normals_, nn_indices, search_radius_, nr_subdiv_, plane_radius_, output.points[idx], true));
- histograms_->push_back (computeRSD (*normals_, nn_indices, nn_sqr_dists, search_radius_, nr_subdiv_, plane_radius_, output.points[idx], true));
+ //histograms_->push_back (computeRSD (*surface_, *normals_, nn_indices, search_radius_, nr_subdiv_, plane_radius_, output[idx], true));
+ histograms_->push_back (computeRSD (*normals_, nn_indices, nn_sqr_dists, search_radius_, nr_subdiv_, plane_radius_, output[idx], true));
}
}
else
{
// Compute and store r_min and r_max in the output cloud
this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_sqr_dists);
- //computeRSD (*surface_, *normals_, nn_indices, search_radius_, nr_subdiv_, plane_radius_, output.points[idx], false);
- computeRSD (*normals_, nn_indices, nn_sqr_dists, search_radius_, nr_subdiv_, plane_radius_, output.points[idx], false);
+ //computeRSD (*surface_, *normals_, nn_indices, search_radius_, nr_subdiv_, plane_radius_, output[idx], false);
+ computeRSD (*normals_, nn_indices, nn_sqr_dists, search_radius_, nr_subdiv_, plane_radius_, output[idx], false);
}
}
}
#include <pcl/features/shot.h>
#include <pcl/features/shot_lrf.h>
-#include <utility>
// Useful constants.
#define PST_PI 3.1415926535897932384626433832795
{
bin_distance_shape.resize (indices.size ());
- const PointRFT& current_frame = frames_->points[index];
+ const PointRFT& current_frame = (*frames_)[index];
//if (!std::isfinite (current_frame.rf[0]) || !std::isfinite (current_frame.rf[4]) || !std::isfinite (current_frame.rf[11]))
//return;
for (std::size_t i_idx = 0; i_idx < indices.size (); ++i_idx)
{
// check NaN normal
- const Eigen::Vector4f& normal_vec = normals_->points[indices[i_idx]].getNormalVector4fMap ();
+ const Eigen::Vector4f& normal_vec = (*normals_)[indices[i_idx]].getNormalVector4fMap ();
if (!std::isfinite (normal_vec[0]) ||
!std::isfinite (normal_vec[1]) ||
!std::isfinite (normal_vec[2]))
if (!std::isfinite(binDistance[i_idx]))
continue;
- Eigen::Vector4f delta = surface_->points[indices[i_idx]].getVector4fMap () - central_point;
+ Eigen::Vector4f delta = (*surface_)[indices[i_idx]].getVector4fMap () - central_point;
delta[3] = 0;
// Compute the Euclidean norm
if (!std::isfinite(binDistanceShape[i_idx]))
continue;
- Eigen::Vector4f delta = surface_->points[indices[i_idx]].getVector4fMap () - central_point;
+ Eigen::Vector4f delta = (*surface_)[indices[i_idx]].getVector4fMap () - central_point;
delta[3] = 0;
// Compute the Euclidean norm
{
binDistanceColor.reserve (nNeighbors);
- //unsigned char redRef = input_->points[(*indices_)[index]].rgba >> 16 & 0xFF;
- //unsigned char greenRef = input_->points[(*indices_)[index]].rgba >> 8& 0xFF;
- //unsigned char blueRef = input_->points[(*indices_)[index]].rgba & 0xFF;
- unsigned char redRef = input_->points[(*indices_)[index]].r;
- unsigned char greenRef = input_->points[(*indices_)[index]].g;
- unsigned char blueRef = input_->points[(*indices_)[index]].b;
+ //unsigned char redRef = (*input_)[(*indices_)[index]].rgba >> 16 & 0xFF;
+ //unsigned char greenRef = (*input_)[(*indices_)[index]].rgba >> 8& 0xFF;
+ //unsigned char blueRef = (*input_)[(*indices_)[index]].rgba & 0xFF;
+ unsigned char redRef = (*input_)[(*indices_)[index]].r;
+ unsigned char greenRef = (*input_)[(*indices_)[index]].g;
+ unsigned char blueRef = (*input_)[(*indices_)[index]].b;
float LRef, aRef, bRef;
for (const auto& idx: indices)
{
- unsigned char red = surface_->points[idx].r;
- unsigned char green = surface_->points[idx].g;
- unsigned char blue = surface_->points[idx].b;
+ unsigned char red = (*surface_)[idx].r;
+ unsigned char green = (*surface_)[idx].g;
+ unsigned char blue = (*surface_)[idx].b;
float L, a, b;
{
// Copy into the resultant cloud
for (int d = 0; d < descLength_; ++d)
- output.points[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
for (int d = 0; d < 9; ++d)
- output.points[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (int d = 0; d < descLength_; ++d)
- output.points[idx].descriptor[d] = shot_[d];
+ output[idx].descriptor[d] = shot_[d];
for (int d = 0; d < 3; ++d)
{
- output.points[idx].rf[d + 0] = frames_->points[idx].x_axis[d];
- output.points[idx].rf[d + 3] = frames_->points[idx].y_axis[d];
- output.points[idx].rf[d + 6] = frames_->points[idx].z_axis[d];
+ output[idx].rf[d + 0] = (*frames_)[idx].x_axis[d];
+ output[idx].rf[d + 3] = (*frames_)[idx].y_axis[d];
+ output[idx].rf[d + 6] = (*frames_)[idx].z_axis[d];
}
}
}
{
// Copy into the resultant cloud
for (int d = 0; d < descLength_; ++d)
- output.points[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
for (int d = 0; d < 9; ++d)
- output.points[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (int d = 0; d < descLength_; ++d)
- output.points[idx].descriptor[d] = shot_[d];
+ output[idx].descriptor[d] = shot_[d];
for (int d = 0; d < 3; ++d)
{
- output.points[idx].rf[d + 0] = frames_->points[idx].x_axis[d];
- output.points[idx].rf[d + 3] = frames_->points[idx].y_axis[d];
- output.points[idx].rf[d + 6] = frames_->points[idx].z_axis[d];
+ output[idx].rf[d + 0] = (*frames_)[idx].x_axis[d];
+ output[idx].rf[d + 3] = (*frames_)[idx].y_axis[d];
+ output[idx].rf[d + 6] = (*frames_)[idx].z_axis[d];
}
}
}
for (std::size_t i_idx = 0; i_idx < n_indices.size (); ++i_idx)
{
- Eigen::Vector4f pt = surface_->points[n_indices[i_idx]].getVector4fMap ();
+ Eigen::Vector4f pt = (*surface_)[n_indices[i_idx]].getVector4fMap ();
if (pt.head<3> () == central_point.head<3> ())
continue;
{
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < shot.size (); ++d)
- output.points[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
for (int d = 0; d < 9; ++d)
- output.points[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < shot.size (); ++d)
- output.points[idx].descriptor[d] = shot[d];
+ output[idx].descriptor[d] = shot[d];
for (int d = 0; d < 3; ++d)
{
- output.points[idx].rf[d + 0] = frames_->points[idx].x_axis[d];
- output.points[idx].rf[d + 3] = frames_->points[idx].y_axis[d];
- output.points[idx].rf[d + 6] = frames_->points[idx].z_axis[d];
+ output[idx].rf[d + 0] = (*frames_)[idx].x_axis[d];
+ output[idx].rf[d + 3] = (*frames_)[idx].y_axis[d];
+ output[idx].rf[d + 6] = (*frames_)[idx].z_axis[d];
}
}
}
{
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < shot.size (); ++d)
- output.points[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].descriptor[d] = std::numeric_limits<float>::quiet_NaN ();
for (int d = 0; d < 9; ++d)
- output.points[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
+ output[idx].rf[d] = std::numeric_limits<float>::quiet_NaN ();
output.is_dense = false;
continue;
// Copy into the resultant cloud
for (Eigen::Index d = 0; d < shot.size (); ++d)
- output.points[idx].descriptor[d] = shot[d];
+ output[idx].descriptor[d] = shot[d];
for (int d = 0; d < 3; ++d)
{
- output.points[idx].rf[d + 0] = frames_->points[idx].x_axis[d];
- output.points[idx].rf[d + 3] = frames_->points[idx].y_axis[d];
- output.points[idx].rf[d + 6] = frames_->points[idx].z_axis[d];
+ output[idx].rf[d + 0] = (*frames_)[idx].x_axis[d];
+ output[idx].rf[d + 3] = (*frames_)[idx].y_axis[d];
+ output[idx].rf[d + 6] = (*frames_)[idx].z_axis[d];
}
}
}
#define PCL_FEATURES_IMPL_SPIN_IMAGE_H_
#include <limits>
-#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/exceptions.h>
#include <pcl/features/spin_image.h>
assert (image_width_ > 0);
assert (support_angle_cos_ <= 1.0 && support_angle_cos_ >= 0.0); // maybe permit negative cosine?
- const Eigen::Vector3f origin_point (input_->points[index].getVector3fMap ());
+ const Eigen::Vector3f origin_point ((*input_)[index].getVector3fMap ());
Eigen::Vector3f origin_normal;
origin_normal =
input_normals_ ?
- input_normals_->points[index].getNormalVector3fMap () :
+ (*input_normals_)[index].getNormalVector3fMap () :
Eigen::Vector3f (); // just a placeholder; should never be used!
const Eigen::Vector3f rotation_axis = use_custom_axis_ ?
rotation_axis_.getNormalVector3fMap () :
use_custom_axes_cloud_ ?
- rotation_axes_cloud_->points[index].getNormalVector3fMap () :
+ (*rotation_axes_cloud_)[index].getNormalVector3fMap () :
origin_normal;
Eigen::ArrayXXd m_matrix (Eigen::ArrayXXd::Zero (image_width_+1, 2*image_width_+1));
double cos_between_normals = -2.0; // should be initialized if used
if (support_angle_cos_ > 0.0 || is_angular_) // not bogus
{
- cos_between_normals = origin_normal.dot (input_normals_->points[nn_indices[i_neigh]].getNormalVector3fMap ());
+ cos_between_normals = origin_normal.dot ((*input_normals_)[nn_indices[i_neigh]].getNormalVector3fMap ());
if (std::abs (cos_between_normals) > (1.0 + 10*std::numeric_limits<float>::epsilon ())) // should be okay for numeric stability
{
PCL_ERROR ("[pcl::%s::computeSiForPoint] Normal for the point %d and/or the point %d are not normalized, dot ptoduct is %f.\n",
// now compute the coordinate in cylindric coordinate system associated with the origin point
const Eigen::Vector3f direction (
- surface_->points[nn_indices[i_neigh]].getVector3fMap () - origin_point);
+ (*surface_)[nn_indices[i_neigh]].getVector3fMap () - origin_point);
const double direction_norm = direction.norm ();
if (std::abs(direction_norm) < 10*std::numeric_limits<double>::epsilon ())
continue; // ignore the point itself; it does not contribute really
}
// Check if the size of normals is the same as the size of the surface
- if (input_normals_->points.size () != input_->points.size ())
+ if (input_normals_->size () != input_->size ())
{
PCL_ERROR ("[pcl::%s::initCompute] ", getClassName ().c_str ());
PCL_ERROR ("The number of points in the input dataset differs from ");
{
for (Eigen::Index iCol = 0; iCol < res.cols () ; iCol++)
{
- output.points[i_input].histogram[ iRow*res.cols () + iCol ] = static_cast<float> (res (iRow, iCol));
+ output[i_input].histogram[ iRow*res.cols () + iCol ] = static_cast<float> (res (iRow, iCol));
}
}
}
using Graph = adjacency_list<vecS, vecS, undirectedS, no_property, Weight>;
Graph cloud_graph;
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
{
std::vector<int> k_indices (16);
std::vector<float> k_distances (16);
// declare and initialize data structure
F_scales_.resize (scale_values_.size ());
- std::vector<float> point_density (input_->points.size ()),
- F (input_->points.size ());
- std::vector<std::vector<float> > phi (input_->points.size ());
- std::vector<float> phi_row (input_->points.size ());
+ std::vector<float> point_density (input_->size ()),
+ F (input_->size ());
+ std::vector<std::vector<float> > phi (input_->size ());
+ std::vector<float> phi_row (input_->size ());
for (std::size_t scale_i = 0; scale_i < scale_values_.size (); ++scale_i)
{
float scale_squared = scale_values_[scale_i] * scale_values_[scale_i];
// calculate point density for each point x_i
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
{
float point_density_i = 0.0;
- for (std::size_t point_j = 0; point_j < input_->points.size (); ++point_j)
+ for (std::size_t point_j = 0; point_j < input_->size (); ++point_j)
{
float d_g = geodesic_distances_[point_i][point_j];
float phi_i_j = 1.0f / std::sqrt (2.0f * static_cast<float> (M_PI) * scale_squared) * std::exp ( (-1) * d_g*d_g / (2.0f * scale_squared));
}
// compute weights for each pair (x_i, x_j), evaluate the operator A_hat
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
{
float A_hat_normalization = 0.0;
PointT A_hat; A_hat.x = A_hat.y = A_hat.z = 0.0;
- for (std::size_t point_j = 0; point_j < input_->points.size (); ++point_j)
+ for (std::size_t point_j = 0; point_j < input_->size (); ++point_j)
{
float phi_hat_i_j = phi[point_i][point_j] / (point_density[point_i] * point_density[point_j]);
A_hat_normalization += phi_hat_i_j;
- PointT aux = input_->points[point_j];
+ PointT aux = (*input_)[point_j];
aux.x *= phi_hat_i_j; aux.y *= phi_hat_i_j; aux.z *= phi_hat_i_j;
A_hat.x += aux.x; A_hat.y += aux.y; A_hat.z += aux.z;
A_hat.x /= A_hat_normalization; A_hat.y /= A_hat_normalization; A_hat.z /= A_hat_normalization;
// compute the invariant F
- float aux = 2.0f / scale_values_[scale_i] * euclideanDistance<PointT, PointT> (A_hat, input_->points[point_i]);
+ float aux = 2.0f / scale_values_[scale_i] * euclideanDistance<PointT, PointT> (A_hat, (*input_)[point_i]);
F[point_i] = aux * std::exp (-aux);
}
// for each point, check if it is a local extrema on each scale
for (std::size_t scale_i = 0; scale_i < scale_values_.size (); ++scale_i)
{
- std::vector<bool> is_min_scale (input_->points.size ()),
- is_max_scale (input_->points.size ());
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+ std::vector<bool> is_min_scale (input_->size ()),
+ is_max_scale (input_->size ());
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
{
std::vector<int> nn_indices;
geodesicFixedRadiusSearch (point_i, scale_values_[scale_i], nn_indices);
// look for points that are min/max over three consecutive scales
for (std::size_t scale_i = 1; scale_i < scale_values_.size () - 1; ++scale_i)
{
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
if ((is_min[scale_i - 1][point_i] && is_min[scale_i][point_i] && is_min[scale_i + 1][point_i]) ||
(is_max[scale_i - 1][point_i] && is_max[scale_i][point_i] && is_max[scale_i + 1][point_i]))
{
template <typename PointInT, typename PointOutT, typename PointRFT> void
pcl::UniqueShapeContext<PointInT, PointOutT, PointRFT>::computePointDescriptor (std::size_t index, /*float rf[9],*/ std::vector<float> &desc)
{
- pcl::Vector3fMapConst origin = input_->points[(*indices_)[index]].getVector3fMap ();
+ pcl::Vector3fMapConst origin = (*input_)[(*indices_)[index]].getVector3fMap ();
- const Eigen::Vector3f x_axis (frames_->points[index].x_axis[0],
- frames_->points[index].x_axis[1],
- frames_->points[index].x_axis[2]);
- //const Eigen::Vector3f& y_axis = frames_->points[index].y_axis.getNormalVector3fMap ();
- const Eigen::Vector3f normal (frames_->points[index].z_axis[0],
- frames_->points[index].z_axis[1],
- frames_->points[index].z_axis[2]);
+ const Eigen::Vector3f x_axis ((*frames_)[index].x_axis[0],
+ (*frames_)[index].x_axis[1],
+ (*frames_)[index].x_axis[2]);
+ //const Eigen::Vector3f& y_axis = (*frames_)[index].y_axis.getNormalVector3fMap ();
+ const Eigen::Vector3f normal ((*frames_)[index].z_axis[0],
+ (*frames_)[index].z_axis[1],
+ (*frames_)[index].z_axis[2]);
// Find every point within specified search_radius_
std::vector<int> nn_indices;
if (pcl::utils::equal(nn_dists[ne], 0.0f))
continue;
// Get neighbours coordinates
- Eigen::Vector3f neighbour = surface_->points[nn_indices[ne]].getVector3fMap ();
+ Eigen::Vector3f neighbour = (*surface_)[nn_indices[ne]].getVector3fMap ();
// ----- Compute current neighbour polar coordinates -----
!std::isfinite (current_frame.y_axis[0]) ||
!std::isfinite (current_frame.z_axis[0]) )
{
- std::fill (output.points[point_index].descriptor, output.points[point_index].descriptor + descriptor_length_,
+ std::fill (output[point_index].descriptor, output[point_index].descriptor + descriptor_length_,
std::numeric_limits<float>::quiet_NaN ());
- std::fill (output.points[point_index].rf, output.points[point_index].rf + 9, 0);
+ std::fill (output[point_index].rf, output[point_index].rf + 9, 0);
output.is_dense = false;
continue;
}
for (int d = 0; d < 3; ++d)
{
- output.points[point_index].rf[0 + d] = current_frame.x_axis[d];
- output.points[point_index].rf[3 + d] = current_frame.y_axis[d];
- output.points[point_index].rf[6 + d] = current_frame.z_axis[d];
+ output[point_index].rf[0 + d] = current_frame.x_axis[d];
+ output[point_index].rf[3 + d] = current_frame.y_axis[d];
+ output[point_index].rf[6 + d] = current_frame.z_axis[d];
}
std::vector<float> descriptor (descriptor_length_);
computePointDescriptor (point_index, descriptor);
- std::copy (descriptor.begin (), descriptor.end (), output.points[point_index].descriptor);
+ std::copy (descriptor.begin (), descriptor.end (), output[point_index].descriptor);
}
}
template<typename PointInT, typename PointNT, typename PointOutT> bool
pcl::VFHEstimation<PointInT, PointNT, PointOutT>::initCompute ()
{
- if (input_->points.size () < 2 || (surface_ && surface_->points.size () < 2))
+ if (input_->size () < 2 || (surface_ && surface_->size () < 2))
{
PCL_ERROR ("[pcl::VFHEstimation::initCompute] Input dataset must have at least 2 points!\n");
return (false);
for (const int &index : indices)
{
// Compute the pair P to NNi
- if (!computePairFeatures (centroid_p, centroid_n, cloud.points[index].getVector4fMap (),
- normals.points[index].getNormalVector4fMap (), pfh_tuple[0], pfh_tuple[1],
+ if (!computePairFeatures (centroid_p, centroid_n, cloud[index].getVector4fMap (),
+ normals[index].getNormalVector4fMap (), pfh_tuple[0], pfh_tuple[1],
pfh_tuple[2], pfh_tuple[3]))
continue;
{
for (const auto& index: *indices_)
{
- normal_centroid.noalias () += normals_->points[index].getNormalVector4fMap ();
+ normal_centroid.noalias () += (*normals_)[index].getNormalVector4fMap ();
}
cp = indices_->size();
}
{
for (const auto& index: *indices_)
{
- if (!std::isfinite (normals_->points[index].normal[0]) ||
- !std::isfinite (normals_->points[index].normal[1]) ||
- !std::isfinite (normals_->points[index].normal[2]))
+ if (!std::isfinite ((*normals_)[index].normal[0]) ||
+ !std::isfinite ((*normals_)[index].normal[1]) ||
+ !std::isfinite ((*normals_)[index].normal[2]))
continue;
- normal_centroid.noalias () += normals_->points[index].getNormalVector4fMap ();
+ normal_centroid.noalias () += (*normals_)[index].getNormalVector4fMap ();
cp++;
}
}
for (const auto& index: *indices_)
{
- Eigen::Vector4f normal (normals_->points[index].normal[0],
- normals_->points[index].normal[1],
- normals_->points[index].normal[2], 0);
+ Eigen::Vector4f normal ((*normals_)[index].normal[0],
+ (*normals_)[index].normal[1],
+ (*normals_)[index].normal[2], 0);
// Normalize
double alpha = (normal.dot (d_vp_p) + 1.0) * 0.5;
std::size_t fi = static_cast<std::size_t> (std::floor (alpha * hist_vp_.size ()));
output.height = 1;
// Estimate the FPFH at nn_indices[0] using the entire cloud and copy the resultant signature
- auto outPtr = std::begin (output.points[0].histogram);
+ auto outPtr = std::begin (output[0].histogram);
for (int i = 0; i < 4; ++i)
{
#pragma once
#include <vector>
-#include <cmath>
-#include <pcl/features/feature.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
+#include <pcl/pcl_base.h>
#include <pcl/PointIndices.h>
namespace pcl
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/features/eigen.h>
-#include <pcl/common/common_headers.h>
+#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
namespace pcl
namespace pcl
{
- /** \brief OrganizedEdgeBase, OrganizedEdgeFromRGB, OrganizedEdgeFromNormals,
- * and OrganizedEdgeFromRGBNormals find 3D edges from an organized point
- * cloud data. Given an organized point cloud, they will output a PointCloud
+ /** \brief OrganizedEdgeBase, OrganizedEdgeFromRGB, OrganizedEdgeFromNormals,
+ * and OrganizedEdgeFromRGBNormals find 3D edges from organized point
+ * cloud data. Given an organized point cloud, they will output a PointCloud
* of edge labels and a vector of PointIndices.
* OrganizedEdgeBase accepts PCL_XYZ_POINT_TYPES and returns EDGELABEL_NAN_BOUNDARY, EDGELABEL_OCCLUDING, and EDGELABEL_OCCLUDED.
* OrganizedEdgeFromRGB accepts PCL_RGB_POINT_TYPES and returns EDGELABEL_NAN_BOUNDARY, EDGELABEL_OCCLUDING, EDGELABEL_OCCLUDED, and EDGELABEL_RGB_CANNY.
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
+
using PointCloudL = pcl::PointCloud<PointLT>;
using PointCloudLPtr = typename PointCloudL::Ptr;
using PointCloudLConstPtr = typename PointCloudL::ConstPtr;
}
/** \brief Destructor for OrganizedEdgeBase */
-
+
~OrganizedEdgeBase ()
{
}
*/
void
compute (pcl::PointCloud<PointLT>& labels, std::vector<pcl::PointIndices>& label_indices) const;
-
- /** \brief Set the tolerance in meters for difference in depth values between neighboring points. */
+
+ /** \brief Set the tolerance in meters for the relative difference in depth values between neighboring points.
+ * e.g. If a point has a depth (z) value of 2.0 meters, a neighboring point is discontinuous if its depth differs by > 2.0 * th. */
inline void
setDepthDisconThreshold (const float th)
{
th_depth_discon_ = th;
}
- /** \brief Get the tolerance in meters for difference in depth values between neighboring points. */
+ /** \brief Get the tolerance in meters for the relative difference in depth values between neighboring points.
+ * e.g. If a point has a depth (z) value of 2.0 meters, a neighboring point is discontinuous if its depth differs by > 2.0 * th. */
inline float
getDepthDisconThreshold () const
{
{
return detecting_edge_types_;
}
-
+
enum {EDGELABEL_NAN_BOUNDARY=1, EDGELABEL_OCCLUDING=2, EDGELABEL_OCCLUDED=4, EDGELABEL_HIGH_CURVATURE=8, EDGELABEL_RGB_CANNY=16};
static const int num_of_edgetype_ = 5;
*/
void
extractEdges (pcl::PointCloud<PointLT>& labels) const;
-
+
/** \brief Assign point indices for each edge label
* \param[out] labels a PointCloud of edge labels
* \param[out] label_indices a vector of PointIndices corresponding to each edge label
*/
void
assignLabelIndices (pcl::PointCloud<PointLT>& labels, std::vector<pcl::PointIndices>& label_indices) const;
-
+
struct Neighbor
{
Neighbor (int dx, int dy, int didx)
, d_y (dy)
, d_index (didx)
{}
-
+
int d_x;
int d_y;
int d_index; // = dy * width + dx: pre-calculated
};
- /** \brief The tolerance in meters for difference in depth values between neighboring points
- * (The value is set for 1 meter and is adapted with respect to depth value linearly.
- * (e.g. 2.0*th_depth_discon_ in 2 meter depth))
- */
+ /** \brief The tolerance in meters for the relative difference in depth values between neighboring points
+ * (the default value is 0.02; the threshold is adapted linearly with respect to the depth value,
+ * e.g. if a point has a depth (z) value of 2.0 meters, a neighboring point is discontinuous if its depth differs by > 2.0 * th). */
float th_depth_discon_;
/** \brief The max search distance for deciding occluding and occluded edges */
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
+
using PointCloudL = pcl::PointCloud<PointLT>;
using PointCloudLPtr = typename PointCloudL::Ptr;
using PointCloudLConstPtr = typename PointCloudL::ConstPtr;
}
/** \brief Destructor for OrganizedEdgeFromRGB */
-
+
~OrganizedEdgeFromRGB ()
{
}
*/
void
compute (pcl::PointCloud<PointLT>& labels, std::vector<pcl::PointIndices>& label_indices) const;
-
+
/** \brief Set the low threshold value for RGB Canny edge detection */
inline void
setRGBCannyLowThreshold (const float th)
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
+
using PointCloudN = pcl::PointCloud<PointNT>;
using PointCloudNPtr = typename PointCloudN::Ptr;
using PointCloudNConstPtr = typename PointCloudN::ConstPtr;
using OrganizedEdgeBase<PointT, PointLT>::EDGELABEL_HIGH_CURVATURE;
/** \brief Constructor for OrganizedEdgeFromNormals */
- OrganizedEdgeFromNormals ()
+ OrganizedEdgeFromNormals ()
: OrganizedEdgeBase<PointT, PointLT> ()
, normals_ ()
, th_hc_canny_low_ (0.4f)
}
/** \brief Destructor for OrganizedEdgeFromNormals */
-
+
~OrganizedEdgeFromNormals ()
{
}
* \param[in] normals the input normal cloud
*/
inline void
- setInputNormals (const PointCloudNConstPtr &normals)
+ setInputNormals (const PointCloudNConstPtr &normals)
{
normals_ = normals;
}
{
return (th_hc_canny_high_);
}
-
+
protected:
/** \brief Perform the 3D edge detection (edges from depth discontinuities and high curvature regions)
* \param[out] labels a PointCloud of edge labels
using PointCloud = pcl::PointCloud<PointT>;
using PointCloudPtr = typename PointCloud::Ptr;
using PointCloudConstPtr = typename PointCloud::ConstPtr;
-
+
using PointCloudN = pcl::PointCloud<PointNT>;
using PointCloudNPtr = typename PointCloudN::Ptr;
using PointCloudNConstPtr = typename PointCloudN::ConstPtr;
using OrganizedEdgeBase<PointT, PointLT>::EDGELABEL_OCCLUDED;
using OrganizedEdgeBase<PointT, PointLT>::EDGELABEL_HIGH_CURVATURE;
using OrganizedEdgeBase<PointT, PointLT>::EDGELABEL_RGB_CANNY;
-
+
/** \brief Constructor for OrganizedEdgeFromRGBNormals */
- OrganizedEdgeFromRGBNormals ()
+ OrganizedEdgeFromRGBNormals ()
: OrganizedEdgeFromRGB<PointT, PointLT> ()
, OrganizedEdgeFromNormals<PointT, PointNT, PointLT> ()
{
}
/** \brief Destructor for OrganizedEdgeFromRGBNormals */
-
+
~OrganizedEdgeFromRGBNormals ()
{
}
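// Editorial usage sketch for the OrganizedEdge* classes documented above; the function,
// variable names and the 0.02 threshold are illustrative (0.02 is the documented default,
// and the test is relative: a neighbor of a point at depth z is discontinuous when the
// depth gap exceeds z * th).
#include <pcl/features/organized_edge_detection.h>
#include <pcl/point_types.h>
#include <vector>

inline void
detectOrganizedEdges (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::OrganizedEdgeBase<pcl::PointXYZ, pcl::Label> oed;
  oed.setInputCloud (cloud);             // the cloud must be organized (height > 1)
  oed.setDepthDisconThreshold (0.02f);
  pcl::PointCloud<pcl::Label> labels;
  std::vector<pcl::PointIndices> label_indices;
  oed.compute (labels, label_indices);   // indices grouped per edge label (NaN boundary, occluding, occluded)
}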
#pragma once
#include <pcl/features/feature.h>
-#include <pcl/features/boost.h>
namespace pcl
{
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/PolygonMesh.h>
#include <pcl/features/feature.h>
#include <set>
* the centroid and 128 binning subdivisions for the viewpoint component, which results in a
* 308-byte array of float values. These are stored in a pcl::VFHSignature308 point type.
* A major difference between the PFH/FPFH descriptors and VFH, is that for a given point cloud dataset, only a
- * single VFH descriptor will be estimated (vfhs->points.size() should be 1), while the resultant PFH/FPFH data
+ * single VFH descriptor will be estimated (vfhs->size() should be 1), while the resultant PFH/FPFH data
* will have the same number of entries as the number of points in the cloud.
*
* \note If you use this code in any academic work, please cite:
using Eigen::Vector3f;
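// Editorial sketch of the point above: VFHEstimation produces exactly one descriptor for
// the whole cloud. The `cloud` and `normals` arguments are hypothetical inputs; the k-d
// tree is the usual search method.
#include <pcl/features/vfh.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>

inline void
computeGlobalVFH (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                  const pcl::PointCloud<pcl::Normal>::ConstPtr& normals)
{
  pcl::VFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::VFHSignature308> vfh;
  vfh.setInputCloud (cloud);
  vfh.setInputNormals (normals);
  vfh.setSearchMethod (pcl::search::KdTree<pcl::PointXYZ>::Ptr (new pcl::search::KdTree<pcl::PointXYZ>));
  pcl::PointCloud<pcl::VFHSignature308> vfhs;
  vfh.compute (vfhs);
  // vfhs.size () == 1: a single 308-bin histogram summarizes the whole cloud
}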
#include <pcl/range_image/range_image.h>
-#include <pcl/common/vector_average.h>
-#include <pcl/common/common_headers.h>
namespace pcl
{
schedule(dynamic, 10) \
num_threads(max_no_of_threads)
//!!! nizar 20110408 : for OpenMP sake on MSVC this must be kept signed
- for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t>(interest_points.points.size ()); ++idx)
+ for (std::ptrdiff_t idx = 0; idx < static_cast<std::ptrdiff_t>(interest_points.size ()); ++idx)
{
- const auto& interest_point = interest_points.points[idx];
+ const auto& interest_point = interest_points[idx];
Vector3fMapConst point = interest_point.getVector3fMap ();
Narf* feature = new Narf;
output.points.resize(feature_list.size());
for (std::size_t i = 0; i < feature_list.size(); ++i)
{
- feature_list[i]->copyToNarf36(output.points[i]);
+ feature_list[i]->copyToNarf36(output[i]);
}
// Cleanup
#include <iostream>
using std::cout;
using std::cerr;
-#include <map>
-#include <set>
#include <cmath>
#include <pcl/pcl_macros.h>
-#include <pcl/common/common_headers.h>
#include <pcl/range_image/range_image.h>
#include <pcl/point_cloud.h>
-#include <pcl/common/vector_average.h>
-#include <pcl/features/eigen.h>
#include <pcl/features/range_image_border_extractor.h>
+#include <Eigen/Core> // for Vector3f
namespace pcl
{
for (int x = 0; x < static_cast<int> (width); ++x)
{
int index = y*width+x;
- BorderDescription& border_description = border_descriptions_->points[index];
+ BorderDescription& border_description = (*border_descriptions_)[index];
border_description.x = x;
border_description.y = y;
BorderTraits& border_traits = border_description.traits;
int shadow_border_index = shadow_border_indices->left;
if (shadow_border_index >= 0 && checkIfMaximum(x, y, -1, 0, border_scores_left_.data (), shadow_border_index))
{
- BorderTraits& shadow_traits = border_descriptions_->points[shadow_border_index].traits;
+ BorderTraits& shadow_traits = (*border_descriptions_)[shadow_border_index].traits;
border_traits[BORDER_TRAIT__OBSTACLE_BORDER] = border_traits[BORDER_TRAIT__OBSTACLE_BORDER_LEFT] = true;
shadow_traits[BORDER_TRAIT__SHADOW_BORDER] = shadow_traits[BORDER_TRAIT__SHADOW_BORDER_RIGHT] = true;
for (int index3=index-1; index3>shadow_border_index; --index3)
{
- BorderTraits& veil_point = border_descriptions_->points[index3].traits;
+ BorderTraits& veil_point = (*border_descriptions_)[index3].traits;
veil_point[BORDER_TRAIT__VEIL_POINT] = veil_point[BORDER_TRAIT__VEIL_POINT_RIGHT] = true;
}
}
shadow_border_index = shadow_border_indices->right;
if (shadow_border_index >= 0 && checkIfMaximum(x, y, 1, 0, border_scores_right_.data (), shadow_border_index))
{
- BorderTraits& shadow_traits = border_descriptions_->points[shadow_border_index].traits;
+ BorderTraits& shadow_traits = (*border_descriptions_)[shadow_border_index].traits;
border_traits[BORDER_TRAIT__OBSTACLE_BORDER] = border_traits[BORDER_TRAIT__OBSTACLE_BORDER_RIGHT] = true;
shadow_traits[BORDER_TRAIT__SHADOW_BORDER] = shadow_traits[BORDER_TRAIT__SHADOW_BORDER_LEFT] = true;
for (int index3=index+1; index3<shadow_border_index; ++index3)
{
- BorderTraits& veil_point = border_descriptions_->points[index3].traits;
+ BorderTraits& veil_point = (*border_descriptions_)[index3].traits;
veil_point[BORDER_TRAIT__VEIL_POINT] = veil_point[BORDER_TRAIT__VEIL_POINT_LEFT] = true;
}
}
shadow_border_index = shadow_border_indices->top;
if (shadow_border_index >= 0 && checkIfMaximum(x, y, 0, -1, border_scores_top_.data (), shadow_border_index))
{
- BorderTraits& shadow_traits = border_descriptions_->points[shadow_border_index].traits;
+ BorderTraits& shadow_traits = (*border_descriptions_)[shadow_border_index].traits;
border_traits[BORDER_TRAIT__OBSTACLE_BORDER] = border_traits[BORDER_TRAIT__OBSTACLE_BORDER_TOP] = true;
shadow_traits[BORDER_TRAIT__SHADOW_BORDER] = shadow_traits[BORDER_TRAIT__SHADOW_BORDER_BOTTOM] = true;
for (int index3=index-width; index3>shadow_border_index; index3-=width)
{
//std::cout << "Adding veil point at "<<(index3-index)%width<<","<<(index3-index)/width<<".\n";
- BorderTraits& veil_point = border_descriptions_->points[index3].traits;
+ BorderTraits& veil_point = (*border_descriptions_)[index3].traits;
veil_point[BORDER_TRAIT__VEIL_POINT] = veil_point[BORDER_TRAIT__VEIL_POINT_BOTTOM] = true;
}
}
shadow_border_index = shadow_border_indices->bottom;
if (shadow_border_index >= 0 && checkIfMaximum(x, y, 0, 1, border_scores_bottom_.data (), shadow_border_index))
{
- BorderTraits& shadow_traits = border_descriptions_->points[shadow_border_index].traits;
+ BorderTraits& shadow_traits = (*border_descriptions_)[shadow_border_index].traits;
border_traits[BORDER_TRAIT__OBSTACLE_BORDER] = border_traits[BORDER_TRAIT__OBSTACLE_BORDER_BOTTOM] = true;
shadow_traits[BORDER_TRAIT__SHADOW_BORDER] = shadow_traits[BORDER_TRAIT__SHADOW_BORDER_TOP] = true;
for (int index3=index+width; index3<shadow_border_index; index3+=width)
{
//std::cout << "Adding veil point at "<<(index3-index)%width<<","<<(index3-index)/width<<".\n";
- BorderTraits& veil_point = border_descriptions_->points[index3].traits;
+ BorderTraits& veil_point = (*border_descriptions_)[index3].traits;
veil_point[BORDER_TRAIT__VEIL_POINT] = veil_point[BORDER_TRAIT__VEIL_POINT_TOP] = true;
}
}
Eigen::Vector3f& surface_change_direction = surface_change_directions_[index];
surface_change_direction.setZero();
- const BorderTraits& border_traits = border_descriptions_->points[index].traits;
+ const BorderTraits& border_traits = (*border_descriptions_)[index].traits;
if (border_traits[BORDER_TRAIT__VEIL_POINT] || border_traits[BORDER_TRAIT__SHADOW_BORDER])
continue;
if (border_directions_[index]!=nullptr)
new_score = 0.0f;
if (!range_image.isValid(index))
continue;
- const BorderTraits& border_traits = border_descriptions_->points[index].traits;
+ const BorderTraits& border_traits = (*border_descriptions_)[index].traits;
if (border_traits[BORDER_TRAIT__VEIL_POINT] || border_traits[BORDER_TRAIT__SHADOW_BORDER])
continue;
const Eigen::Vector3f& point = surface_change_directions_[index];
"include/pcl/${SUBSYS_NAME}/extract_indices.h"
"include/pcl/${SUBSYS_NAME}/filter.h"
"include/pcl/${SUBSYS_NAME}/filter_indices.h"
+ "include/pcl/${SUBSYS_NAME}/experimental/functor_filter.h"
"include/pcl/${SUBSYS_NAME}/passthrough.h"
"include/pcl/${SUBSYS_NAME}/shadowpoints.h"
"include/pcl/${SUBSYS_NAME}/project_inliers.h"
#pragma once
-#include <pcl/common/eigen.h>
#include <pcl/point_cloud.h>
-#include <pcl/exceptions.h>
-#include <pcl/pcl_base.h>
namespace pcl
{
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#pragma once
+
+#include <pcl/filters/filter_indices.h>
+#include <pcl/type_traits.h> // for is_invocable
+
+namespace pcl {
+namespace experimental {
+template <typename PointT, typename Function>
+constexpr static bool is_functor_for_filter_v =
+ pcl::is_invocable_r_v<bool,
+ Function,
+ const pcl::remove_cvref_t<pcl::PointCloud<PointT>>&,
+ pcl::index_t>;
+
+/**
+ * \brief Filter point clouds and indices based on a functor passed in the ctor
+ * \ingroup filters
+ */
+template <typename PointT, typename Functor>
+class FunctorFilter : public FilterIndices<PointT> {
+ using Base = FilterIndices<PointT>;
+ using PCLBase = pcl::PCLBase<PointT>;
+
+public:
+ using FunctorT = Functor;
+ // enforcing this in the template signature would complicate it, so a static_assert is used instead
+ static_assert(is_functor_for_filter_v<PointT, FunctorT>,
+ "Functor signature must be similar to `bool(const PointCloud<PointT>&, "
+ "index_t)`");
+
+protected:
+ using Base::extract_removed_indices_;
+ using Base::filter_name_;
+ using Base::negative_;
+ using Base::removed_indices_;
+ using PCLBase::indices_;
+ using PCLBase::input_;
+
+ // need to hold a value because functors can only be copy- or move-constructed
+ FunctorT functor_;
+
+public:
+ /** \brief Constructor.
+ * \param[in] functor the functor used to select points; it must return true for points that should be kept
+ * \param[in] extract_removed_indices Set to true if you want to be able to
+ * extract the indices of points being removed (default = false).
+ */
+ FunctorFilter(FunctorT functor, bool extract_removed_indices = false)
+ : Base(extract_removed_indices), functor_(std::move(functor))
+ {
+ filter_name_ = "functor_filter";
+ }
+
+ const FunctorT&
+ getFunctor() const noexcept
+ {
+ return functor_;
+ }
+
+ FunctorT&
+ getFunctor() noexcept
+ {
+ return functor_;
+ }
+
+ /**
+ * \brief Filtered results are indexed by an indices array.
+ * \param[out] indices The resultant indices.
+ */
+ void
+ applyFilter(Indices& indices) override
+ {
+ indices.clear();
+ indices.reserve(input_->size());
+ if (extract_removed_indices_) {
+ removed_indices_->clear();
+ removed_indices_->reserve(input_->size());
+ }
+
+ for (const auto index : *indices_) {
+ // functor returns true for points that should be selected
+ if (negative_ != functor_(*input_, index)) {
+ indices.push_back(index);
+ }
+ else if (extract_removed_indices_) {
+ removed_indices_->push_back(index);
+ }
+ }
+ }
+};
+} // namespace experimental
+} // namespace pcl
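
For reference, a minimal usage sketch of the experimental FunctorFilter introduced above; the point type, lambda, 1 m threshold, and function name are illustrative and not part of the change:

#include <pcl/filters/experimental/functor_filter.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void keepPointsNearOrigin(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud,
                          pcl::PointCloud<pcl::PointXYZ>& filtered)
{
  // The functor returns true for points that should be kept.
  auto near_origin = [](const pcl::PointCloud<pcl::PointXYZ>& c, pcl::index_t idx) {
    return c[idx].getVector3fMap().norm() < 1.0f;
  };

  pcl::experimental::FunctorFilter<pcl::PointXYZ, decltype(near_origin)> filter(near_origin);
  filter.setInputCloud(cloud);
  filter.filter(filtered);
}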
/** \brief Removes points with x, y, or z equal to NaN
* \param[in] cloud_in the input point cloud
* \param[out] cloud_out the output point cloud
- * \param[out] index the mapping (ordered): cloud_out.points[i] = cloud_in.points[index[i]]
+ * \param[out] index the mapping (ordered): cloud_out[i] = cloud_in[index[i]]
* \note The density of the point cloud is lost.
* \note Can be called with cloud_in == cloud_out
* \ingroup filters
/** \brief Removes points that have their normals invalid (i.e., equal to NaN)
* \param[in] cloud_in the input point cloud
* \param[out] cloud_out the output point cloud
- * \param[out] index the mapping (ordered): cloud_out.points[i] = cloud_in.points[index[i]]
+ * \param[out] index the mapping (ordered): cloud_out[i] = cloud_in[index[i]]
* \note The density of the point cloud is lost.
* \note Can be called with cloud_in == cloud_out
* \ingroup filters
* \note This function does not modify the input point cloud!
*
* \param cloud_in the input point cloud
- * \param index the mapping (ordered): filtered_cloud.points[i] = cloud_in.points[index[i]]
+ * \param index the mapping (ordered): filtered_cloud[i] = cloud_in[index[i]]
*
* \see removeNaNFromPointCloud
* \ingroup filters
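
As a quick illustration of the index mapping documented above (cloud_out[i] = cloud_in[index[i]]), a minimal sketch; the point type and function name are illustrative:

#include <pcl/filters/filter.h>  // removeNaNFromPointCloud
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

#include <cassert>
#include <vector>

void stripNaNs(const pcl::PointCloud<pcl::PointXYZ>& cloud_in)
{
  pcl::PointCloud<pcl::PointXYZ> cloud_out;
  std::vector<int> index;
  pcl::removeNaNFromPointCloud(cloud_in, cloud_out, index);

  // Every surviving point records where it came from in the input.
  for (std::size_t i = 0; i < cloud_out.size(); ++i)
    assert(cloud_out[i].x == cloud_in[index[i]].x);
}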
#ifndef PCL_FILTERS_IMPL_FAST_VOXEL_GRID_H_
#define PCL_FILTERS_IMPL_FAST_VOXEL_GRID_H_
-#include <pcl/common/common.h>
#include <pcl/common/io.h>
#include <pcl/filters/approximate_voxel_grid.h>
pcl::ApproximateVoxelGrid<PointT>::flush (PointCloud &output, std::size_t op, he *hhe, int rgba_index, int centroid_size)
{
hhe->centroid /= static_cast<float> (hhe->count);
- pcl::for_each_type <FieldList> (pcl::xNdCopyEigenPointFunctor <PointT> (hhe->centroid, output.points[op]));
+ pcl::for_each_type <FieldList> (pcl::xNdCopyEigenPointFunctor <PointT> (hhe->centroid, output[op]));
// ---[ RGB special case
if (rgba_index >= 0)
{
g = hhe->centroid[centroid_size-2],
b = hhe->centroid[centroid_size-1];
int rgb = (static_cast<int> (r)) << 16 | (static_cast<int> (g)) << 8 | (static_cast<int> (b));
- memcpy (reinterpret_cast<char*> (&output.points[op]) + rgba_index, &rgb, sizeof (float));
+ memcpy (reinterpret_cast<char*> (&output[op]) + rgba_index, &rgb, sizeof (float));
}
}
}
Eigen::VectorXf scratch = Eigen::VectorXf::Zero (centroid_size);
- output.points.resize (input_->points.size ()); // size output for worst case
+ output.points.resize (input_->size ()); // size output for worst case
std::size_t op = 0; // output pointer
- for (std::size_t cp = 0; cp < input_->points.size (); ++cp)
+ for (const auto& point: *input_)
{
- int ix = static_cast<int> (std::floor (input_->points[cp].x * inverse_leaf_size_[0]));
- int iy = static_cast<int> (std::floor (input_->points[cp].y * inverse_leaf_size_[1]));
- int iz = static_cast<int> (std::floor (input_->points[cp].z * inverse_leaf_size_[2]));
+ int ix = static_cast<int> (std::floor (point.x * inverse_leaf_size_[0]));
+ int iy = static_cast<int> (std::floor (point.y * inverse_leaf_size_[1]));
+ int iz = static_cast<int> (std::floor (point.z * inverse_leaf_size_[2]));
unsigned int hash = static_cast<unsigned int> ((ix * 7171 + iy * 3079 + iz * 4231) & (histsize_ - 1));
he *hhe = &history_[hash];
if (hhe->count && ((ix != hhe->ix) || (iy != hhe->iy) || (iz != hhe->iz)))
{
// fill r/g/b data
pcl::RGB rgb;
- memcpy (&rgb, (reinterpret_cast<const char *> (&input_->points[cp])) + rgba_index, sizeof (RGB));
+ memcpy (&rgb, (reinterpret_cast<const char *> (&point)) + rgba_index, sizeof (RGB));
scratch[centroid_size-3] = rgb.r;
scratch[centroid_size-2] = rgb.g;
scratch[centroid_size-1] = rgb.b;
}
- pcl::for_each_type <FieldList> (xNdCopyPointEigenFunctor <PointT> (input_->points[cp], scratch));
+ pcl::for_each_type <FieldList> (xNdCopyPointEigenFunctor <PointT> (point, scratch));
hhe->centroid += scratch;
}
for (std::size_t i = 0; i < histsize_; i++)
flush (output, op++, hhe, rgba_index, centroid_size);
}
output.points.resize (op);
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.height = 1; // downsampling breaks the organized structure
output.is_dense = false; // we filter out invalid points
}
{
int id = indices[n_id];
// Compute the difference in intensity
- double intensity_dist = std::abs (input_->points[pid].intensity - input_->points[id].intensity);
+ double intensity_dist = std::abs ((*input_)[pid].intensity - (*input_)[id].intensity);
// Compute the Gaussian intensity weights both in Euclidean and in intensity space
double dist = std::sqrt (distances[n_id]);
double weight = kernel (dist, sigma_s_) * kernel (intensity_dist, sigma_r_);
// Calculate the bilateral filter response
- BF += weight * input_->points[id].intensity;
+ BF += weight * (*input_)[id].intensity;
W += weight;
}
return (BF / W);
tree_->radiusSearch ((*indices_)[i], sigma_s_ * 2, k_indices, k_distances);
// Overwrite the intensity value with the computed average
- output.points[(*indices_)[i]].intensity = static_cast<float> (computePointWeight ((*indices_)[i], k_indices, k_distances));
+ output[(*indices_)[i]].intensity = static_cast<float> (computePointWeight ((*indices_)[i], k_indices, k_distances));
}
}
output.width = this->input_->width;
output.is_dense = this->input_->is_dense;
}
- output.points.resize (input_->points.size ());
- removed_indices_->resize (input_->points.size ());
+ output.points.resize (input_->size ());
+ removed_indices_->resize (input_->size ());
int nr_removed_p = 0;
for (std::size_t index: (*Filter<PointT>::indices_))
{
- const PointT& point = input_->points[index];
+ const PointT& point = (*input_)[index];
// Check if the point is invalid
if (!std::isfinite (point.x)
|| !std::isfinite (point.y)
if (condition_->evaluate (point))
{
- copyPoint (point, output.points[nr_p]);
+ copyPoint (point, output[nr_p]);
nr_p++;
}
else
std::sort (indices.begin (), indices.end ()); //TODO: is this necessary or can we assume the indices to be sorted?
bool removed_p = false;
std::size_t ci = 0;
- for (std::size_t cp = 0; cp < input_->points.size (); ++cp)
+ for (std::size_t cp = 0; cp < input_->size (); ++cp)
{
if (cp == static_cast<std::size_t> (indices[ci]))
{
}
// copy all the fields
- copyPoint (input_->points[cp], output.points[cp]);
+ copyPoint ((*input_)[cp], output[cp]);
- if (!condition_->evaluate (input_->points[cp]))
+ if (!condition_->evaluate ((*input_)[cp]))
{
- output.points[cp].getVector4fMap ().setConstant (user_filter_value_);
+ output[cp].getVector4fMap ().setConstant (user_filter_value_);
removed_p = true;
if (extract_removed_indices_)
}
else
{
- output.points[cp].getVector4fMap ().setConstant (user_filter_value_);
+ output[cp].getVector4fMap ().setConstant (user_filter_value_);
removed_p = true;
//as for !keep_organized_: removed points due to setIndices are not considered as removed_indices_
}
}
template<> pcl::PointXYZRGB
-Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneRowDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneRowDense (int i, int j);
template<> pcl::PointXYZRGB
-Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneColDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneColDense (int i, int j);
template<> pcl::PointXYZRGB
-Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneRowNonDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneRowNonDense (int i, int j);
template<> pcl::PointXYZRGB
-Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneColNonDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::PointXYZRGB, pcl::PointXYZRGB>::convolveOneColNonDense (int i, int j);
template<> pcl::RGB
-Convolution<pcl::RGB, pcl::RGB>::convolveOneRowDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::RGB, pcl::RGB>::convolveOneRowDense (int i, int j);
template<> pcl::RGB
-Convolution<pcl::RGB, pcl::RGB>::convolveOneColDense (int i, int j);
+PCL_EXPORTS Convolution<pcl::RGB, pcl::RGB>::convolveOneColDense (int i, int j);
template<> inline pcl::RGB
Convolution<pcl::RGB, pcl::RGB>::convolveOneRowNonDense (int i, int j)
#pragma omp parallel for \
default(none) \
shared(output) \
- private(nn_indices, nn_distances) \
+ firstprivate(nn_indices, nn_distances) \
num_threads(threads_)
for (std::int64_t point_idx = 0; point_idx < static_cast<std::int64_t> (surface_->size ()); ++point_idx)
{
output.resize (sampled_indices.size ());
output.header = input_->header;
output.height = 1;
- output.width = std::uint32_t (output.size ());
+ output.width = output.size ();
output.is_dense = true;
for (std::size_t i = 0; i < sampled_indices.size (); ++i)
output[i] = (*input_)[sampled_indices[i]];
#define PCL_FILTERS_IMPL_CROP_BOX_H_
#include <pcl/filters/crop_box.h>
-#include <pcl/common/io.h>
///////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
pcl::CropBox<PointT>::applyFilter (std::vector<int> &indices)
{
- indices.resize (input_->points.size ());
- removed_indices_->resize (input_->points.size ());
+ indices.resize (input_->size ());
+ removed_indices_->resize (input_->size ());
int indices_count = 0;
int removed_indices_count = 0;
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!isFinite (input_->points[index]))
+ if (!isFinite ((*input_)[index]))
continue;
// Get local point
- PointT local_pt = input_->points[index];
+ PointT local_pt = (*input_)[index];
// Transform point to world space
if (!transform_matrix_is_identity)
);
for (std::size_t index = 0; index < indices_->size (); index++)
{
- Eigen::Vector3f pt = input_->points[(*indices_)[index]].getVector3fMap ();
+ Eigen::Vector3f pt = (*input_)[(*indices_)[index]].getVector3fMap ();
for (int i = 0; i < 3; i++)
{
if (pt[i] < cloud_min[i]) cloud_min[i] = pt[i];
for (poly = 0; poly < hull_polygons_.size (); poly++)
{
if (isPointIn2DPolyWithVertIndices<PlaneDim1,PlaneDim2> (
- input_->points[(*indices_)[index]], hull_polygons_[poly], *hull_cloud_
+ (*input_)[(*indices_)[index]], hull_polygons_[poly], *hull_cloud_
))
{
if (crop_outside_)
- output.push_back (input_->points[(*indices_)[index]]);
+ output.push_back ((*input_)[(*indices_)[index]]);
// once a point has tested +ve for being inside one polygon, we can
// stop checking the others:
break;
// If we're removing points *inside* the hull, only remove points that
// haven't been found inside any polygons
if (poly == hull_polygons_.size () && !crop_outside_)
- output.push_back (input_->points[(*indices_)[index]]);
+ output.push_back ((*input_)[(*indices_)[index]]);
}
}
for (poly = 0; poly < hull_polygons_.size (); poly++)
{
if (isPointIn2DPolyWithVertIndices<PlaneDim1,PlaneDim2> (
- input_->points[(*indices_)[index]], hull_polygons_[poly], *hull_cloud_
+ (*input_)[(*indices_)[index]], hull_polygons_[poly], *hull_cloud_
))
{
if (crop_outside_)
for (std::size_t poly = 0; poly < hull_polygons_.size (); poly++)
for (std::size_t ray = 0; ray < 3; ray++)
crossings[ray] += rayTriangleIntersect
- (input_->points[(*indices_)[index]], rays[ray], hull_polygons_[poly], *hull_cloud_);
+ ((*input_)[(*indices_)[index]], rays[ray], hull_polygons_[poly], *hull_cloud_);
if (crop_outside_ && (crossings[0]&1) + (crossings[1]&1) + (crossings[2]&1) > 1)
- output.push_back (input_->points[(*indices_)[index]]);
+ output.push_back ((*input_)[(*indices_)[index]]);
else if (!crop_outside_)
- output.push_back (input_->points[(*indices_)[index]]);
+ output.push_back ((*input_)[(*indices_)[index]]);
}
}
for (std::size_t poly = 0; poly < hull_polygons_.size (); poly++)
for (std::size_t ray = 0; ray < 3; ray++)
crossings[ray] += rayTriangleIntersect
- (input_->points[(*indices_)[index]], rays[ray], hull_polygons_[poly], *hull_cloud_);
+ ((*input_)[(*indices_)[index]], rays[ray], hull_polygons_[poly], *hull_cloud_);
if (crop_outside_ && (crossings[0]&1) + (crossings[1]&1) + (crossings[2]&1) > 1)
indices.push_back ((*indices_)[index]);
#define PCL_FILTERS_IMPL_EXTRACT_INDICES_HPP_
#include <pcl/filters/extract_indices.h>
-#include <pcl/common/io.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
{
std::size_t pt_index = (std::size_t) (*removed_indices_)[rii];
- if (pt_index >= input_->points.size ())
+ if (pt_index >= input_->size ())
{
PCL_ERROR ("[pcl::%s::filterDirectly] The index exceeds the size of the input. Do nothing.\n",
getClassName ().c_str ());
*cloud = *input_;
return;
}
- std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&cloud->points[pt_index]);
+ std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&(*cloud)[pt_index]);
for (const auto &field : fields)
memcpy (pt_data + field.offset, &user_filter_value_, sizeof (float));
}
for (const auto ri : *removed_indices_) // ri = removed index
{
std::size_t pt_index = (std::size_t)ri;
- if (pt_index >= input_->points.size ())
+ if (pt_index >= input_->size ())
{
PCL_ERROR ("[pcl::%s::applyFilter] The index exceeds the size of the input. Do nothing.\n",
getClassName ().c_str ());
output = *input_;
return;
}
- std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&output.points[pt_index]);
+ std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&output[pt_index]);
for (const auto &field : fields)
memcpy (pt_data + field.offset, &user_filter_value_, sizeof (float));
}
template <typename PointT> void
pcl::ExtractIndices<PointT>::applyFilterIndices (std::vector<int> &indices)
{
- if (indices_->size () > input_->points.size ())
+ if (indices_->size () > input_->size ())
{
PCL_ERROR ("[pcl::%s::applyFilter] The indices size exceeds the size of the input.\n", getClassName ().c_str ());
indices.clear ();
if (extract_removed_indices_)
{
// Set up the full indices set
- std::vector<int> full_indices (input_->points.size ());
+ std::vector<int> full_indices (input_->size ());
for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
full_indices[fii] = fii;
else // Inverted functionality
{
// Set up the full indices set
- std::vector<int> full_indices (input_->points.size ());
+ std::vector<int> full_indices (input_->size ());
for (int fii = 0; fii < static_cast<int> (full_indices.size ()); ++fii) // fii = full indices iterator
full_indices[fii] = fii;
float base_max = -std::numeric_limits<float>::max (),
base_min = std::numeric_limits<float>::max ();
bool found_finite = false;
- for (std::size_t x = 0; x < output.width; ++x)
+ for (const auto& pt: output)
{
- for (std::size_t y = 0; y < output.height; ++y)
+ if (std::isfinite(pt.z))
{
- if (std::isfinite (output (x, y).z))
- {
- if (base_max < output (x, y).z)
- base_max = output (x, y).z;
- if (base_min > output (x, y).z)
- base_min = output (x, y).z;
- found_finite = true;
- }
+ base_max = std::max<float>(pt.z, base_max);
+ base_min = std::min<float>(pt.z, base_min);
+ found_finite = true;
}
}
if (!found_finite)
return;
}
- for (std::size_t x = 0; x < output.width; ++x)
- for (std::size_t y = 0; y < output.height; ++y)
- if (!std::isfinite (output (x, y).z))
- output (x, y).z = base_max;
+ for (auto& pt: output)
+ {
+ if (!std::isfinite(pt.z))
+ {
+ pt.z = base_max;
+ }
+ }
const float base_delta = base_max - base_min;
float base_max = -std::numeric_limits<float>::max (),
base_min = std::numeric_limits<float>::max ();
bool found_finite = false;
- for (std::size_t x = 0; x < output.width; ++x)
+ for (const auto& pt: output)
{
- for (std::size_t y = 0; y < output.height; ++y)
+ if (std::isfinite(pt.z))
{
- if (std::isfinite (output (x, y).z))
- {
- if (base_max < output (x, y).z)
- base_max = output (x, y).z;
- if (base_min > output (x, y).z)
- base_min = output (x, y).z;
- found_finite = true;
- }
+ base_max = std::max<float>(pt.z, base_max);
+ base_min = std::min<float>(pt.z, base_min);
+ found_finite = true;
}
}
if (!found_finite)
if (&cloud_in != &cloud_out)
{
cloud_out.header = cloud_in.header;
- cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.points.resize (cloud_in.size ());
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
// Reserve enough space for the indices
- index.resize (cloud_in.points.size ());
+ index.resize (cloud_in.size ());
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
// Simply copy the data
cloud_out = cloud_in;
- for (std::size_t j = 0; j < cloud_out.points.size (); ++j)
- index[j] = static_cast<int>(j);
+ for (std::size_t j = 0; j < cloud_out.size (); ++j)
+ index[j] = j;
}
else
{
std::size_t j = 0;
- for (std::size_t i = 0; i < cloud_in.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_in.size (); ++i)
{
- if (!std::isfinite (cloud_in.points[i].x) ||
- !std::isfinite (cloud_in.points[i].y) ||
- !std::isfinite (cloud_in.points[i].z))
+ if (!std::isfinite (cloud_in[i].x) ||
+ !std::isfinite (cloud_in[i].y) ||
+ !std::isfinite (cloud_in[i].z))
continue;
- cloud_out.points[j] = cloud_in.points[i];
- index[j] = static_cast<int>(i);
+ cloud_out[j] = cloud_in[i];
+ index[j] = i;
j++;
}
- if (j != cloud_in.points.size ())
+ if (j != cloud_in.size ())
{
// Resize to the correct size
cloud_out.points.resize (j);
if (&cloud_in != &cloud_out)
{
cloud_out.header = cloud_in.header;
- cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.points.resize (cloud_in.size ());
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
// Reserve enough space for the indices
- index.resize (cloud_in.points.size ());
+ index.resize (cloud_in.size ());
std::size_t j = 0;
// Assume cloud is dense
cloud_out.is_dense = true;
- for (std::size_t i = 0; i < cloud_in.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_in.size (); ++i)
{
- if (!std::isfinite (cloud_in.points[i].normal_x) ||
- !std::isfinite (cloud_in.points[i].normal_y) ||
- !std::isfinite (cloud_in.points[i].normal_z))
+ if (!std::isfinite (cloud_in[i].normal_x) ||
+ !std::isfinite (cloud_in[i].normal_y) ||
+ !std::isfinite (cloud_in[i].normal_z))
continue;
- if (cloud_out.is_dense && !pcl::isFinite(cloud_in.points[i]))
+ if (cloud_out.is_dense && !pcl::isFinite(cloud_in[i]))
cloud_out.is_dense = false;
- cloud_out.points[j] = cloud_in.points[i];
- index[j] = static_cast<int>(i);
+ cloud_out[j] = cloud_in[i];
+ index[j] = i;
j++;
}
- if (j != cloud_in.points.size ())
+ if (j != cloud_in.size ())
{
// Resize to the correct size
cloud_out.points.resize (j);
}
cloud_out.height = 1;
- cloud_out.width = static_cast<std::uint32_t>(j);
+ cloud_out.width = j;
}
#ifndef PCL_FILTERS_IMPL_FILTER_INDICES_H_
#define PCL_FILTERS_IMPL_FILTER_INDICES_H_
-#include <pcl/pcl_macros.h>
#include <pcl/filters/filter_indices.h>
template <typename PointT> void
std::vector<int> &index)
{
// Reserve enough space for the indices
- index.resize (cloud_in.points.size ());
+ index.resize (cloud_in.size ());
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
- for (int j = 0; j < static_cast<int> (cloud_in.points.size ()); ++j)
+ for (int j = 0; j < static_cast<int> (cloud_in.size ()); ++j)
index[j] = j;
}
else
{
int j = 0;
- for (int i = 0; i < static_cast<int> (cloud_in.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (cloud_in.size ()); ++i)
{
- if (!std::isfinite (cloud_in.points[i].x) ||
- !std::isfinite (cloud_in.points[i].y) ||
- !std::isfinite (cloud_in.points[i].z))
+ if (!std::isfinite (cloud_in[i].x) ||
+ !std::isfinite (cloud_in[i].y) ||
+ !std::isfinite (cloud_in[i].z))
continue;
index[j] = i;
j++;
}
- if (j != static_cast<int> (cloud_in.points.size ()))
+ if (j != static_cast<int> (cloud_in.size ()))
{
// Resize to the correct size
index.resize (j);
#define PCL_FILTERS_IMPL_FRUSTUM_CULLING_HPP_
#include <pcl/filters/frustum_culling.h>
-#include <pcl/common/io.h>
#include <vector>
///////////////////////////////////////////////////////////////////////////////
for (std::size_t i = 0; i < indices_->size (); i++)
{
int idx = indices_->at (i);
- Eigen::Vector4f pt (input_->points[idx].x,
- input_->points[idx].y,
- input_->points[idx].z,
+ Eigen::Vector4f pt ((*input_)[idx].x,
+ (*input_)[idx].y,
+ (*input_)[idx].z,
1.0f);
bool is_in_fov = (pt.dot (pl_l) <= 0) &&
(pt.dot (pl_r) <= 0) &&
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[*it].x) ||
- !std::isfinite (input_->points[*it].y) ||
- !std::isfinite (input_->points[*it].z))
+ if (!std::isfinite ((*input_)[*it].x) ||
+ !std::isfinite ((*input_)[*it].y) ||
+ !std::isfinite ((*input_)[*it].z))
continue;
- int ijk0 = static_cast<int> (std::floor (input_->points[*it].x * inverse_resolution_) - static_cast<float> (min_b[0]));
- int ijk1 = static_cast<int> (std::floor (input_->points[*it].y * inverse_resolution_) - static_cast<float> (min_b[1]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_resolution_) - static_cast<float> (min_b[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_resolution_) - static_cast<float> (min_b[1]));
// Compute the grid cell index
int idx = ijk0 * divb_mul[0] + ijk1 * divb_mul[1];
unsigned int first_index = cp.first;
unsigned int last_index = cp.second;
unsigned int min_index = index_vector[first_index].cloud_point_index;
- float min_z = input_->points[index_vector[first_index].cloud_point_index].z;
+ float min_z = (*input_)[index_vector[first_index].cloud_point_index].z;
for (unsigned int i = first_index + 1; i < last_index; ++i)
{
- if (input_->points[index_vector[i].cloud_point_index].z < min_z)
+ if ((*input_)[index_vector[i].cloud_point_index].z < min_z)
{
- min_z = input_->points[index_vector[i].cloud_point_index].z;
+ min_z = (*input_)[index_vector[i].cloud_point_index].z;
min_index = index_vector[i].cloud_point_index;
}
}
// cylinder)
for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii)
{
- if (!isFinite (input_->points[(*indices_)[iii]]))
+ if (!isFinite ((*input_)[(*indices_)[iii]]))
{
continue;
}
// Perform the radius search in the projected cloud
std::vector<int> radius_indices;
std::vector<float> radius_dists;
- PointT p = cloud_projected->points[(*indices_)[iii]];
+ PointT p = (*cloud_projected)[(*indices_)[iii]];
if (searcher_->radiusSearch (p, radius_, radius_indices, radius_dists) == 0)
{
PCL_WARN ("[pcl::%s::applyFilter] Searching for neighbors within radius %f failed.\n", getClassName ().c_str (), radius_);
}
// Check to see if a neighbor is higher than the query point
- float query_z = input_->points[(*indices_)[iii]].z;
+ float query_z = (*input_)[(*indices_)[iii]].z;
for (std::size_t k = 1; k < radius_indices.size (); ++k) // k = 1 is the first neighbor
{
- if (input_->points[radius_indices[k]].z > query_z)
+ if ((*input_)[radius_indices[k]].z > query_z)
{
// Query point is not the local max, no need to check others
point_is_max[(*indices_)[iii]] = false;
#pragma once
#include <pcl/filters/median_filter.h>
-#include <pcl/common/io.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
template <typename PointT> void
#pragma once
#include <pcl/filters/model_outlier_removal.h>
-#include <pcl/common/io.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
#include <pcl/sample_consensus/sac_model_circle.h>
#include <pcl/sample_consensus/sac_model_cylinder.h>
for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
{
// Non-finite entries are always passed to removed indices
- if (!isFinite (input_->points[ (*indices_)[iii]]))
+ if (!isFinite ((*input_)[ (*indices_)[iii]]))
{
if (extract_removed_indices_)
(*removed_indices_)[rii++] = (*indices_)[iii];
for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
{
// Non-finite entries are always passed to removed indices
- if (!isFinite (input_->points[ (*indices_)[iii]]))
+ if (!isFinite ((*input_)[ (*indices_)[iii]]))
{
if (extract_removed_indices_)
(*removed_indices_)[rii++] = (*indices_)[iii];
case MORPH_DILATE:
case MORPH_ERODE:
{
- for (std::size_t p_idx = 0; p_idx < cloud_in->points.size (); ++p_idx)
+ for (std::size_t p_idx = 0; p_idx < cloud_in->size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
std::vector<int> pt_indices;
- float minx = cloud_in->points[p_idx].x - half_res;
- float miny = cloud_in->points[p_idx].y - half_res;
+ float minx = (*cloud_in)[p_idx].x - half_res;
+ float miny = (*cloud_in)[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
- float maxx = cloud_in->points[p_idx].x + half_res;
- float maxy = cloud_in->points[p_idx].y + half_res;
+ float maxx = (*cloud_in)[p_idx].x + half_res;
+ float maxy = (*cloud_in)[p_idx].y + half_res;
float maxz = std::numeric_limits<float>::max ();
bbox_min = Eigen::Vector3f (minx, miny, minz);
bbox_max = Eigen::Vector3f (maxx, maxy, maxz);
{
case MORPH_DILATE:
{
- cloud_out.points[p_idx].z = max_pt.z ();
+ cloud_out[p_idx].z = max_pt.z ();
break;
}
case MORPH_ERODE:
{
- cloud_out.points[p_idx].z = min_pt.z ();
+ cloud_out[p_idx].z = min_pt.z ();
break;
}
}
pcl::copyPointCloud (*cloud_in, cloud_temp);
- for (std::size_t p_idx = 0; p_idx < cloud_temp.points.size (); ++p_idx)
+ for (std::size_t p_idx = 0; p_idx < cloud_temp.size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
std::vector<int> pt_indices;
- float minx = cloud_temp.points[p_idx].x - half_res;
- float miny = cloud_temp.points[p_idx].y - half_res;
+ float minx = cloud_temp[p_idx].x - half_res;
+ float miny = cloud_temp[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
- float maxx = cloud_temp.points[p_idx].x + half_res;
- float maxy = cloud_temp.points[p_idx].y + half_res;
+ float maxx = cloud_temp[p_idx].x + half_res;
+ float maxy = cloud_temp[p_idx].y + half_res;
float maxz = std::numeric_limits<float>::max ();
bbox_min = Eigen::Vector3f (minx, miny, minz);
bbox_max = Eigen::Vector3f (maxx, maxy, maxz);
{
case MORPH_OPEN:
{
- cloud_out.points[p_idx].z = min_pt.z ();
+ cloud_out[p_idx].z = min_pt.z ();
break;
}
case MORPH_CLOSE:
{
- cloud_out.points[p_idx].z = max_pt.z ();
+ cloud_out[p_idx].z = max_pt.z ();
break;
}
}
cloud_temp.swap (cloud_out);
- for (std::size_t p_idx = 0; p_idx < cloud_temp.points.size (); ++p_idx)
+ for (std::size_t p_idx = 0; p_idx < cloud_temp.size (); ++p_idx)
{
Eigen::Vector3f bbox_min, bbox_max;
std::vector<int> pt_indices;
- float minx = cloud_temp.points[p_idx].x - half_res;
- float miny = cloud_temp.points[p_idx].y - half_res;
+ float minx = cloud_temp[p_idx].x - half_res;
+ float miny = cloud_temp[p_idx].y - half_res;
float minz = -std::numeric_limits<float>::max ();
- float maxx = cloud_temp.points[p_idx].x + half_res;
- float maxy = cloud_temp.points[p_idx].y + half_res;
+ float maxx = cloud_temp[p_idx].x + half_res;
+ float maxy = cloud_temp[p_idx].y + half_res;
float maxz = std::numeric_limits<float>::max ();
bbox_min = Eigen::Vector3f (minx, miny, minz);
bbox_max = Eigen::Vector3f (maxx, maxy, maxz);
case MORPH_OPEN:
default:
{
- cloud_out.points[p_idx].z = max_pt.z ();
+ cloud_out[p_idx].z = max_pt.z ();
break;
}
case MORPH_CLOSE:
{
- cloud_out.points[p_idx].z = min_pt.z ();
+ cloud_out[p_idx].z = min_pt.z ();
break;
}
}
#define PCL_FILTERS_IMPL_NORMAL_SPACE_SAMPLE_H_
#include <pcl/filters/normal_space.h>
-#include <pcl/common/io.h>
#include <vector>
#include <list>
}
// Maintaining flags to check if a point is sampled
- boost::dynamic_bitset<> is_sampled_flag (input_normals_->points.size ());
+ boost::dynamic_bitset<> is_sampled_flag (input_normals_->size ());
// Maintaining flags to check if all points in the bin are sampled
boost::dynamic_bitset<> bin_empty_flag (normals_hg.size ());
unsigned int i = 0;
#define PCL_FILTERS_IMPL_PASSTHROUGH_HPP_
#include <pcl/filters/passthrough.h>
-#include <pcl/common/io.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
for (const auto ii : *indices_) // ii = input index
{
// Non-finite entries are always passed to removed indices
- if (!std::isfinite (input_->points[ii].x) ||
- !std::isfinite (input_->points[ii].y) ||
- !std::isfinite (input_->points[ii].z))
+ if (!std::isfinite ((*input_)[ii].x) ||
+ !std::isfinite ((*input_)[ii].y) ||
+ !std::isfinite ((*input_)[ii].z))
{
if (extract_removed_indices_)
(*removed_indices_)[rii++] = ii;
for (const auto ii : *indices_) // ii = input index
{
// Non-finite entries are always passed to removed indices
- if (!std::isfinite (input_->points[ii].x) ||
- !std::isfinite (input_->points[ii].y) ||
- !std::isfinite (input_->points[ii].z))
+ if (!std::isfinite ((*input_)[ii].x) ||
+ !std::isfinite ((*input_)[ii].y) ||
+ !std::isfinite ((*input_)[ii].z))
{
if (extract_removed_indices_)
(*removed_indices_)[rii++] = ii;
}
// Get the field's value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&input_->points[ii]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*input_)[ii]);
float field_value = 0;
memcpy (&field_value, pt_data + fields[distance_idx].offset, sizeof (float));
#define PCL_FILTERS_IMPL_RADIUS_OUTLIER_REMOVAL_H_
#include <pcl/filters/radius_outlier_removal.h>
-#include <pcl/common/io.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
#define PCL_FILTERS_IMPL_RANDOM_SAMPLE_H_
#include <pcl/filters/random_sample.h>
-#include <pcl/common/io.h>
#include <pcl/type_traits.h>
pcl::SamplingSurfaceNormal<PointT>::applyFilter (PointCloud &output)
{
std::vector <int> indices;
- std::size_t npts = input_->points.size ();
+ std::size_t npts = input_->size ();
for (std::size_t i = 0; i < npts; i++)
indices.push_back (i);
findXYZMaxMin (*input_, max_vec, min_vec);
PointCloud data = *input_;
partition (data, 0, npts, min_vec, max_vec, indices, output);
- output.width = 1;
- output.height = std::uint32_t (output.points.size ());
+ output.height = 1;
+ output.width = output.size ();
}
///////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
pcl::SamplingSurfaceNormal<PointT>::findXYZMaxMin (const PointCloud& cloud, Vector& max_vec, Vector& min_vec)
{
- float maxval = cloud.points[0].x;
- float minval = cloud.points[0].x;
-
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- {
- if (cloud.points[i].x > maxval)
- {
- maxval = cloud.points[i].x;
- }
- if (cloud.points[i].x < minval)
- {
- minval = cloud.points[i].x;
- }
+ // use 4-component (Array4f) min/max so Eigen can vectorize the comparison
+ Eigen::Array4f min_array =
+ Eigen::Array4f::Constant(std::numeric_limits<float>::max());
+ Eigen::Array4f max_array =
+ Eigen::Array4f::Constant(std::numeric_limits<float>::lowest());
+
+ for (const auto& point : cloud) {
+ min_array = min_array.min(point.getArray4fMap());
+ max_array = max_array.max(point.getArray4fMap());
}
- max_vec (0) = maxval;
- min_vec (0) = minval;
- maxval = cloud.points[0].y;
- minval = cloud.points[0].y;
-
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- {
- if (cloud.points[i].y > maxval)
- {
- maxval = cloud.points[i].y;
- }
- if (cloud.points[i].y < minval)
- {
- minval = cloud.points[i].y;
- }
- }
- max_vec (1) = maxval;
- min_vec (1) = minval;
-
- maxval = cloud.points[0].z;
- minval = cloud.points[0].z;
-
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- {
- if (cloud.points[i].z > maxval)
- {
- maxval = cloud.points[i].z;
- }
- if (cloud.points[i].z < minval)
- {
- minval = cloud.points[i].z;
- }
- }
- max_vec (2) = maxval;
- min_vec (2) = minval;
+ max_vec = max_array.head<3>();
+ min_vec = min_array.head<3>();
}
///////////////////////////////////////////////////////////////////////////////
for (int i = first; i < last; i++)
{
PointT pt;
- pt.x = data.points[indices[i]].x;
- pt.y = data.points[indices[i]].y;
- pt.z = data.points[indices[i]].z;
- cloud.points.push_back (pt);
+ pt.x = data[indices[i]].x;
+ pt.y = data[indices[i]].y;
+ pt.z = data[indices[i]].z;
+ cloud.push_back (pt);
}
- cloud.width = 1;
- cloud.height = std::uint32_t (cloud.points.size ());
+ cloud.height = 1;
+ cloud.width = cloud.size ();
Eigen::Vector4f normal;
float curvature = 0;
computeNormal (cloud, normal, curvature);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
+ for (const auto& point: cloud)
{
// TODO: change to Boost random number generators!
const float r = float (std::rand ()) / float (RAND_MAX);
if (r < ratio_)
{
- PointT pt = cloud.points[i];
+ PointT pt = point;
pt.normal[0] = normal (0);
pt.normal[1] = normal (1);
pt.normal[2] = normal (2);
pt.curvature = curvature;
- output.points.push_back (pt);
+ output.push_back (pt);
}
}
}
Eigen::Matrix3f &covariance_matrix,
Eigen::Vector4f ¢roid)
{
- // create the buffer on the stack which is much faster than using cloud.points[indices[i]] and centroid as a buffer
+ // create the buffer on the stack which is much faster than using cloud[indices[i]] and centroid as a buffer
Eigen::Matrix<float, 1, 9, Eigen::RowMajor> accu = Eigen::Matrix<float, 1, 9, Eigen::RowMajor>::Zero ();
std::size_t point_count = 0;
- for (std::size_t i = 0; i < cloud.points.size (); i++)
+ for (const auto& point: cloud)
{
- if (!isFinite (cloud[i]))
+ if (!isXYZFinite (point))
{
continue;
}
++point_count;
- accu [0] += cloud[i].x * cloud[i].x;
- accu [1] += cloud[i].x * cloud[i].y;
- accu [2] += cloud[i].x * cloud[i].z;
- accu [3] += cloud[i].y * cloud[i].y; // 4
- accu [4] += cloud[i].y * cloud[i].z; // 5
- accu [5] += cloud[i].z * cloud[i].z; // 8
- accu [6] += cloud[i].x;
- accu [7] += cloud[i].y;
- accu [8] += cloud[i].z;
+ accu [0] += point.x * point.x;
+ accu [1] += point.x * point.y;
+ accu [2] += point.x * point.z;
+ accu [3] += point.y * point.y; // 4
+ accu [4] += point.y * point.z; // 5
+ accu [5] += point.z * point.z; // 8
+ accu [6] += point.x;
+ accu [7] += point.y;
+ accu [8] += point.z;
}
accu /= static_cast<float> (point_count);
const PointCloud& cloud, const int cut_dim, const int cut_index)
{
if (cut_dim == 0)
- return (cloud.points[cut_index].x);
+ return (cloud[cut_index].x);
if (cut_dim == 1)
- return (cloud.points[cut_index].y);
+ return (cloud[cut_index].y);
if (cut_dim == 2)
- return (cloud.points[cut_index].z);
+ return (cloud[cut_index].z);
return (0.0f);
}
pcl::ShadowPoints<PointT, NormalT>::applyFilter (PointCloud &output)
{
assert (input_normals_ != NULL);
- output.points.resize (input_->points.size ());
+ output.points.resize (input_->size ());
if (extract_removed_indices_)
- removed_indices_->resize (input_->points.size ());
+ removed_indices_->resize (input_->size ());
std::size_t cp = 0;
std::size_t ri = 0;
- for (std::size_t i = 0; i < input_->points.size (); i++)
+ for (std::size_t i = 0; i < input_->size (); i++)
{
- const NormalT &normal = input_normals_->points[i];
- const PointT &pt = input_->points[i];
+ const NormalT &normal = (*input_normals_)[i];
+ const PointT &pt = (*input_)[i];
const float val = std::abs (normal.normal_x * pt.x + normal.normal_y * pt.y + normal.normal_z * pt.z);
if ( (val >= threshold_) ^ negative_)
- output.points[cp++] = pt;
+ output[cp++] = pt;
else
{
if (extract_removed_indices_)
(*removed_indices_)[ri++] = i;
if (keep_organized_)
{
- PointT &pt_new = output.points[cp++] = pt;
+ PointT &pt_new = output[cp++] = pt;
pt_new.x = pt_new.y = pt_new.z = user_filter_value_;
}
}
output.points.resize (cp);
removed_indices_->resize (ri);
- output.width = 1;
- output.height = static_cast<std::uint32_t> (output.points.size ());
+ output.height = 1;
+ output.width = output.size ();
}
///////////////////////////////////////////////////////////////////////////////
pcl::ShadowPoints<PointT, NormalT>::applyFilter (std::vector<int> &indices)
{
assert (input_normals_ != NULL);
- indices.resize (input_->points.size ());
+ indices.resize (input_->size ());
if (extract_removed_indices_)
removed_indices_->resize (indices_->size ());
unsigned int z = 0;
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
- const NormalT &normal = input_normals_->points[*idx];
- const PointT &pt = input_->points[*idx];
+ const NormalT &normal = (*input_normals_)[*idx];
+ const PointT &pt = (*input_)[*idx];
float val = std::abs (normal.normal_x * pt.x + normal.normal_y * pt.y + normal.normal_z * pt.z);
#define PCL_FILTERS_IMPL_STATISTICAL_OUTLIER_REMOVAL_H_
#include <pcl/filters/statistical_outlier_removal.h>
-#include <pcl/common/io.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
int valid_distances = 0;
for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
{
- if (!std::isfinite (input_->points[(*indices_)[iii]].x) ||
- !std::isfinite (input_->points[(*indices_)[iii]].y) ||
- !std::isfinite (input_->points[(*indices_)[iii]].z))
+ if (!std::isfinite ((*input_)[(*indices_)[iii]].x) ||
+ !std::isfinite ((*input_)[(*indices_)[iii]].y) ||
+ !std::isfinite ((*input_)[(*indices_)[iii]].z))
{
distances[iii] = 0.0;
continue;
{
PCL_WARN ("[pcl::%s::detectKeypoints] No input dataset given!\n", getClassName ().c_str ());
output.width = output.height = 0;
- output.points.clear ();
+ output.clear ();
return;
}
if (!input_->is_dense)
{
// Check if the point is invalid
- if (!std::isfinite (input_->points[(*indices_)[cp]].x) ||
- !std::isfinite (input_->points[(*indices_)[cp]].y) ||
- !std::isfinite (input_->points[(*indices_)[cp]].z))
+ if (!std::isfinite ((*input_)[(*indices_)[cp]].x) ||
+ !std::isfinite ((*input_)[(*indices_)[cp]].y) ||
+ !std::isfinite ((*input_)[(*indices_)[cp]].z))
{
if (extract_removed_indices_)
removed_indices_->push_back ((*indices_)[cp]);
}
Eigen::Vector4i ijk = Eigen::Vector4i::Zero ();
- ijk[0] = static_cast<int> (std::floor (input_->points[(*indices_)[cp]].x * inverse_leaf_size_[0]));
- ijk[1] = static_cast<int> (std::floor (input_->points[(*indices_)[cp]].y * inverse_leaf_size_[1]));
- ijk[2] = static_cast<int> (std::floor (input_->points[(*indices_)[cp]].z * inverse_leaf_size_[2]));
+ ijk[0] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].x * inverse_leaf_size_[0]));
+ ijk[1] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].y * inverse_leaf_size_[1]));
+ ijk[2] = static_cast<int> (std::floor ((*input_)[(*indices_)[cp]].z * inverse_leaf_size_[2]));
// Compute the leaf index
int idx = (ijk - min_b_).dot (divb_mul_);
}
// Check to see if this point is closer to the leaf center than the previous one we saved
- float diff_cur = (input_->points[(*indices_)[cp]].getVector4fMap () - ijk.cast<float> ()).squaredNorm ();
- float diff_prev = (input_->points[leaf.idx].getVector4fMap () - ijk.cast<float> ()).squaredNorm ();
+ float diff_cur = ((*input_)[(*indices_)[cp]].getVector4fMap () - ijk.cast<float> ()).squaredNorm ();
+ float diff_prev = ((*input_)[leaf.idx].getVector4fMap () - ijk.cast<float> ()).squaredNorm ();
// If current point is closer, copy its index instead
if (diff_cur < diff_prev)
}
// Second pass: go over all leaves and copy data
- output.points.resize (leaves_.size ());
+ output.resize (leaves_.size ());
int cp = 0;
for (const auto& leaf : leaves_)
- output.points[cp++] = input_->points[leaf.second.idx];
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output[cp++] = (*input_)[leaf.second.idx];
+ output.width = output.size ();
}
#define PCL_INSTANTIATE_UniformSampling(T) template class PCL_EXPORTS pcl::UniformSampling<T>;
// If dense, no need to check for NaNs
if (cloud->is_dense)
{
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (const auto& point: *cloud)
{
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud->points[i]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&point);
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
if (limit_negative)
continue;
}
// Create the point structure and get the min/max
- pcl::Array4fMapConst pt = cloud->points[i].getArray4fMap ();
+ pcl::Array4fMapConst pt = point.getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
}
else
{
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (const auto& point: *cloud)
{
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud->points[i]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&point);
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
if (limit_negative)
}
// Check if the point is invalid
- if (!std::isfinite (cloud->points[i].x) ||
- !std::isfinite (cloud->points[i].y) ||
- !std::isfinite (cloud->points[i].z))
+ if (!isXYZFinite (point))
continue;
// Create the point structure and get the min/max
- pcl::Array4fMapConst pt = cloud->points[i].getArray4fMap ();
+ pcl::Array4fMapConst pt = point.getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
for (const int &index : indices)
{
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud->points[index]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud)[index]);
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
if (limit_negative)
continue;
}
// Create the point structure and get the min/max
- pcl::Array4fMapConst pt = cloud->points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = (*cloud)[index].getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
for (const int &index : indices)
{
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud->points[index]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud)[index]);
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
if (limit_negative)
}
// Check if the point is invalid
- if (!std::isfinite (cloud->points[index].x) ||
- !std::isfinite (cloud->points[index].y) ||
- !std::isfinite (cloud->points[index].z))
+ if (!std::isfinite ((*cloud)[index].x) ||
+ !std::isfinite ((*cloud)[index].y) ||
+ !std::isfinite ((*cloud)[index].z))
continue;
// Create the point structure and get the min/max
- pcl::Array4fMapConst pt = cloud->points[index].getArray4fMap ();
+ pcl::Array4fMapConst pt = (*cloud)[index].getArray4fMap ();
min_p = min_p.min (pt);
max_p = max_p.max (pt);
}
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[*it].x) ||
- !std::isfinite (input_->points[*it].y) ||
- !std::isfinite (input_->points[*it].z))
+ if (!std::isfinite ((*input_)[*it].x) ||
+ !std::isfinite ((*input_)[*it].y) ||
+ !std::isfinite ((*input_)[*it].z))
continue;
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&input_->points[*it]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*input_)[*it]);
float distance_value = 0;
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
continue;
}
- int ijk0 = static_cast<int> (std::floor (input_->points[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor (input_->points[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor (input_->points[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
+ int ijk2 = static_cast<int> (std::floor ((*input_)[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[*it].x) ||
- !std::isfinite (input_->points[*it].y) ||
- !std::isfinite (input_->points[*it].z))
+ if (!std::isfinite ((*input_)[*it].x) ||
+ !std::isfinite ((*input_)[*it].y) ||
+ !std::isfinite ((*input_)[*it].z))
continue;
- int ijk0 = static_cast<int> (std::floor (input_->points[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor (input_->points[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor (input_->points[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
+ int ijk0 = static_cast<int> (std::floor ((*input_)[*it].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
+ int ijk1 = static_cast<int> (std::floor ((*input_)[*it].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
+ int ijk2 = static_cast<int> (std::floor ((*input_)[*it].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
Eigen::Vector4f centroid (Eigen::Vector4f::Zero ());
for (unsigned int li = first_index; li < last_index; ++li)
- centroid += input_->points[index_vector[li].cloud_point_index].getVector4fMap ();
+ centroid += (*input_)[index_vector[li].cloud_point_index].getVector4fMap ();
centroid /= static_cast<float> (last_index - first_index);
- output.points[index].getVector4fMap () = centroid;
+ output[index].getVector4fMap () = centroid;
}
else
{
// fill in the accumulator with leaf points
for (unsigned int li = first_index; li < last_index; ++li)
- centroid.add (input_->points[index_vector[li].cloud_point_index]);
+ centroid.add ((*input_)[index_vector[li].cloud_point_index]);
- centroid.get (output.points[index]);
+ centroid.get (output[index]);
}
++index;
}
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
#define PCL_INSTANTIATE_VoxelGrid(T) template class PCL_EXPORTS pcl::VoxelGrid<T>;
PCL_WARN ("[pcl::%s::applyFilter] Invalid filter field name. Index is %d.\n", getClassName ().c_str (), distance_idx);
// First pass: go over all points and insert them into the right leaf
- for (std::size_t cp = 0; cp < input_->points.size (); ++cp)
+ for (const auto& point: *input_)
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[cp].x) ||
- !std::isfinite (input_->points[cp].y) ||
- !std::isfinite (input_->points[cp].z))
+ if (!isXYZFinite (point))
continue;
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&input_->points[cp]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&point);
float distance_value = 0;
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
continue;
}
- int ijk0 = static_cast<int> (std::floor (input_->points[cp].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor (input_->points[cp].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor (input_->points[cp].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
-
// Compute the centroid leaf index
- int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
+ const Eigen::Vector4i ijk =
+ Eigen::floor(point.getArray4fMap() * inverse_leaf_size_.array())
+ .template cast<int>();
+ // divb_mul_[3] is 0 by assignment, so the unused fourth component of ijk does not affect the index
+ int idx = (ijk - min_b_).dot(divb_mul_);
Leaf& leaf = leaves_[idx];
if (leaf.nr_points == 0)
leaf.centroid.setZero ();
}
- Eigen::Vector3d pt3d (input_->points[cp].x, input_->points[cp].y, input_->points[cp].z);
+ Eigen::Vector3d pt3d = point.getVector3fMap().template cast<double>();
// Accumulate point sum for centroid calculation
leaf.mean_ += pt3d;
// Accumulate x*xT for single pass covariance calculation
// Do we need to process all the fields?
if (!downsample_all_data_)
{
- Eigen::Vector4f pt (input_->points[cp].x, input_->points[cp].y, input_->points[cp].z, 0);
- leaf.centroid.template head<4> () += pt;
+ leaf.centroid.template head<3> () += point.getVector3fMap();
}
else
{
// Copy all the fields
Eigen::VectorXf centroid = Eigen::VectorXf::Zero (centroid_size);
- pcl::for_each_type<FieldList> (NdCopyPointEigenFunctor<PointT> (input_->points[cp], centroid));
+ pcl::for_each_type<FieldList> (NdCopyPointEigenFunctor<PointT> (point, centroid));
// ---[ RGB special case
if (rgba_index >= 0)
{
// Fill r/g/b data, assuming that the order is BGRA
- const pcl::RGB& rgb = *reinterpret_cast<const RGB*> (reinterpret_cast<const char*> (&input_->points[cp]) + rgba_index);
+ const pcl::RGB& rgb = *reinterpret_cast<const RGB*> (reinterpret_cast<const char*> (&point) + rgba_index);
centroid[centroid_size - 4] = rgb.a;
centroid[centroid_size - 3] = rgb.r;
centroid[centroid_size - 2] = rgb.g;
else
{
// First pass: go over all points and insert them into the right leaf
- for (std::size_t cp = 0; cp < input_->points.size (); ++cp)
+ for (const auto& point: *input_)
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[cp].x) ||
- !std::isfinite (input_->points[cp].y) ||
- !std::isfinite (input_->points[cp].z))
+ if (!isXYZFinite (point))
continue;
- int ijk0 = static_cast<int> (std::floor (input_->points[cp].x * inverse_leaf_size_[0]) - static_cast<float> (min_b_[0]));
- int ijk1 = static_cast<int> (std::floor (input_->points[cp].y * inverse_leaf_size_[1]) - static_cast<float> (min_b_[1]));
- int ijk2 = static_cast<int> (std::floor (input_->points[cp].z * inverse_leaf_size_[2]) - static_cast<float> (min_b_[2]));
-
// Compute the centroid leaf index
- int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
+ const Eigen::Vector4i ijk =
+ Eigen::floor(point.getArray4fMap() * inverse_leaf_size_.array())
+ .template cast<int>();
+ // divb_mul_[3] is 0 by assignment, so the unused fourth component of ijk does not affect the index
+ int idx = (ijk - min_b_).dot(divb_mul_);
- //int idx = (((input_->points[cp].getArray4fmap () * inverse_leaf_size_).template cast<int> ()).matrix () - min_b_).dot (divb_mul_);
Leaf& leaf = leaves_[idx];
if (leaf.nr_points == 0)
{
leaf.centroid.setZero ();
}
- Eigen::Vector3d pt3d (input_->points[cp].x, input_->points[cp].y, input_->points[cp].z);
+ Eigen::Vector3d pt3d = point.getVector3fMap().template cast<double>();
// Accumulate point sum for centroid calculation
leaf.mean_ += pt3d;
// Accumulate x*xT for single pass covariance calculation
// Do we need to process all the fields?
if (!downsample_all_data_)
{
- Eigen::Vector4f pt (input_->points[cp].x, input_->points[cp].y, input_->points[cp].z, 0);
- leaf.centroid.template head<4> () += pt;
+ leaf.centroid.template head<3> () += point.getVector3fMap();
}
else
{
// Copy all the fields
Eigen::VectorXf centroid = Eigen::VectorXf::Zero (centroid_size);
- pcl::for_each_type<FieldList> (NdCopyPointEigenFunctor<PointT> (input_->points[cp], centroid));
+ pcl::for_each_type<FieldList> (NdCopyPointEigenFunctor<PointT> (point, centroid));
// ---[ RGB special case
if (rgba_index >= 0)
{
// Fill r/g/b data, assuming that the order is BGRA
- const pcl::RGB& rgb = *reinterpret_cast<const RGB*> (reinterpret_cast<const char*> (&input_->points[cp]) + rgba_index);
+ const pcl::RGB& rgb = *reinterpret_cast<const RGB*> (reinterpret_cast<const char*> (&point) + rgba_index);
centroid[centroid_size - 4] = rgb.a;
centroid[centroid_size - 3] = rgb.r;
centroid[centroid_size - 2] = rgb.g;
}
}
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> int
-pcl::VoxelGridCovariance<PointT>::getNeighborhoodAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors)
+pcl::VoxelGridCovariance<PointT>::getNeighborhoodAtPoint (const Eigen::Matrix<int, 3, Eigen::Dynamic>& relative_coordinates, const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const
{
neighbors.clear ();
// Find displacement coordinates
- Eigen::MatrixXi relative_coordinates = pcl::getAllNeighborCellIndices ();
- Eigen::Vector4i ijk (static_cast<int> (std::floor (reference_point.x / leaf_size_[0])),
- static_cast<int> (std::floor (reference_point.y / leaf_size_[1])),
- static_cast<int> (std::floor (reference_point.z / leaf_size_[2])), 0);
- Eigen::Array4i diff2min = min_b_ - ijk;
- Eigen::Array4i diff2max = max_b_ - ijk;
+ Eigen::Vector4i ijk = Eigen::floor(reference_point.getArray4fMap() * inverse_leaf_size_).template cast<int>();
+ ijk[3] = 0;
+ const Eigen::Array4i diff2min = min_b_ - ijk;
+ const Eigen::Array4i diff2max = max_b_ - ijk;
neighbors.reserve (relative_coordinates.cols ());
// Check each neighbor to see if it is occupied and contains sufficient points
- // Slower than radius search because needs to check 26 indices
for (Eigen::Index ni = 0; ni < relative_coordinates.cols (); ni++)
{
- Eigen::Vector4i displacement = (Eigen::Vector4i () << relative_coordinates.col (ni), 0).finished ();
+ const Eigen::Vector4i displacement = (Eigen::Vector4i () << relative_coordinates.col (ni), 0).finished ();
// Checking if the specified cell is in the grid
if ((diff2min <= displacement.array ()).all () && (diff2max >= displacement.array ()).all ())
{
- typename std::map<std::size_t, Leaf>::iterator leaf_iter = leaves_.find (((ijk + displacement - min_b_).dot (divb_mul_)));
+ const auto leaf_iter = leaves_.find (((ijk + displacement - min_b_).dot (divb_mul_)));
if (leaf_iter != leaves_.end () && leaf_iter->second.nr_points >= min_points_per_voxel_)
{
LeafConstPtr leaf = &(leaf_iter->second);
}
}
- return (static_cast<int> (neighbors.size ()));
+ return static_cast<int> (neighbors.size());
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointT> int
+pcl::VoxelGridCovariance<PointT>::getNeighborhoodAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const
+{
+ Eigen::MatrixXi relative_coordinates = pcl::getAllNeighborCellIndices();
+ return getNeighborhoodAtPoint(relative_coordinates, reference_point, neighbors);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointT> int
+pcl::VoxelGridCovariance<PointT>::getVoxelAtPoint(const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const
+{
+ return getNeighborhoodAtPoint(Eigen::Matrix<int, 3, Eigen::Dynamic>::Zero(3,1), reference_point, neighbors);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointT> int
+pcl::VoxelGridCovariance<PointT>::getFaceNeighborsAtPoint(const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const
+{
+ Eigen::Matrix<int, 3, Eigen::Dynamic> relative_coordinates(3, 7);
+ relative_coordinates.setZero();
+ relative_coordinates(0, 1) = 1;
+ relative_coordinates(0, 2) = -1;
+ relative_coordinates(1, 3) = 1;
+ relative_coordinates(1, 4) = -1;
+ relative_coordinates(2, 5) = 1;
+ relative_coordinates(2, 6) = -1;
+
+ return getNeighborhoodAtPoint(relative_coordinates, reference_point, neighbors);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointT> int
+pcl::VoxelGridCovariance<PointT>::getAllNeighborsAtPoint(const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const
+{
+ Eigen::Matrix<int, 3, Eigen::Dynamic> relative_coordinates(3, 27);
+ relative_coordinates.col(0).setZero();
+ relative_coordinates.rightCols(26) = pcl::getAllNeighborCellIndices();
+
+ return getNeighborhoodAtPoint(relative_coordinates, reference_point, neighbors);
}
//////////////////////////////////////////////////////////////////////////////////////////
const std::vector<int>& k_indices,
const std::vector<float>& k_sqr_distances)
{
- pcl::utils::ignore(cloud);
- pcl::utils::ignore(index);
+ pcl::utils::ignore(cloud, index);
// Check inputs
if (k_indices.size () != k_sqr_distances.size ())
    PCL_ERROR("[pcl::assignNormalWeights] unequal size of neighbor indices and distances!\n");
operator () (const int& p0, const int& p1)
{
if (dim == 0)
- return (cloud.points[p0].x < cloud.points[p1].x);
+ return (cloud[p0].x < cloud[p1].x);
if (dim == 1)
- return (cloud.points[p0].y < cloud.points[p1].y);
+ return (cloud[p0].y < cloud[p1].y);
if (dim == 2)
- return (cloud.points[p0].z < cloud.points[p1].z);
+ return (cloud[p0].z < cloud[p1].z);
return (false);
}
};
}
- /** \brief Get the voxels surrounding point p, not including the voxel containing point p.
- * \note Only voxels containing a sufficient number of points are used (slower than radius search in practice).
+ /** \brief Get the voxels surrounding point p designated by #relative_coordinates.
+ * \note Only voxels containing a sufficient number of points are used.
+ * \param[in] relative_coordinates 3xN matrix that represents relative coordinates of N neighboring voxels with respect to the center voxel
* \param[in] reference_point the point to get the leaf structure at
* \param[out] neighbors
* \return number of neighbors found
*/
int
- getNeighborhoodAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors);
+ getNeighborhoodAtPoint (const Eigen::Matrix<int, 3, Eigen::Dynamic>& relative_coordinates, const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const;
+
+ /** \brief Get the voxels surrounding point p, not including the voxel containing point p.
+ * \note Only voxels containing a sufficient number of points are used.
+ * \param[in] reference_point the point to get the leaf structure at
+    * \param[out] neighbors the occupied voxels found
+ * \return number of neighbors found (up to 26)
+ */
+ int
+ getNeighborhoodAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const;
+
+  /** \brief Get the voxel containing point p.
+    * \note Only voxels containing a sufficient number of points are used.
+    * \param[in] reference_point the point to get the leaf structure at
+    * \param[out] neighbors the occupied voxel found, if any
+ * \return number of neighbors found (up to 1)
+ */
+ int
+ getVoxelAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const;
+
+  /** \brief Get the voxel containing point p and its six face-adjacent voxels (up to 7 voxels in total).
+    * \note Only voxels containing a sufficient number of points are used.
+    * \param[in] reference_point the point to get the leaf structure at
+    * \param[out] neighbors the occupied voxels found
+ * \return number of neighbors found (up to 7)
+ */
+ int
+ getFaceNeighborsAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const;
+
+  /** \brief Get the voxel containing point p and all voxels of its 3x3x3 neighborhood (up to 27 voxels in total).
+    * \note Only voxels containing a sufficient number of points are used.
+    * \param[in] reference_point the point to get the leaf structure at
+    * \param[out] neighbors the occupied voxels found
+ * \return number of neighbors found (up to 27)
+ */
+ int
+ getAllNeighborsAtPoint (const PointT& reference_point, std::vector<LeafConstPtr> &neighbors) const;
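A minimal usage sketch of the voxel-lookup overloads declared above, assuming this changeset is applied and the VoxelGridCovariance grid has been built; the cloud pointer, leaf size and query coordinates below are placeholders, not values taken from the change:

    #include <pcl/filters/voxel_grid_covariance.h>
    #include <pcl/point_types.h>
    #include <vector>

    using Grid = pcl::VoxelGridCovariance<pcl::PointXYZ>;

    Grid grid;
    grid.setInputCloud (cloud);           // cloud: an existing pcl::PointCloud<pcl::PointXYZ>::ConstPtr
    grid.setLeafSize (0.5f, 0.5f, 0.5f);  // placeholder leaf size
    grid.filter (true);                   // build the voxel structure (searchable)

    std::vector<Grid::LeafConstPtr> neighbors;
    const pcl::PointXYZ query (1.0f, 2.0f, 3.0f);
    grid.getVoxelAtPoint (query, neighbors);          // only the voxel containing query (at most 1 leaf)
    grid.getFaceNeighborsAtPoint (query, neighbors);  // that voxel plus its 6 face neighbors (at most 7)
    grid.getAllNeighborsAtPoint (query, neighbors);   // the full 3x3x3 neighborhood (at most 27)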
/** \brief Get the leaf structure map
     * \return a map containing all leaves
*/
int
nearestKSearch (const PointT &point, int k,
- std::vector<LeafConstPtr> &k_leaves, std::vector<float> &k_sqr_distances)
+ std::vector<LeafConstPtr> &k_leaves, std::vector<float> &k_sqr_distances) const
{
k_leaves.clear ();
k_leaves.reserve (k);
for (const int &k_index : k_indices)
{
- k_leaves.push_back (&leaves_[voxel_centroids_leaf_indices_[k_index]]);
+ auto voxel = leaves_.find(voxel_centroids_leaf_indices_[k_index]);
+ if (voxel == leaves_.end()) {
+ continue;
+ }
+
+ k_leaves.push_back(&voxel->second);
}
- return k;
+      return static_cast<int> (k_leaves.size ());
}
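The switch from leaves_[...] to leaves_.find(...) above is what makes the added const qualifier possible: std::map::operator[] is non-const and default-inserts missing keys, while find() only reports whether the voxel exists. A standalone sketch of the pattern, using illustrative types rather than the PCL ones:

    #include <cstddef>
    #include <map>

    struct Leaf { int nr_points = 0; };  // stand-in for the real leaf type

    int points_in_voxel (const std::map<std::size_t, Leaf>& leaves, std::size_t key)
    {
      // leaves[key] would not compile here (const map) and would insert an empty
      // entry on a mutable map; find() leaves the container untouched.
      const auto it = leaves.find (key);
      return it != leaves.end () ? it->second.nr_points : 0;
    }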
/** \brief Search for the k-nearest occupied voxels for the given query point.
*/
inline int
nearestKSearch (const PointCloud &cloud, int index, int k,
- std::vector<LeafConstPtr> &k_leaves, std::vector<float> &k_sqr_distances)
+ std::vector<LeafConstPtr> &k_leaves, std::vector<float> &k_sqr_distances) const
{
- if (index >= static_cast<int> (cloud.points.size ()) || index < 0)
+ if (index >= static_cast<int> (cloud.size ()) || index < 0)
return (0);
- return (nearestKSearch (cloud.points[index], k, k_leaves, k_sqr_distances));
+ return (nearestKSearch (cloud[index], k, k_leaves, k_sqr_distances));
}
*/
int
radiusSearch (const PointT &point, double radius, std::vector<LeafConstPtr> &k_leaves,
- std::vector<float> &k_sqr_distances, unsigned int max_nn = 0)
+ std::vector<float> &k_sqr_distances, unsigned int max_nn = 0) const
{
k_leaves.clear ();
// Find neighbors within radius in the occupied voxel centroid cloud
std::vector<int> k_indices;
- int k = kdtree_.radiusSearch (point, radius, k_indices, k_sqr_distances, max_nn);
+ const int k = kdtree_.radiusSearch (point, radius, k_indices, k_sqr_distances, max_nn);
// Find leaves corresponding to neighbors
k_leaves.reserve (k);
for (const int &k_index : k_indices)
{
- k_leaves.push_back (&leaves_[voxel_centroids_leaf_indices_[k_index]]);
+ const auto voxel = leaves_.find(voxel_centroids_leaf_indices_[k_index]);
+      if (voxel == leaves_.end()) {
+ continue;
+ }
+
+ k_leaves.push_back(&voxel->second);
}
- return k;
+      return static_cast<int> (k_leaves.size ());
}
/** \brief Search for all the nearest occupied voxels of the query point in a given radius.
inline int
radiusSearch (const PointCloud &cloud, int index, double radius,
std::vector<LeafConstPtr> &k_leaves, std::vector<float> &k_sqr_distances,
- unsigned int max_nn = 0)
+ unsigned int max_nn = 0) const
{
- if (index >= static_cast<int> (cloud.points.size ()) || index < 0)
+ if (index >= static_cast<int> (cloud.size ()) || index < 0)
return (0);
- return (radiusSearch (cloud.points[index], radius, k_leaves, k_sqr_distances, max_nn));
+ return (radiusSearch (cloud[index], radius, k_leaves, k_sqr_distances, max_nn));
}
protected:
inserter (remaining_indices, remaining_indices.begin ()));
// Prepare the output and copy the data
- output.width = static_cast<std::uint32_t> (remaining_indices.size ());
+ output.width = remaining_indices.size ();
output.data.resize (remaining_indices.size () * output.point_step);
for (std::size_t i = 0; i < remaining_indices.size (); ++i)
memcpy (&output.data[i * output.point_step], &input_->data[remaining_indices[i] * output.point_step], output.point_step);
else
{
// Prepare the output and copy the data
- output.width = static_cast<std::uint32_t> (indices_->size ());
+ output.width = indices_->size ();
output.data.resize (indices_->size () * output.point_step);
for (std::size_t i = 0; i < indices_->size (); ++i)
memcpy (&output.data[i * output.point_step], &input_->data[(*indices_)[i] * output.point_step], output.point_step);
*/
#include <pcl/filters/impl/filter_indices.hpp>
-#include <pcl/PCLPointCloud2.h>
+namespace pcl { struct PCLPointCloud2; }
/** \brief Base method for feature estimation for all points given in
* <setInputCloud (), setIndices ()> using the surface in setSearchSurface ()
}
// If fields x/y/z are not present, we cannot filter
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
output.width = output.height = 0;
}
// If fields x/y/z are not present, we cannot filter
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
indices.clear ();
// Copy the projected points
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[x_idx].offset], &cloud_out.points[(*indices_)[i]].x, sizeof (float));
- memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[y_idx].offset], &cloud_out.points[(*indices_)[i]].y, sizeof (float));
- memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[z_idx].offset], &cloud_out.points[(*indices_)[i]].z, sizeof (float));
+ memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[x_idx].offset], &cloud_out[(*indices_)[i]].x, sizeof (float));
+ memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[y_idx].offset], &cloud_out[(*indices_)[i]].y, sizeof (float));
+ memcpy (&output.data[(*indices_)[i] * output.point_step + output.fields[z_idx].offset], &cloud_out[(*indices_)[i]].z, sizeof (float));
}
}
else
{
// Copy everything
output.height = 1;
- output.width = static_cast<std::uint32_t> (indices_->size ());
+ output.width = indices_->size ();
output.point_step = input_->point_step;
output.data.resize (output.width * output.point_step);
output.is_bigendian = input_->is_bigendian;
for (std::size_t i = 0; i < indices_->size (); ++i)
{
memcpy (&output.data[i * output.point_step], &input_->data[(*indices_)[i] * input_->point_step], output.point_step);
- memcpy (&output.data[i * output.point_step + output.fields[x_idx].offset], &cloud_out.points[(*indices_)[i]].x, sizeof (float));
- memcpy (&output.data[i * output.point_step + output.fields[y_idx].offset], &cloud_out.points[(*indices_)[i]].y, sizeof (float));
- memcpy (&output.data[i * output.point_step + output.fields[z_idx].offset], &cloud_out.points[(*indices_)[i]].z, sizeof (float));
+ memcpy (&output.data[i * output.point_step + output.fields[x_idx].offset], &cloud_out[(*indices_)[i]].x, sizeof (float));
+ memcpy (&output.data[i * output.point_step + output.fields[y_idx].offset], &cloud_out[(*indices_)[i]].y, sizeof (float));
+ memcpy (&output.data[i * output.point_step + output.fields[z_idx].offset], &cloud_out[(*indices_)[i]].z, sizeof (float));
}
}
}
pcl::RadiusOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
// If fields x/y/z are not present, we cannot filter
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+  if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
output.width = output.height = 0;
#include <pcl/filters/impl/statistical_outlier_removal.hpp>
#include <pcl/conversions.h>
-using namespace std;
-
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
// If fields x/y/z are not present, we cannot filter
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
output.width = output.height = 0;
///////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (vector<int>& indices)
+pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (std::vector<int>& indices)
{
// If fields x/y/z are not present, we cannot filter
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
indices.clear();
// Go over all the points and calculate the mean or smallest distance
for (std::size_t cp = 0; cp < indices_->size (); ++cp)
{
- if (!std::isfinite (cloud->points[(*indices_)[cp]].x) ||
- !std::isfinite (cloud->points[(*indices_)[cp]].y) ||
- !std::isfinite (cloud->points[(*indices_)[cp]].z))
+ if (!std::isfinite ((*cloud)[(*indices_)[cp]].x) ||
+ !std::isfinite ((*cloud)[(*indices_)[cp]].y) ||
+ !std::isfinite ((*cloud)[(*indices_)[cp]].z))
{
distances[cp] = 0;
continue;
pcl::VoxelGrid<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
// If fields x/y/z are not present, we cannot downsample
- if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ if (x_idx_ == UNAVAILABLE || y_idx_ == UNAVAILABLE || z_idx_ == UNAVAILABLE)
{
PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
output.width = output.height = 0;
*/
#include <map>
-#include <pcl/common/io.h>
#include <pcl/filters/voxel_grid_label.h>
#include <pcl/filters/impl/voxel_grid.hpp>
label_index = pcl::getFieldIndex<PointXYZRGBL> ("label", fields);
std::vector<cloud_point_index_idx> index_vector;
- index_vector.reserve(input_->points.size());
+ index_vector.reserve(input_->size());
// If we don't want to process the entire cloud, but rather filter points far away from the viewpoint first...
if (!filter_field_name_.empty ())
// First pass: go over all points and insert them into the index_vector vector
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
- for (unsigned int cp = 0; cp < static_cast<unsigned int> (input_->points.size ()); ++cp)
+ for (unsigned int cp = 0; cp < static_cast<unsigned int> (input_->size ()); ++cp)
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[cp].x) ||
- !std::isfinite (input_->points[cp].y) ||
- !std::isfinite (input_->points[cp].z))
+ if (!std::isfinite ((*input_)[cp].x) ||
+ !std::isfinite ((*input_)[cp].y) ||
+ !std::isfinite ((*input_)[cp].z))
continue;
// Get the distance value
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&input_->points[cp]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*input_)[cp]);
float distance_value = 0;
memcpy (&distance_value, pt_data + fields[distance_idx].offset, sizeof (float));
continue;
}
- int ijk0 = static_cast<int> (std::floor (input_->points[cp].x * inverse_leaf_size_[0]) - min_b_[0]);
- int ijk1 = static_cast<int> (std::floor (input_->points[cp].y * inverse_leaf_size_[1]) - min_b_[1]);
- int ijk2 = static_cast<int> (std::floor (input_->points[cp].z * inverse_leaf_size_[2]) - min_b_[2]);
+ int ijk0 = static_cast<int> (std::floor ((*input_)[cp].x * inverse_leaf_size_[0]) - min_b_[0]);
+ int ijk1 = static_cast<int> (std::floor ((*input_)[cp].y * inverse_leaf_size_[1]) - min_b_[1]);
+ int ijk2 = static_cast<int> (std::floor ((*input_)[cp].z * inverse_leaf_size_[2]) - min_b_[2]);
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
// First pass: go over all points and insert them into the index_vector vector
// with calculated idx. Points with the same idx value will contribute to the
// same point of resulting CloudPoint
- for (unsigned int cp = 0; cp < static_cast<unsigned int> (input_->points.size ()); ++cp)
+ for (unsigned int cp = 0; cp < static_cast<unsigned int> (input_->size ()); ++cp)
{
if (!input_->is_dense)
// Check if the point is invalid
- if (!std::isfinite (input_->points[cp].x) ||
- !std::isfinite (input_->points[cp].y) ||
- !std::isfinite (input_->points[cp].z))
+ if (!std::isfinite ((*input_)[cp].x) ||
+ !std::isfinite ((*input_)[cp].y) ||
+ !std::isfinite ((*input_)[cp].z))
continue;
- int ijk0 = static_cast<int> (std::floor (input_->points[cp].x * inverse_leaf_size_[0]) - min_b_[0]);
- int ijk1 = static_cast<int> (std::floor (input_->points[cp].y * inverse_leaf_size_[1]) - min_b_[1]);
- int ijk2 = static_cast<int> (std::floor (input_->points[cp].z * inverse_leaf_size_[2]) - min_b_[2]);
+ int ijk0 = static_cast<int> (std::floor ((*input_)[cp].x * inverse_leaf_size_[0]) - min_b_[0]);
+ int ijk1 = static_cast<int> (std::floor ((*input_)[cp].y * inverse_leaf_size_[1]) - min_b_[1]);
+ int ijk2 = static_cast<int> (std::floor ((*input_)[cp].z * inverse_leaf_size_[2]) - min_b_[2]);
// Compute the centroid leaf index
int idx = ijk0 * divb_mul_[0] + ijk1 * divb_mul_[1] + ijk2 * divb_mul_[2];
// calculate centroid - sum values from all input points, that have the same idx value in index_vector array
if (!downsample_all_data_)
{
- centroid[0] = input_->points[index_vector[cp].cloud_point_index].x;
- centroid[1] = input_->points[index_vector[cp].cloud_point_index].y;
- centroid[2] = input_->points[index_vector[cp].cloud_point_index].z;
+ centroid[0] = (*input_)[index_vector[cp].cloud_point_index].x;
+ centroid[1] = (*input_)[index_vector[cp].cloud_point_index].y;
+ centroid[2] = (*input_)[index_vector[cp].cloud_point_index].z;
}
else
{
{
// Fill r/g/b data, assuming that the order is BGRA
pcl::RGB rgb;
- memcpy (&rgb, reinterpret_cast<const char*> (&input_->points[index_vector[cp].cloud_point_index]) + rgba_index, sizeof (RGB));
+ memcpy (&rgb, reinterpret_cast<const char*> (&(*input_)[index_vector[cp].cloud_point_index]) + rgba_index, sizeof (RGB));
centroid[centroid_size-3] = rgb.r;
centroid[centroid_size-2] = rgb.g;
centroid[centroid_size-1] = rgb.b;
if (label_index >= 0)
{
// store the label in a map data structure
- std::uint32_t label = input_->points[index_vector[cp].cloud_point_index].label;
+ std::uint32_t label = (*input_)[index_vector[cp].cloud_point_index].label;
std::map<int, int>::iterator it = labels.find (label);
if (it == labels.end ())
labels.insert (labels.begin (), std::pair<int, int> (label, 1));
it->second = it->second++;
}
- pcl::for_each_type <FieldList> (NdCopyPointEigenFunctor <pcl::PointXYZRGBL> (input_->points[index_vector[cp].cloud_point_index], centroid));
+ pcl::for_each_type <FieldList> (NdCopyPointEigenFunctor <pcl::PointXYZRGBL> ((*input_)[index_vector[cp].cloud_point_index], centroid));
}
unsigned int i = cp + 1;
{
if (!downsample_all_data_)
{
- centroid[0] += input_->points[index_vector[i].cloud_point_index].x;
- centroid[1] += input_->points[index_vector[i].cloud_point_index].y;
- centroid[2] += input_->points[index_vector[i].cloud_point_index].z;
+ centroid[0] += (*input_)[index_vector[i].cloud_point_index].x;
+ centroid[1] += (*input_)[index_vector[i].cloud_point_index].y;
+ centroid[2] += (*input_)[index_vector[i].cloud_point_index].z;
}
else
{
{
// Fill r/g/b data, assuming that the order is BGRA
pcl::RGB rgb;
- memcpy (&rgb, reinterpret_cast<const char*> (&input_->points[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
+ memcpy (&rgb, reinterpret_cast<const char*> (&(*input_)[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
temporary[centroid_size-3] = rgb.r;
temporary[centroid_size-2] = rgb.g;
temporary[centroid_size-1] = rgb.b;
}
- pcl::for_each_type <FieldList> (NdCopyPointEigenFunctor <pcl::PointXYZRGBL> (input_->points[index_vector[i].cloud_point_index], temporary));
+ pcl::for_each_type <FieldList> (NdCopyPointEigenFunctor <pcl::PointXYZRGBL> ((*input_)[index_vector[i].cloud_point_index], temporary));
centroid += temporary;
}
++i;
// Do we need to process all the fields?
if (!downsample_all_data_)
{
- output.points[index].x = centroid[0];
- output.points[index].y = centroid[1];
- output.points[index].z = centroid[2];
+ output[index].x = centroid[0];
+ output[index].y = centroid[1];
+ output[index].z = centroid[2];
}
else
{
- pcl::for_each_type<FieldList> (pcl::NdCopyEigenPointFunctor <pcl::PointXYZRGBL> (centroid, output.points[index]));
+ pcl::for_each_type<FieldList> (pcl::NdCopyEigenPointFunctor <pcl::PointXYZRGBL> (centroid, output[index]));
// ---[ RGB special case
if (rgba_index >= 0)
{
// pack r/g/b into rgb
float r = centroid[centroid_size-3], g = centroid[centroid_size-2], b = centroid[centroid_size-1];
int rgb = (static_cast<int> (r) << 16) | (static_cast<int> (g) << 8) | static_cast<int> (b);
- memcpy (reinterpret_cast<char*> (&output.points[index]) + rgba_index, &rgb, sizeof (float));
+ memcpy (reinterpret_cast<char*> (&output[index]) + rgba_index, &rgb, sizeof (float));
}
if (label_index >= 0)
best_label = label.first;
}
}
- output.points[index].label = best_label;
+ output[index].label = best_label;
}
}
cp = i;
++index;
}
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
-#include <pcl/filters/voxel_grid_occlusion_estimation.h>
#include <pcl/filters/impl/voxel_grid_occlusion_estimation.hpp>
// Instantiations of specific point types
#include <pcl/gpu/containers/device_memory.h>
#include <pcl/gpu/utils/safe_call.hpp>
-#include "cuda_runtime_api.h"
+#include <cuda_runtime_api.h>
#include <cassert>
#define HAVE_CUDA
}
pcl::io::savePCDFileASCII ("input.pcd", cloud);
- std::cout << "INFO: Saved " << cloud.points.size () << " data points to test_pcd.pcd." << std::endl;
+  std::cout << "INFO: Saved " << cloud.size () << " data points to input.pcd." << std::endl;
pcl::gpu::Octree::PointCloud cloud_device;
cloud_device.upload(cloud.points);
for (std::size_t j = 0; j < sizes[i] ; ++j)
{
- cloud_result.points.push_back(cloud.points[data[j + i * max_answers]]);
+ cloud_result.points.push_back(cloud[data[j + i * max_answers]]);
std::cout << "INFO: data : " << j << " " << j + i * max_answers << " data " << data[j+ i * max_answers] << std::endl;
}
std::stringstream ss;
ss << "cloud_cluster_" << i << ".pcd";
- cloud_result.width = cloud_result.points.size();
+ cloud_result.width = cloud_result.size();
pcl::io::savePCDFileASCII (ss.str(), cloud_result);
- std::cout << "INFO: Saved " << cloud_result.points.size () << " data points to " << ss.str() << std::endl;
+ std::cout << "INFO: Saved " << cloud_result.size () << " data points to " << ss.str() << std::endl;
}
}
return 0;
/// CPU VERSION
/////////////////////////////////////////////
- std::cout << "INFO: PointCloud_filtered still has " << cloud_filtered->points.size() << " Points " << std::endl;
+ std::cout << "INFO: PointCloud_filtered still has " << cloud_filtered->size() << " Points " << std::endl;
clock_t tStart = clock();
// Creating the KdTree object for the search method of the extraction
pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster->points.push_back (cloud_filtered->points[*pit]); //*
- cloud_cluster->width = cloud_cluster->points.size ();
+ cloud_cluster->push_back ((*cloud_filtered)[*pit]); //*
+ cloud_cluster->width = cloud_cluster->size ();
cloud_cluster->height = 1;
cloud_cluster->is_dense = true;
- std::cout << "PointCloud representing the Cluster: " << cloud_cluster->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the Cluster: " << cloud_cluster->size () << " data points." << std::endl;
std::stringstream ss;
ss << "cloud_cluster_" << j << ".pcd";
writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false); //*
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster_gpu (new pcl::PointCloud<pcl::PointXYZ>);
for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit)
- cloud_cluster_gpu->points.push_back (cloud_filtered->points[*pit]); //*
- cloud_cluster_gpu->width = cloud_cluster_gpu->points.size ();
+ cloud_cluster_gpu->push_back ((*cloud_filtered)[*pit]); //*
+ cloud_cluster_gpu->width = cloud_cluster_gpu->size ();
cloud_cluster_gpu->height = 1;
cloud_cluster_gpu->is_dense = true;
- std::cout << "PointCloud representing the Cluster: " << cloud_cluster_gpu->points.size () << " data points." << std::endl;
+ std::cout << "PointCloud representing the Cluster: " << cloud_cluster_gpu->size () << " data points." << std::endl;
std::stringstream ss;
ss << "gpu_cloud_cluster_" << j << ".pcd";
writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster_gpu, false); //*
void generateColor()
{
- std::size_t cloud_size = cloud->points.size();
- for(std::size_t i = 0; i < cloud_size; ++i)
+ for (auto& p: *cloud)
{
- PointXYZ& p = cloud->points[i];
-
int r = std::max(1, std::min(255, static_cast<int>((double(rand())/RAND_MAX)*255)));
int g = std::max(1, std::min(255, static_cast<int>((double(rand())/RAND_MAX)*255)));
int b = std::max(1, std::min(255, static_cast<int>((double(rand())/RAND_MAX)*255)));
KdTreeFLANN<PointXYZ>::Ptr kdtree(new KdTreeFLANN<PointXYZ>);
kdtree->setInputCloud(cloud);
- std::size_t cloud_size = cloud->points.size();
+ const auto cloud_size = cloud->size();
std::vector<float> dists;
neighbors_all.resize(cloud_size);
for(std::size_t i = 0; i < cloud_size; ++i)
{
- kdtree->nearestKSearch(cloud->points[i], k, neighbors_all[i], dists);
+ kdtree->nearestKSearch((*cloud)[i], k, neighbors_all[i], dists);
sizes.push_back((int)neighbors_all[i].size());
}
max_nn_size = *max_element(sizes.begin(), sizes.end());
KdTreeFLANN<PointXYZ>::Ptr kdtree(new KdTreeFLANN<PointXYZ>);
kdtree->setInputCloud(cloud);
- std::size_t cloud_size = cloud->points.size();
+ const auto cloud_size = cloud->size();
std::vector<float> dists;
neighbors_all.resize(cloud_size);
for(std::size_t i = 0; i < cloud_size; ++i)
{
- kdtree->radiusSearch(cloud->points[i], radius, neighbors_all[i], dists);
+ kdtree->radiusSearch((*cloud)[i], radius, neighbors_all[i], dists);
sizes.push_back((int)neighbors_all[i].size());
}
max_nn_size = *max_element(sizes.begin(), sizes.end());
void generateSurface()
{
- surface->points.clear();
- for(std::size_t i = 0; i < cloud->points.size(); i+= 10)
- surface->points.push_back(cloud->points[i]);
- surface->width = surface->points.size();
+ surface->clear();
+ for(std::size_t i = 0; i < cloud->size(); i+= 10)
+ surface->push_back((*cloud)[i]);
+ surface->width = surface->size();
surface->height = 1;
- if (!normals->points.empty())
+ if (!normals->empty())
{
- normals_surface->points.clear();
- for(std::size_t i = 0; i < normals->points.size(); i+= 10)
- normals_surface->points.push_back(normals->points[i]);
+ normals_surface->clear();
+ for(std::size_t i = 0; i < normals->size(); i+= 10)
+ normals_surface->push_back((*normals)[i]);
- normals_surface->width = surface->points.size();
+ normals_surface->width = surface->size();
normals_surface->height = 1;
}
}
void generateIndices(std::size_t step = 100)
{
indices->clear();
- for(std::size_t i = 0; i < cloud->points.size(); i += step)
+ for(std::size_t i = 0; i < cloud->size(); i += step)
indices->push_back(i);
}
#include "data_source.hpp"
#include <pcl/search/pcl_search.h>
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
std::vector<int> data;
source.getNeghborsArray(data);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
FPFHSignature33& gpu = downloaded[i];
- FPFHSignature33& cpu = fpfhs.points[i];
+ FPFHSignature33& cpu = fpfhs[i];
std::size_t FSize = sizeof(FPFHSignature33)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
FPFHSignature33& gpu = downloaded[i];
- FPFHSignature33& cpu = fpfhs.points[i];
+ FPFHSignature33& cpu = fpfhs[i];
std::size_t FSize = sizeof(FPFHSignature33)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
FPFHSignature33& gpu = downloaded[i];
- FPFHSignature33& cpu = fpfhs.points[i];
+ FPFHSignature33& cpu = fpfhs[i];
std::size_t FSize = sizeof(FPFHSignature33)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals_surface;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
FPFHSignature33& gpu = downloaded[i];
- FPFHSignature33& cpu = fpfhs.points[i];
+ FPFHSignature33& cpu = fpfhs[i];
std::size_t FSize = sizeof(FPFHSignature33)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals_surface;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
FPFHSignature33& gpu = downloaded[i];
- FPFHSignature33& cpu = fpfhs.points[i];
+ FPFHSignature33& cpu = fpfhs[i];
std::size_t FSize = sizeof(FPFHSignature33)/sizeof(gpu.histogram[0]);
#include <pcl/gpu/containers/initialization.h>
#include <pcl/search/search.h>
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
TEST(PCL_FeaturesGPU, normals_lowlevel)
{
DataSource source;
- std::cout << "Cloud size: " << source.cloud->points.size() << std::endl;
+ std::cout << "Cloud size: " << source.cloud->size() << std::endl;
std::cout << "Radius: " << source.radius << std::endl;
std::cout << "K: " << source.k << std::endl;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
- Normal n = source.normals->points[i];
+ Normal n = (*source.normals)[i];
PointXYZ xyz = downloaded[i];
float curvature = xyz.data[3];
TEST(PCL_FeaturesGPU, normals_highlevel_1)
{
DataSource source;
- std::cout << "Cloud size: " << source.cloud->points.size() << std::endl;
+ std::cout << "Cloud size: " << source.cloud->size() << std::endl;
std::cout << "Radius: " << source.radius << std::endl;
std::cout << "Max_elems: " << source.max_elements << std::endl;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
- Normal n = normals->points[i];
+ Normal n = (*normals)[i];
PointXYZ xyz = downloaded[i];
float curvature = xyz.data[3];
TEST(PCL_FeaturesGPU, normals_highlevel_2)
{
DataSource source;
- std::cout << "Cloud size: " << source.cloud->points.size() << std::endl;
+ std::cout << "Cloud size: " << source.cloud->size() << std::endl;
std::cout << "Radius: " << source.radius << std::endl;
std::cout << "Max_elems: " << source.max_elements << std::endl;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
- Normal n = normals->points[i];
+ Normal n = (*normals)[i];
PointXYZ xyz = downloaded[i];
float curvature = xyz.data[3];
TEST(PCL_FeaturesGPU, normals_highlevel_3)
{
DataSource source;
- std::cout << "Cloud size: " << source.cloud->points.size() << std::endl;
+ std::cout << "Cloud size: " << source.cloud->size() << std::endl;
std::cout << "Radius: " << source.radius << std::endl;
std::cout << "Max_elems: " << source.max_elements << std::endl;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
- Normal n = normals->points[i];
+ Normal n = (*normals)[i];
PointXYZ xyz = downloaded[i];
float curvature = xyz.data[3];
TEST(PCL_FeaturesGPU, normals_highlevel_4)
{
DataSource source;
- std::cout << "Cloud size: " << source.cloud->points.size() << std::endl;
+ std::cout << "Cloud size: " << source.cloud->size() << std::endl;
std::cout << "Radius: " << source.radius << std::endl;
std::cout << "Max_elems: " << source.max_elements << std::endl;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
- Normal n = normals->points[i];
+ Normal n = (*normals)[i];
PointXYZ xyz = downloaded[i];
float curvature = xyz.data[3];
#include "data_source.hpp"
#include <pcl/search/search.h>
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
std::vector<int> data;
source.getNeghborsArray(data);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PFHSignature125& gpu = downloaded[i];
- PFHSignature125& cpu = pfhs.points[i];
+ PFHSignature125& cpu = pfhs[i];
std::size_t FSize = sizeof(PFHSignature125)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PFHSignature125& gpu = downloaded[i];
- PFHSignature125& cpu = fpfhs.points[i];
+ PFHSignature125& cpu = fpfhs[i];
std::size_t FSize = sizeof(PFHSignature125)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PFHSignature125& gpu = downloaded[i];
- PFHSignature125& cpu = fpfhs.points[i];
+ PFHSignature125& cpu = fpfhs[i];
std::size_t FSize = sizeof(PFHSignature125)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals_surface;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PFHSignature125& gpu = downloaded[i];
- PFHSignature125& cpu = fpfhs.points[i];
+ PFHSignature125& cpu = fpfhs[i];
std::size_t FSize = sizeof(PFHSignature125)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals_surface;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PFHSignature125& gpu = downloaded[i];
- PFHSignature125& cpu = fpfhs.points[i];
+ PFHSignature125& cpu = fpfhs[i];
std::size_t FSize = sizeof(PFHSignature125)/sizeof(gpu.histogram[0]);
PointCloud<Normal>::Ptr& normals = source.normals;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(normals->points.begin(), normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
PointCloud<PointXYZRGB>::Ptr cloud_XYZRGB(new PointCloud<PointXYZRGB>());
cloud_XYZRGB->points.clear();
- for(std::size_t i = 0; i < source.cloud->points.size(); ++i)
+ for (const auto& p: *source.cloud)
{
- const PointXYZ& p = source.cloud->points[i];
PointXYZRGB o;
int color = *(int*)&p.data[3];
cloud_XYZRGB->points.push_back(o);
}
- cloud_XYZRGB->width = cloud_XYZRGB->points.size();
+ cloud_XYZRGB->width = cloud_XYZRGB->size();
cloud_XYZRGB->height = 1;
fe.setInputCloud (cloud_XYZRGB);
for(std::size_t i = 170; i < downloaded.size(); ++i)
{
PFHRGBSignature250& gpu = downloaded[i];
- PFHRGBSignature250& cpu = fpfhs.points[i];
+ PFHRGBSignature250& cpu = fpfhs[i];
std::size_t FSize = sizeof(PFHRGBSignature250)/sizeof(gpu.histogram[0]);
#include "data_source.hpp"
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
source.generateIndices();
source.estimateNormals();
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PPFSignature& gpu = downloaded[i];
- PPFSignature& cpu = ppfs.points[i];
+ PPFSignature& cpu = ppfs[i];
ASSERT_NEAR(gpu.f1, cpu.f1, 0.01f);
ASSERT_NEAR(gpu.f2, cpu.f2, 0.01f);
source.generateIndices();
source.estimateNormals();
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
PointCloud<PointXYZRGB>::Ptr cloud_XYZRGB(new PointCloud<PointXYZRGB>());
cloud_XYZRGB->points.clear();
- for(std::size_t i = 0; i < source.cloud->points.size(); ++i)
+ for (const auto& p: *source.cloud)
{
- const PointXYZ& p = source.cloud->points[i];
int color = *(int*)&p.data[3];
int r = color & 0xFF;
int g = (color >> 8) & 0xFF;
o.x = p.x; o.y = p.y; o.z = p.z; o.r = r; o.g = g; o.b = b;
cloud_XYZRGB->points.push_back(o);
}
- cloud_XYZRGB->width = cloud_XYZRGB->points.size();
+ cloud_XYZRGB->width = cloud_XYZRGB->size();
cloud_XYZRGB->height = 1;
for(std::size_t i = 207025; i < downloaded.size(); ++i)
{
PPFRGBSignature& gpu = downloaded[i];
- PPFRGBSignature& cpu = ppfs.points[i];
+ PPFRGBSignature& cpu = ppfs[i];
ASSERT_NEAR(gpu.f1, cpu.f1, 0.01f);
ASSERT_NEAR(gpu.f2, cpu.f2, 0.01f);
source.estimateNormals();
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
PointCloud<PointXYZRGB>::Ptr cloud_XYZRGB(new PointCloud<PointXYZRGB>());
cloud_XYZRGB->points.clear();
- for(std::size_t i = 0; i < source.cloud->points.size(); ++i)
+ for (const auto& p: *source.cloud)
{
- const PointXYZ& p = source.cloud->points[i];
int color = *(int*)&p.data[3];
int r = color & 0xFF;
int g = (color >> 8) & 0xFF;
o.x = p.x; o.y = p.y; o.z = p.z; o.r = r; o.g = g; o.b = b;
cloud_XYZRGB->points.push_back(o);
}
- cloud_XYZRGB->width = cloud_XYZRGB->points.size();
+ cloud_XYZRGB->width = cloud_XYZRGB->size();
cloud_XYZRGB->height = 1;
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PPFRGBSignature& gpu = downloaded[i];
- PPFRGBSignature& cpu = ppfs.points[i];
+ PPFRGBSignature& cpu = ppfs[i];
ASSERT_NEAR(gpu.f1, cpu.f1, 0.01f);
ASSERT_NEAR(gpu.f2, cpu.f2, 0.01f);
#include "data_source.hpp"
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
source.estimateNormals();
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
for(std::size_t i = 0; i < downloaded.size(); ++i)
{
PrincipalCurvatures& gpu = downloaded[i];
- PrincipalCurvatures& cpu = pc.points[i];
+ PrincipalCurvatures& cpu = pc[i];
ASSERT_NEAR(gpu.principal_curvature_x, cpu.principal_curvature_x, 0.01f);
ASSERT_NEAR(gpu.principal_curvature_y, cpu.principal_curvature_y, 0.01f);
#include "data_source.hpp"
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
const int min_beighbours = 15;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
continue;
SpinImage& gpu = downloaded[i];
- SpinImage& cpu = spin_images->points[i];
+ SpinImage& cpu = (*spin_images)[i];
std::size_t FSize = sizeof(SpinImage)/sizeof(gpu.histogram[0]);
const int min_beighbours = 15;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
continue;
SpinImage& gpu = downloaded[i];
- SpinImage& cpu = spin_images->points[i];
+ SpinImage& cpu = (*spin_images)[i];
std::size_t FSize = sizeof(SpinImage)/sizeof(gpu.histogram[0]);
const int min_beighbours = 15;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
continue;
SpinImage& gpu = downloaded[i];
- SpinImage& cpu = spin_images->points[i];
+ SpinImage& cpu = (*spin_images)[i];
std::size_t FSize = sizeof(SpinImage)/sizeof(gpu.histogram[0]);
const int min_beighbours = 15;
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
continue;
SpinImage& gpu = downloaded[i];
- SpinImage& cpu = spin_images->points[i];
+ SpinImage& cpu = (*spin_images)[i];
std::size_t FSize = sizeof(SpinImage)/sizeof(gpu.histogram[0]);
#include "data_source.hpp"
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
source.estimateNormals();
source.generateIndices(3);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
}
VFHSignature308& gpu = downloaded[0];
- VFHSignature308& cpu = vfh.points[0];
+ VFHSignature308& cpu = vfh[0];
std::size_t FSize = sizeof(VFHSignature308)/sizeof(gpu.histogram[0]);
source.estimateNormals();
source.generateIndices(3);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
fe.compute (vfh);
VFHSignature308& gpu = downloaded[0];
- VFHSignature308& cpu = vfh.points[0];
+ VFHSignature308& cpu = vfh[0];
std::size_t FSize = sizeof(VFHSignature308)/sizeof(gpu.histogram[0]);
source.estimateNormals();
source.generateIndices(3);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
fe.compute (vfh);
VFHSignature308& gpu = downloaded[0];
- VFHSignature308& cpu = vfh.points[0];
+ VFHSignature308& cpu = vfh[0];
std::size_t FSize = sizeof(VFHSignature308)/sizeof(gpu.histogram[0]);
source.estimateNormals();
source.generateIndices(3);
- std::vector<PointXYZ> normals_for_gpu(source.normals->points.size());
+ std::vector<PointXYZ> normals_for_gpu(source.normals->size());
std::transform(source.normals->points.begin(), source.normals->points.end(), normals_for_gpu.begin(), DataSource::Normal2PointXYZ());
//uploading data to GPU
fe.compute (vfh);
VFHSignature308& gpu = downloaded[0];
- VFHSignature308& cpu = vfh.points[0];
+ VFHSignature308& cpu = vfh[0];
std::size_t FSize = sizeof(VFHSignature308)/sizeof(gpu.histogram[0]);
#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
#include <limits>
#include <cuda.h>
return ptr[tid - lane];
}
- static __forceinline__ __device__ int
+ static __forceinline__ __device__ int
Ballot(int predicate, volatile int* cta_buffer)
- {
+ {
+ pcl::utils::ignore(cta_buffer);
#if CUDA_VERSION >= 9000
- (void)cta_buffer;
- return __ballot_sync (__activemask (), predicate);
+ return __ballot_sync (__activemask (), predicate);
#else
- (void)cta_buffer;
- return __ballot(predicate);
+ return __ballot(predicate);
#endif
}
static __forceinline__ __device__ bool
All(int predicate, volatile int* cta_buffer)
{
+ pcl::utils::ignore(cta_buffer);
#if CUDA_VERSION >= 9000
- (void)cta_buffer;
- return __all_sync (__activemask (), predicate);
+ return __all_sync (__activemask (), predicate);
#else
- (void)cta_buffer;
- return __all(predicate);
+ return __all(predicate);
#endif
}
};
}
}
-#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */
\ No newline at end of file
+#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */
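The pcl::utils::ignore calls introduced above replace the per-variable (void) casts for silencing unused-parameter warnings with a single variadic call per function. For context, the helper included from pcl/common/utils.h is, roughly, a variadic no-op of this shape (a sketch, not a verbatim copy of the header):

    namespace pcl {
    namespace utils {
      // Accept any number of arguments and do nothing with them, so callers can
      // mark parameters as intentionally unused in one statement.
      template <typename... T> void
      ignore (const T&...) {}
    } // namespace utils
    } // namespace pcl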
#include <opencv2/gpu/gpu.hpp>
#endif
-using namespace std;
using namespace pcl::device;
using namespace pcl::gpu;
PCL_EXPORTS void
mergePointNormal(const DeviceArray<PointXYZ>& cloud, const DeviceArray<Normal>& normals, DeviceArray<PointNormal>& output)
{
- const std::size_t size = min(cloud.size(), normals.size());
+ const std::size_t size = std::min(cloud.size(), normals.size());
output.create(size);
const DeviceArray<float4>& c = (const DeviceArray<float4>&)cloud;
}
}
#undef FETCH
- cloud.width = (int)cloud.points.size ();
+ cloud.width = cloud.size ();
cloud.height = 1;
}
#include "openni_capture.h"
#include <pcl/gpu/containers/initialization.h>
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
using namespace xn;
pcl::gpu::CaptureOpenNI::CaptureOpenNI() : depth_focal_length_VGA (0.f), baseline (0.f), shadow_value (0), no_sample_value (0), pixelSize (0.0), max_depth (0) {}
pcl::gpu::CaptureOpenNI::CaptureOpenNI(int device) {open (device); }
-pcl::gpu::CaptureOpenNI::CaptureOpenNI(const string& filename) {open (filename); }
+pcl::gpu::CaptureOpenNI::CaptureOpenNI(const std::string& filename) {open (filename); }
pcl::gpu::CaptureOpenNI::~CaptureOpenNI() { release (); }
void
#include<iostream>
using namespace pcl::gpu;
-using namespace std;
const float Evaluation::fx = 525.0f;
const float Evaluation::fy = 525.0f;
folder_.push_back('/');
std::cout << "Initializing evaluation from folder: " << folder_ << std::endl;
- string depth_file = folder_ + "depth.txt";
- string rgb_file = folder_ + "rgb.txt";
+ std::string depth_file = folder_ + "depth.txt";
+ std::string rgb_file = folder_ + "rgb.txt";
readFile(depth_file, depth_stamps_and_filenames_);
readFile(rgb_file, rgb_stamps_and_filenames_);
void Evaluation::setMatchFile(const std::string& file)
{
- string full = folder_ + file;
- ifstream iff(full.c_str());
+ std::string full = folder_ + file;
+ std::ifstream iff(full.c_str());
if(!iff)
{
std::cout << "Can't read " << file << std::endl;
}
}
-void Evaluation::readFile(const string& file, std::vector< pair<double,string> >& output)
+void Evaluation::readFile(const std::string& file, std::vector< std::pair<double,std::string> >& output)
{
char buffer[4096];
- std::vector< pair<double,string> > tmp;
+  std::vector< std::pair<double,std::string> > tmp;
- ifstream iff(file.c_str());
+ std::ifstream iff(file.c_str());
if(!iff)
{
std::cout << "Can't read " << file << std::endl;
// each line consists of the timestamp and the filename of the depth image
while (!iff.eof())
{
- double time; string name;
+ double time; std::string name;
iff >> time >> name;
- tmp.push_back(make_pair(time, name));
+ tmp.push_back(std::make_pair(time, name));
}
tmp.swap(output);
}
if ( i>= total)
return false;
- string file = folder_ + (accociations_.empty() ? rgb_stamps_and_filenames_[i].second : accociations_[i].name2);
+ std::string file = folder_ + (accociations_.empty() ? rgb_stamps_and_filenames_[i].second : accociations_[i].name2);
cv::Mat bgr = cv::imread(file);
if(bgr.empty())
if ( i>= total)
return false;
- string file = folder_ + (accociations_.empty() ? depth_stamps_and_filenames_[i].second : accociations_[i].name1);
+ std::string file = folder_ + (accociations_.empty() ? depth_stamps_and_filenames_[i].second : accociations_[i].name1);
cv::Mat d_img = cv::imread(file, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if(d_img.empty())
if ( i>= accociations_.size())
return false;
- string depth_file = folder_ + accociations_[i].name1;
- string color_file = folder_ + accociations_[i].name2;
+ std::string depth_file = folder_ + accociations_[i].name1;
+ std::string color_file = folder_ + accociations_[i].name2;
cv::Mat d_img = cv::imread(depth_file, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if(d_img.empty())
#include "../src/internal.h"
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
using namespace Eigen;
+using namespace std::chrono_literals;
namespace pc = pcl::console;
namespace pcl
scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = vtkIdType (cloud_->points.size ());
+ vtkIdType nr_points = vtkIdType (cloud_->size ());
reinterpret_cast<vtkUnsignedCharArray*>(&(*scalars))->SetNumberOfTuples (nr_points);
unsigned char* colors = reinterpret_cast<vtkUnsignedCharArray*>(&(*scalars))->GetPointer (0);
// Color every point
- if (nr_points != int (rgb_->points.size ()))
+ if (nr_points != static_cast<vtkIdType>(rgb_->size ()))
std::fill (colors, colors + nr_points * 3, static_cast<unsigned char> (0xFF));
else
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
int idx = cp * 3;
- colors[idx + 0] = rgb_->points[cp].r;
- colors[idx + 1] = rgb_->points[cp].g;
- colors[idx + 2] = rgb_->points[cp].b;
+ colors[idx + 0] = (*rgb_)[cp].r;
+ colors[idx + 1] = (*rgb_)[cp].g;
+ colors[idx + 2] = (*rgb_)[cp].b;
}
return (true);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-std::vector<string> getPcdFilesInDir(const string& directory)
+std::vector<std::string> getPcdFilesInDir(const std::string& directory)
{
namespace fs = boost::filesystem;
fs::path dir(directory);
if (directory.empty() || !fs::exists(dir) || !fs::is_directory(dir))
PCL_THROW_EXCEPTION (pcl::IOException, "No valid PCD directory given!\n");
- std::vector<string> result;
+ std::vector<std::string> result;
fs::directory_iterator pos(dir);
fs::directory_iterator end;
pcl::copyPointCloud (points, *merged_ptr);
for (std::size_t i = 0; i < colors.size (); ++i)
- merged_ptr->points[i].rgba = colors.points[i].rgba;
+ (*merged_ptr)[i].rgba = colors[i].rgba;
return merged_ptr;
}
return pcl::PolygonMesh::Ptr ();
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = (int)triangles.size();
+ cloud.width = triangles.size();
cloud.height = 1;
triangles.download(cloud.points);
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
- std::cout << "\nGetting cloud... " << flush;
+ std::cout << "\nGetting cloud... " << std::flush;
valid_combined_ = false;
kinfu.volume().fetchNormals (extracted, normals_device_);
pcl::gpu::mergePointNormal (extracted, normals_device_, combined_device_);
combined_device_.download (combined_ptr_->points);
- combined_ptr_->width = (int)combined_ptr_->points.size ();
+ combined_ptr_->width = combined_ptr_->size ();
combined_ptr_->height = 1;
valid_combined_ = true;
else
{
extracted.download (cloud_ptr_->points);
- cloud_ptr_->width = (int)cloud_ptr_->points.size ();
+ cloud_ptr_->width = cloud_ptr_->size ();
cloud_ptr_->height = 1;
}
{
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
- point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
+ point_colors_ptr_->width = point_colors_ptr_->size ();
point_colors_ptr_->height = 1;
}
else
point_colors_ptr_->points.clear();
}
- std::size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
- std::cout << "Done. Cloud size: " << points_size / 1000 << "K" << std::endl;
+ const auto size = valid_combined_ ? combined_ptr_->size () : cloud_ptr_->size ();
+ std::cout << "Done. Cloud size: " << size / 1000 << "K" << std::endl;
if (viz_)
{
return;
ScopeTimeT time ("Mesh Extraction");
- std::cout << "\nGetting mesh... " << flush;
+ std::cout << "\nGetting mesh... " << std::flush;
if (!marching_cubes_)
marching_cubes_ = MarchingCubes::Ptr( new MarchingCubes() );
}
void
- toggleEvaluationMode(const string& eval_folder, const string& match_file = string())
+ toggleEvaluationMode(const std::string& eval_folder, const std::string& match_file = std::string())
{
evaluation_ptr_ = Evaluation::Ptr( new Evaluation(eval_folder) );
if (!match_file.empty())
if (scan_volume_)
{
- std::cout << "Downloading TSDF volume from device ... " << flush;
+ std::cout << "Downloading TSDF volume from device ... " << std::flush;
kinfu_.volume().downloadTsdfAndWeighs (tsdf_volume_.volumeWriteable (), tsdf_volume_.weightsWriteable ());
tsdf_volume_.setHeader (Eigen::Vector3i (pcl::device::VOLUME_X, pcl::device::VOLUME_Y, pcl::device::VOLUME_Z), kinfu_.volume().getSize ());
std::cout << "done [" << tsdf_volume_.size () << " voxels]" << std::endl << std::endl;
- std::cout << "Converting volume to TSDF cloud ... " << flush;
+ std::cout << "Converting volume to TSDF cloud ... " << std::flush;
tsdf_volume_.convertToTsdfCloud (tsdf_cloud_ptr_);
std::cout << "done [" << tsdf_cloud_ptr_->size () << " points]" << std::endl << std::endl;
}
std::cout << std::endl << "Volume scan: " << (app->scan_volume_ ? "enabled" : "disabled") << std::endl << std::endl;
break;
case (int)'v': case (int)'V':
- std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << flush;
+ std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << std::flush;
app->tsdf_volume_.save ("tsdf_volume.dat", true);
std::cout << "done [" << app->tsdf_volume_.size () << " voxels]" << std::endl;
- std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << flush;
+ std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << std::flush;
pcl::io::savePCDFile<pcl::PointXYZI> ("tsdf_cloud.pcd", *app->tsdf_cloud_ptr_, true);
std::cout << "done [" << app->tsdf_cloud_ptr_->size () << " points]" << std::endl;
break;
{
if (format == KinFuApp::PCD_BIN)
{
- std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << flush;
+ std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << std::flush;
pcl::io::savePCDFile ("cloud_bin.pcd", *cloud_prt, true);
}
else
if (format == KinFuApp::PCD_ASCII)
{
- std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << std::flush;
pcl::io::savePCDFile ("cloud.pcd", *cloud_prt, false);
}
else /* if (format == KinFuApp::PLY) */
{
- std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << std::flush;
pcl::io::savePLYFileASCII ("cloud.ply", *cloud_prt);
}
{
if (format == KinFuApp::MESH_PLY)
{
- std::cout << "Saving mesh to to 'mesh.ply'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.ply'... " << std::flush;
pcl::io::savePLYFile("mesh.ply", mesh);
}
else /* if (format == KinFuApp::MESH_VTK) */
{
- std::cout << "Saving mesh to to 'mesh.vtk'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.vtk'... " << std::flush;
pcl::io::saveVTKFile("mesh.vtk", mesh);
}
std::cout << "Done" << std::endl;
float fps_pcd = 15.0f;
pc::parse_argument (argc, argv, "-pcd_fps", fps_pcd);
- std::vector<string> pcd_files = getPcdFilesInDir(pcd_dir);
+ std::vector<std::string> pcd_files = getPcdFilesInDir(pcd_dir);
// Sort the read files by name
sort (pcd_files.begin (), pcd_files.end ());
#include <opencv/highgui.h>
//SIMENDSIMENDSIMENDSIMENDSIMENDSIMENDSIMEND
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
using namespace Eigen;
using namespace pcl::console;
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
std::uint16_t t_gamma[2048];
Scene::Ptr scene_;
Camera::Ptr camera_;
// timestamps and displays the elapsed time between them as
// a fraction and time used [for profiling]
void
-display_tic_toc (vector<double> &tic_toc,const string &fun_name)
+display_tic_toc (std::vector<double> &tic_toc,const std::string &fun_name)
{
std::size_t tic_toc_size = tic_toc.size ();
// Save in local frame
range_likelihood_->getPointCloud (pc_out,false,camera_->pose ());
// TODO: what to do when there are more than one simulated view?
- std::cout << pc_out->points.size() << " points written to file\n";
+ std::cout << pc_out->size() << " points written to file\n";
pcl::PCDWriter writer;
//writer.write (point_cloud_fname, *pc_out, false); /// ASCII
//pcl::concatenateFields (points, colors, *merged_ptr); why error?
for (std::size_t i = 0; i < colors.size (); ++i)
- merged_ptr->points[i].rgba = colors.points[i].rgba;
+ (*merged_ptr)[i].rgba = colors[i].rgba;
return merged_ptr;
}
return pcl::PolygonMesh::Ptr();
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = (int)triangles.size();
+ cloud.width = triangles.size();
cloud.height = 1;
triangles.download(cloud.points);
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
- std::cout << "\nGetting cloud... " << flush;
+ std::cout << "\nGetting cloud... " << std::flush;
valid_combined_ = false;
kinfu.volume().fetchNormals (extracted, normals_device_);
pcl::gpu::mergePointNormal (extracted, normals_device_, combined_device_);
combined_device_.download (combined_ptr_->points);
- combined_ptr_->width = (int)combined_ptr_->points.size ();
+ combined_ptr_->width = combined_ptr_->size ();
combined_ptr_->height = 1;
valid_combined_ = true;
else
{
extracted.download (cloud_ptr_->points);
- cloud_ptr_->width = (int)cloud_ptr_->points.size ();
+ cloud_ptr_->width = cloud_ptr_->size ();
cloud_ptr_->height = 1;
}
{
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
- point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
+ point_colors_ptr_->width = point_colors_ptr_->size ();
point_colors_ptr_->height = 1;
}
else
point_colors_ptr_->points.clear();
}
- std::size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
- std::cout << "Done. Cloud size: " << points_size / 1000 << "K" << std::endl;
+ const auto size = valid_combined_ ? combined_ptr_->size () : cloud_ptr_->size ();
+ std::cout << "Done. Cloud size: " << size / 1000 << "K" << std::endl;
cloud_viewer_.removeAllPointClouds ();
if (valid_combined_)
showMesh(KinfuTracker& kinfu, bool /*integrate_colors*/)
{
ScopeTimeT time ("Mesh Extraction");
- std::cout << "\nGetting mesh... " << flush;
+ std::cout << "\nGetting mesh... " << std::flush;
if (!marching_cubes_)
marching_cubes_ = MarchingCubes::Ptr( new MarchingCubes() );
}
void
- toggleEvaluationMode(const string& eval_folder, const string& match_file = string())
+ toggleEvaluationMode(const std::string& eval_folder, const std::string& match_file = std::string())
{
evaluation_ptr_ = Evaluation::Ptr( new Evaluation(eval_folder) );
if (!match_file.empty())
// download tsdf volume
{
ScopeTimeT time ("tsdf volume download");
- std::cout << "Downloading TSDF volume from device ... " << flush;
+ std::cout << "Downloading TSDF volume from device ... " << std::flush;
kinfu_.volume().downloadTsdfAndWeighs (tsdf_volume_.volumeWriteable (), tsdf_volume_.weightsWriteable ());
tsdf_volume_.setHeader (Eigen::Vector3i (pcl::device::VOLUME_X, pcl::device::VOLUME_Y, pcl::device::VOLUME_Z), kinfu_.volume().getSize ());
std::cout << "done [" << tsdf_volume_.size () << " voxels]" << std::endl << std::endl;
}
{
ScopeTimeT time ("converting");
- std::cout << "Converting volume to TSDF cloud ... " << flush;
+ std::cout << "Converting volume to TSDF cloud ... " << std::flush;
tsdf_volume_.convertToTsdfCloud (tsdf_cloud_ptr_);
std::cout << "done [" << tsdf_cloud_ptr_->size () << " points]" << std::endl << std::endl;
}
std::cout << std::endl << "Volume scan: " << (app->scan_volume_ ? "enabled" : "disabled") << std::endl << std::endl;
break;
case (int)'v': case (int)'V':
- std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << flush;
+ std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << std::flush;
app->tsdf_volume_.save ("tsdf_volume.dat", true);
std::cout << "done [" << app->tsdf_volume_.size () << " voxels]" << std::endl;
- std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << flush;
+ std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << std::flush;
pcl::io::savePCDFile<pcl::PointXYZI> ("tsdf_cloud.pcd", *app->tsdf_cloud_ptr_, true);
std::cout << "done [" << app->tsdf_cloud_ptr_->size () << " points]" << std::endl;
break;
{
if (format == KinFuApp::PCD_BIN)
{
- std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << flush;
+ std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << std::flush;
pcl::io::savePCDFile ("cloud_bin.pcd", *cloud_prt, true);
}
else
if (format == KinFuApp::PCD_ASCII)
{
- std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << std::flush;
pcl::io::savePCDFile ("cloud.pcd", *cloud_prt, false);
}
else /* if (format == KinFuApp::PLY) */
{
- std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << std::flush;
pcl::io::savePLYFileASCII ("cloud.ply", *cloud_prt);
}
if (format == KinFuApp::MESH_PLY)
{
- std::cout << "Saving mesh to to 'mesh.ply'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.ply'... " << std::flush;
pcl::io::savePLYFile("mesh.ply", mesh);
}
else /* if (format == KinFuApp::MESH_VTK) */
{
- std::cout << "Saving mesh to to 'mesh.vtk'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.vtk'... " << std::flush;
pcl::io::saveVTKFile("mesh.vtk", mesh);
}
std::cout << "Done" << std::endl;
#include "tsdf_volume.h"
#include "tsdf_volume.hpp"
-using namespace std;
namespace pc = pcl::console;
using PointT = pcl::PointXYZ;
using VoxelT = float;
using WeightT = short;
-string cloud_file = "cloud.pcd";
-string volume_file = "tsdf_volume.dat";
+std::string cloud_file = "cloud.pcd";
+std::string volume_file = "tsdf_volume.dat";
double min_trunc_dist = 30.0f;
{
public:
- using Ptr = shared_ptr<DeviceVolume>;
- using ConstPtr = shared_ptr<const DeviceVolume>;
+ using Ptr = std::shared_ptr<DeviceVolume>;
+ using ConstPtr = std::shared_ptr<const DeviceVolume>;
/** \brief Constructor
* param[in] volume_size size of the volume in mm
// truncation distance
Eigen::Vector3f voxel_size = volume_size.array() / volume_res.array().cast<float>();
- trunc_dist_ = max ((float)min_trunc_dist, 2.1f * max (voxel_size[0], max (voxel_size[1], voxel_size[2])));
+ trunc_dist_ = std::max ((float)min_trunc_dist, 2.1f * std::max (voxel_size[0], std::max (voxel_size[1], voxel_size[2])));
};
/** \brief Creates the TSDF volume on the GPU
// write into point cloud structure
device_cloud_buffer.download (cloud->points);
- cloud->width = (int)cloud->points.size ();
+ cloud->width = cloud->size ();
cloud->height = 1;
return true;
// integrate depth in device volume
- pc::print_highlight ("Converting depth map to volume ... "); std::cout << flush;
+ pc::print_highlight ("Converting depth map to volume ... "); std::cout << std::flush;
device_volume->createFromDepth (depth, intr);
// get volume from device
// generating TSDF cloud
- pc::print_highlight ("Generating tsdf volume cloud ... "); std::cout << flush;
+ pc::print_highlight ("Generating tsdf volume cloud ... "); std::cout << std::flush;
pcl::PointCloud<pcl::PointXYZI>::Ptr tsdf_cloud (new pcl::PointCloud<pcl::PointXYZI>);
volume->convertToTsdfCloud (tsdf_cloud);
pc::print_info ("done [%d points]\n", tsdf_cloud->size());
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_volume (new pcl::PointCloud<pcl::PointXYZ>);
if (extract_cloud_volume)
{
- pc::print_highlight ("Generating cloud from volume ... "); std::cout << flush;
+ pc::print_highlight ("Generating cloud from volume ... "); std::cout << std::flush;
if (!device_volume->getCloud (cloud_volume))
{
pc::print_error ("Cloudn't get cloud from device volume!\n");
pc::print_error ("Cloudn't save the volume to file %s.\n", volume_file.c_str());
// TSDF point cloud
- string tsdf_cloud_file (pcl::getFilenameWithoutExtension(volume_file) + "_cloud.pcd");
+ std::string tsdf_cloud_file (pcl::getFilenameWithoutExtension(volume_file) + "_cloud.pcd");
pc::print_info ("Saving volume cloud to "); pc::print_value ("%s", tsdf_cloud_file.c_str()); pc::print_info (" ... ");
if (pcl::io::savePCDFile (tsdf_cloud_file, *tsdf_cloud, true) < 0)
{
// point cloud from volume
if (extract_cloud_volume)
{
- string cloud_volume_file (pcl::getFilenameWithoutExtension(cloud_file) + "_from_volume.pcd");
+ std::string cloud_volume_file (pcl::getFilenameWithoutExtension(cloud_file) + "_from_volume.pcd");
pc::print_info ("Saving cloud from volume to "); pc::print_value ("%s", cloud_volume_file.c_str()); pc::print_info (" ... ");
if (pcl::io::savePCDFile (cloud_volume_file, *cloud_volume, true) < 0)
{
for (int x = 0; x < sx; x+=step, ++cloud_idx)
{
volume_idx = sx*sy*z + sx*y + x;
- // pcl::PointXYZI &point = cloud->points[cloud_idx];
+ // pcl::PointXYZI &point = (*cloud)[cloud_idx];
if (weights_->at(volume_idx) == 0 || volume_->at(volume_idx) > 0.98 )
continue;
shared(cloud, output)
for(int i = 0; i < (int) cloud.points.size (); ++i)
{
- int x = cloud.points[i].x;
- int y = cloud.points[i].y;
- int z = cloud.points[i].z;
+ int x = cloud[i].x;
+ int y = cloud[i].y;
+ int z = cloud[i].z;
if(x > 0 && x < voxels_x_ && y > 0 && y < voxels_y_ && z > 0 && z < voxels_z_)
{
int dst_index = x + voxels_x_ * y + voxels_y_ * voxels_x_ * z;
short2& elem = *reinterpret_cast<short2*> (&output[dst_index]);
- elem.x = static_cast<short> (cloud.points[i].intensity * DIVISOR);
+ elem.x = static_cast<short> (cloud[i].intensity * DIVISOR);
elem.y = static_cast<short> (1);
}
}
}
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = (int)triangles.size ();
+ cloud.width = triangles.size ();
cloud.height = 1;
triangles.download (cloud.points);
void
pcl::kinfuLS::WorldModel<PointT>::addSlice ( PointCloudPtr new_cloud)
{
- PCL_DEBUG ("Adding new cloud. Current world contains %d points.\n", world_->points.size ());
+ PCL_DEBUG("Adding new cloud. Current world contains %zu points.\n",
+ static_cast<std::size_t>(world_->size()));
- PCL_DEBUG ("New slice contains %d points.\n", new_cloud->points.size ());
+ PCL_DEBUG("New slice contains %zu points.\n",
+ static_cast<std::size_t>(new_cloud->size()));
*world_ += *new_cloud;
- PCL_DEBUG ("World now contains %d points.\n", world_->points.size ());
+ PCL_DEBUG("World now contains %zu points.\n",
+ static_cast<std::size_t>(world_->size()));
}
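The switch to %zu above matters because PCL_DEBUG and its siblings forward printf-style arguments without type checking; a minimal sketch of the matching rule, assuming only <pcl/console/print.h> (illustration, not part of the diff):

#include <pcl/console/print.h>
#include <cstddef>

void logWorldSize(std::size_t n_points)
{
  // %zu must be paired with exactly a std::size_t argument; the explicit cast
  // used in the change above keeps the pairing valid even if size() returns a
  // different integer type.
  PCL_DEBUG("World now contains %zu points.\n", static_cast<std::size_t>(n_points));
}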
-
template <typename PointT>
void
pcl::kinfuLS::WorldModel<PointT>::getExistingData(const double previous_origin_x, const double previous_origin_y, const double previous_origin_z, const double offset_x, const double offset_y, const double offset_z, const double volume_x, const double volume_y, const double volume_z, pcl::PointCloud<PointT> &existing_slice)
return;
}
- PCL_INFO ("Getting world as cubes. World contains %d points.\n", world_->points.size ());
+ PCL_INFO("Getting world as cubes. World contains %zu points.\n",
+ static_cast<std::size_t>(world_->size()));
// remove nans from world cloud
world_->is_dense = false;
std::vector<int> indices;
pcl::removeNaNFromPointCloud ( *world_, *world_, indices);
-
- PCL_INFO ("World contains %d points after nan removal.\n", world_->points.size ());
-
+
+ PCL_INFO("World contains %zu points after nan removal.\n",
+ static_cast<std::size_t>(world_->size()));
// check cube size value
double cubeSide = size;
}*/
std::cout << "returning " << cubes.size() << " cubes" << std::endl;
-
}
template <typename PointT>
for (int rii = 0; rii < static_cast<int> (indices->size ()); ++rii) // rii = removed indices iterator
{
- std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&cloud->points[(*indices)[rii]]);
+ std::uint8_t* pt_data = reinterpret_cast<std::uint8_t*> (&(*cloud)[(*indices)[rii]]);
for (const auto &field : fields)
memcpy (pt_data + field.offset, &my_nan, sizeof (float));
}
*/
std::size_t getWorldSize ()
{
- return (world_->points.size () );
+ return (world_->size () );
}
/** \brief Returns the world as two vectors of cubes of size "size" (pointclouds) and transforms
#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
#include <limits>
#include <cuda.h>
return ptr[tid - lane];
}
- static __forceinline__ __device__ int
+ static __forceinline__ __device__ int
Ballot(int predicate, volatile int* cta_buffer)
- {
+ {
+ pcl::utils::ignore(cta_buffer);
#if CUDA_VERSION >= 9000
- (void)cta_buffer;
- return __ballot_sync (__activemask (), predicate);
+ return __ballot_sync (__activemask (), predicate);
#else
- (void)cta_buffer;
- return __ballot (predicate);
+ return __ballot (predicate);
#endif
}
static __forceinline__ __device__ bool
All(int predicate, volatile int* cta_buffer)
{
+ pcl::utils::ignore(cta_buffer);
#if CUDA_VERSION >= 9000
- (void)cta_buffer;
- return __all_sync (__activemask (), predicate);
+ return __all_sync (__activemask (), predicate);
#else
- (void)cta_buffer;
- return __all (predicate);
+ return __all (predicate);
#endif
}
};
}
}
-#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */
\ No newline at end of file
+#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */
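The hunk above also introduces pcl::utils::ignore from <pcl/common/utils.h>, a variadic no-op that replaces the old (void) casts for silencing unused-parameter warnings. A minimal sketch of the intended usage (illustration only; legacyCallback and its parameters are hypothetical):

#include <pcl/common/utils.h> // pcl::utils::ignore

void legacyCallback(int used, int reserved_flag, float reserved_scale)
{
  // No-op at runtime; keeps -Wunused-parameter quiet for any number of arguments.
  pcl::utils::ignore(reserved_flag, reserved_scale);
  // ... only `used` takes part in the real work ...
}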
// Retrieving XYZ
points.download (current_slice_xyz->points);
- current_slice_xyz->width = (int) current_slice_xyz->points.size ();
+ current_slice_xyz->width = current_slice_xyz->size ();
current_slice_xyz->height = 1;
// Retrieving intensities
// when tried, this led to wrong intensity values being extracted by fetchSliceAsCloud () (padding problems?)
std::vector<float , Eigen::aligned_allocator<float> > intensities_vector;
intensities.download (intensities_vector);
- current_slice_intensities->points.resize (current_slice_xyz->points.size ());
- for(std::size_t i = 0 ; i < current_slice_intensities->points.size () ; ++i)
- current_slice_intensities->points[i].intensity = intensities_vector[i];
+ current_slice_intensities->resize (current_slice_xyz->size ());
+ for(std::size_t i = 0 ; i < current_slice_intensities->size () ; ++i)
+ (*current_slice_intensities)[i].intensity = intensities_vector[i];
- current_slice_intensities->width = (int) current_slice_intensities->points.size ();
+ current_slice_intensities->width = current_slice_intensities->size ();
current_slice_intensities->height = 1;
// Concatenating XYZ and Intensities
pcl::concatenateFields (*current_slice_xyz, *current_slice_intensities, *current_slice);
- current_slice->width = (int) current_slice->points.size ();
+ current_slice->width = current_slice->size ();
current_slice->height = 1;
// transform the slice from local to global coordinates
//~ #include <opencv2/gpu/gpu.hpp>
#endif
-using namespace std;
using namespace pcl::device::kinfuLS;
using pcl::device::kinfuLS::device_cast;
int cloud_size = 0;
- cloud_size = cyclical_.getWorldModel ()->getWorld ()->points.size();
+ cloud_size = cyclical_.getWorldModel ()->getWorld ()->size();
if (cloud_size <= 0)
{
PCL_EXPORTS void
mergePointNormal(const DeviceArray<PointXYZ>& cloud, const DeviceArray<Normal>& normals, DeviceArray<PointNormal>& output)
{
- const std::size_t size = min(cloud.size(), normals.size());
+ const std::size_t size = std::min(cloud.size(), normals.size());
output.create(size);
const DeviceArray<float4>& c = (const DeviceArray<float4>&)cloud;
}
}
#undef FETCH
- cloud.width = (int)cloud.points.size ();
+ cloud.width = cloud.size ();
cloud.height = 1;
}
void
pcl::gpu::kinfuLS::TsdfVolume::pushSlice (PointCloud<PointXYZI>::Ptr existing_data_cloud, const pcl::gpu::kinfuLS::tsdf_buffer* buffer) const
{
- std::size_t gpu_array_size = existing_data_cloud->points.size ();
+ const auto gpu_array_size = existing_data_cloud->size ();
if(gpu_array_size == 0)
{
return;
}
- const pcl::PointXYZI *first_point_ptr = &(existing_data_cloud->points[0]);
+ const pcl::PointXYZI *first_point_ptr = &((*existing_data_cloud)[0]);
pcl::gpu::DeviceArray<pcl::PointXYZI> cloud_gpu;
cloud_gpu.upload (first_point_ptr, gpu_array_size);
for (int x = 0; x < sx; x+=step, ++cloud_idx)
{
volume_idx = sx*sy*z + sx*y + x;
- // pcl::PointXYZI &point = cloud->points[cloud_idx];
+ // pcl::PointXYZI &point = (*cloud)[cloud_idx];
if (weights_host_->at(volume_idx) == 0 || volume_host_->at(volume_idx) > 0.98 )
continue;
#include "openni_capture.h"
#include <pcl/gpu/containers/initialization.h>
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
using namespace xn;
pcl::gpu::kinfuLS::CaptureOpenNI::CaptureOpenNI() : depth_focal_length_VGA (0.f), baseline (0.f), shadow_value (0), no_sample_value (0), pixelSize (0.0), max_depth (0) {}
pcl::gpu::kinfuLS::CaptureOpenNI::CaptureOpenNI(int device) {open (device); }
-pcl::gpu::kinfuLS::CaptureOpenNI::CaptureOpenNI(const string& filename) {open (filename); }
+pcl::gpu::kinfuLS::CaptureOpenNI::CaptureOpenNI(const std::string& filename) {open (filename); }
pcl::gpu::kinfuLS::CaptureOpenNI::~CaptureOpenNI() { release (); }
void
scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = (int)cloud_->points.size ();
+ vtkIdType nr_points = static_cast<vtkIdType>(cloud_->size ());
reinterpret_cast<vtkUnsignedCharArray*>(&(*scalars))->SetNumberOfTuples (nr_points);
unsigned char* colors = reinterpret_cast<vtkUnsignedCharArray*>(&(*scalars))->GetPointer (0);
// Color every point
- if (nr_points != (int)rgb_->points.size ())
+ if (nr_points != static_cast<vtkIdType>(rgb_->size ()))
std::fill(colors, colors + nr_points * 3, (unsigned char)0xFF);
else
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
int idx = cp * 3;
- colors[idx + 0] = rgb_->points[cp].r;
- colors[idx + 1] = rgb_->points[cp].g;
- colors[idx + 2] = rgb_->points[cp].b;
+ colors[idx + 0] = (*rgb_)[cp].r;
+ colors[idx + 1] = (*rgb_)[cp].g;
+ colors[idx + 2] = (*rgb_)[cp].b;
}
return (true);
}
#include<iostream>
using namespace pcl::gpu;
-using namespace std;
const float Evaluation::fx = 525.0f;
const float Evaluation::fy = 525.0f;
folder_.push_back('/');
std::cout << "Initializing evaluation from folder: " << folder_ << std::endl;
- string depth_file = folder_ + "depth.txt";
- string rgb_file = folder_ + "rgb.txt";
+ std::string depth_file = folder_ + "depth.txt";
+ std::string rgb_file = folder_ + "rgb.txt";
readFile(depth_file, depth_stamps_and_filenames_);
readFile(rgb_file, rgb_stamps_and_filenames_);
void Evaluation::setMatchFile(const std::string& file)
{
- string full = folder_ + file;
- ifstream iff(full.c_str());
+ std::string full = folder_ + file;
+ std::ifstream iff(full.c_str());
if(!iff)
{
std::cout << "Can't read " << file << std::endl;
}
}
-void Evaluation::readFile(const string& file, std::vector< pair<double,string> >& output)
+void Evaluation::readFile(const std::string& file, std::vector< std::pair<double,std::string> >& output)
{
char buffer[4096];
std::vector< pair<double,string> > tmp;
- ifstream iff(file.c_str());
+ std::ifstream iff(file.c_str());
if(!iff)
{
std::cout << "Can't read " << file << std::endl;
// each line consists of the timestamp and the filename of the depth image
while (!iff.eof())
{
- double time; string name;
+ double time; std::string name;
iff >> time >> name;
- tmp.push_back(make_pair(time, name));
+ tmp.push_back(std::make_pair(time, name));
}
tmp.swap(output);
}
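As the comment above notes, each line holds a timestamp followed by a filename. The while (!iff.eof()) form can push one stale entry after the last successful read; a minimal alternative sketch that stops exactly when extraction fails is shown below (illustration only; readStamped is a hypothetical helper, not part of the change):

#include <fstream>
#include <string>
#include <utility>
#include <vector>

void readStamped(const std::string& file,
                 std::vector<std::pair<double, std::string>>& output)
{
  std::ifstream iff(file.c_str());
  std::vector<std::pair<double, std::string>> tmp;
  double time;
  std::string name;
  while (iff >> time >> name) // fails (and stops) cleanly at end of input
    tmp.emplace_back(time, name);
  tmp.swap(output);
}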
if ( i>= total)
return false;
- string file = folder_ + (accociations_.empty() ? rgb_stamps_and_filenames_[i].second : accociations_[i].name2);
+ std::string file = folder_ + (accociations_.empty() ? rgb_stamps_and_filenames_[i].second : accociations_[i].name2);
cv::Mat bgr = cv::imread(file);
if(bgr.empty())
if ( i>= total)
return false;
- string file = folder_ + (accociations_.empty() ? depth_stamps_and_filenames_[i].second : accociations_[i].name1);
+ std::string file = folder_ + (accociations_.empty() ? depth_stamps_and_filenames_[i].second : accociations_[i].name1);
cv::Mat d_img = cv::imread(file, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if(d_img.empty())
if ( i>= accociations_.size())
return false;
- string depth_file = folder_ + accociations_[i].name1;
- string color_file = folder_ + accociations_[i].name2;
+ std::string depth_file = folder_ + accociations_[i].name1;
+ std::string color_file = folder_ + accociations_[i].name2;
cv::Mat d_img = cv::imread(depth_file, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if(d_img.empty())
#include <pcl/gpu/kinfu_large_scale/screenshot_manager.h>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace Eigen;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-std::vector<string> getPcdFilesInDir(const string& directory)
+std::vector<std::string> getPcdFilesInDir(const std::string& directory)
{
namespace fs = boost::filesystem;
fs::path dir(directory);
if (directory.empty() || !fs::exists(dir) || !fs::is_directory(dir))
PCL_THROW_EXCEPTION (pcl::IOException, "No valid PCD directory given!\n");
- std::vector<string> result;
+ std::vector<std::string> result;
fs::directory_iterator pos(dir);
fs::directory_iterator end;
pcl::copyPointCloud (points, *merged_ptr);
for (std::size_t i = 0; i < colors.size (); ++i)
- merged_ptr->points[i].rgba = colors.points[i].rgba;
+ (*merged_ptr)[i].rgba = colors[i].rgba;
return merged_ptr;
}
return pcl::PolygonMesh::Ptr();
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = (int)triangles.size();
+ cloud.width = triangles.size();
cloud.height = 1;
triangles.download(cloud.points);
}
inline void
- drawCamera (Eigen::Affine3f& pose, const string& name, double r, double g, double b)
+ drawCamera (Eigen::Affine3f& pose, const std::string& name, double r, double g, double b)
{
double focal = 575;
double height = 480;
}
inline void
- removeCamera (const string& name)
+ removeCamera (const std::string& name)
{
cloud_viewer_.removeShape (name);
std::stringstream ss;
void
displayICPState (KinfuTracker& kinfu, bool was_lost_)
{
- string name = "last_good_track";
- string name_estimate = "last_good_estimate";
+ std::string name = "last_good_track";
+ std::string name_estimate = "last_good_estimate";
if (was_lost_ && !kinfu.icpIsLost ()) //execute only when ICP just recovered (i.e. was_lost_ == true && icpIsLost == false)
{
removeCamera(name);
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
- std::cout << "\nGetting cloud... " << flush;
+ std::cout << "\nGetting cloud... " << std::flush;
valid_combined_ = false;
kinfu.volume().fetchNormals (extracted, normals_device_);
pcl::gpu::kinfuLS::mergePointNormal (extracted, normals_device_, combined_device_);
combined_device_.download (combined_ptr_->points);
- combined_ptr_->width = (int)combined_ptr_->points.size ();
+ combined_ptr_->width = combined_ptr_->size ();
combined_ptr_->height = 1;
valid_combined_ = true;
else
{
extracted.download (cloud_ptr_->points);
- cloud_ptr_->width = (int)cloud_ptr_->points.size ();
+ cloud_ptr_->width = cloud_ptr_->size ();
cloud_ptr_->height = 1;
}
{
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
- point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
+ point_colors_ptr_->width = point_colors_ptr_->size ();
point_colors_ptr_->height = 1;
}
else
point_colors_ptr_->points.clear();
}
- std::size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
- std::cout << "Done. Cloud size: " << points_size / 1000 << "K" << std::endl;
+ const auto size = valid_combined_ ? combined_ptr_->size () : cloud_ptr_->size ();
+ std::cout << "Done. Cloud size: " << size / 1000 << "K" << std::endl;
cloud_viewer_.removeAllPointClouds ();
if (valid_combined_)
showMesh(KinfuTracker& kinfu, bool /*integrate_colors*/)
{
ScopeTimeT time ("Mesh Extraction");
- std::cout << "\nGetting mesh... " << flush;
+ std::cout << "\nGetting mesh... " << std::flush;
if (!marching_cubes_)
marching_cubes_ = MarchingCubes::Ptr( new MarchingCubes() );
}
void
- toggleEvaluationMode(const string& eval_folder, const string& match_file = string())
+ toggleEvaluationMode(const std::string& eval_folder, const std::string& match_file = std::string())
{
evaluation_ptr_ = Evaluation::Ptr( new Evaluation(eval_folder) );
if (!match_file.empty())
if (scan_volume_)
{
- std::cout << "Downloading TSDF volume from device ... " << flush;
+ std::cout << "Downloading TSDF volume from device ... " << std::flush;
// kinfu_->volume().downloadTsdfAndWeighs (tsdf_volume_.volumeWriteable (), tsdf_volume_.weightsWriteable ());
kinfu_->volume ().downloadTsdfAndWeightsLocal ();
// tsdf_volume_.setHeader (Eigen::Vector3i (pcl::device::kinfuLS::VOLUME_X, pcl::device::kinfuLS::VOLUME_Y, pcl::device::kinfuLS::VOLUME_Z), kinfu_->volume().getSize ());
// std::cout << "done [" << tsdf_volume_.size () << " voxels]" << std::endl << std::endl;
std::cout << "done [" << kinfu_->volume ().size () << " voxels]" << std::endl << std::endl;
- std::cout << "Converting volume to TSDF cloud ... " << flush;
+ std::cout << "Converting volume to TSDF cloud ... " << std::flush;
// tsdf_volume_.convertToTsdfCloud (tsdf_cloud_ptr_);
kinfu_->volume ().convertToTsdfCloud (tsdf_cloud_ptr_);
// std::cout << "done [" << tsdf_cloud_ptr_->size () << " points]" << std::endl << std::endl;
std::cout << std::endl << "Volume scan: " << (app->scan_volume_ ? "enabled" : "disabled") << std::endl << std::endl;
break;
case (int)'v': case (int)'V':
- std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << flush;
+ std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << std::flush;
// app->tsdf_volume_.save ("tsdf_volume.dat", true);
app->kinfu_->volume ().save ("tsdf_volume.dat", true);
// std::cout << "done [" << app->tsdf_volume_.size () << " voxels]" << std::endl;
std::cout << "done [" << app->kinfu_->volume ().size () << " voxels]" << std::endl;
- std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << flush;
+ std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << std::flush;
pcl::io::savePCDFile<pcl::PointXYZI> ("tsdf_cloud.pcd", *app->tsdf_cloud_ptr_, true);
std::cout << "done [" << app->tsdf_cloud_ptr_->size () << " points]" << std::endl;
break;
{
if (format == KinFuLSApp::PCD_BIN)
{
- std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << flush;
+ std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << std::flush;
pcl::io::savePCDFile ("cloud_bin.pcd", *cloud_prt, true);
}
else if (format == KinFuLSApp::PCD_ASCII)
{
- std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << std::flush;
pcl::io::savePCDFile ("cloud.pcd", *cloud_prt, false);
}
else /* if (format == KinFuLSApp::PLY) */
{
- std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << std::flush;
pcl::io::savePLYFileASCII ("cloud.ply", *cloud_prt);
}
{
if (format == KinFuLSApp::MESH_PLY)
{
- std::cout << "Saving mesh to to 'mesh.ply'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.ply'... " << std::flush;
pcl::io::savePLYFile("mesh.ply", mesh);
}
else /* if (format == KinFuLSApp::MESH_VTK) */
{
- std::cout << "Saving mesh to to 'mesh.vtk'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.vtk'... " << std::flush;
pcl::io::saveVTKFile("mesh.vtk", mesh);
}
std::cout << "Done" << std::endl;
float fps_pcd = 15.0f;
pc::parse_argument (argc, argv, "-pcd_fps", fps_pcd);
- std::vector<string> pcd_files = getPcdFilesInDir(pcd_dir);
+ std::vector<std::string> pcd_files = getPcdFilesInDir(pcd_dir);
// Sort the read files by name
sort (pcd_files.begin (), pcd_files.end ());
capture.reset (new pcl::PCDGrabber<pcl::PointXYZRGBA> (pcd_files, fps_pcd, false));
#include <opencv/highgui.h>
//SIMENDSIMENDSIMENDSIMENDSIMENDSIMENDSIMEND
-using namespace std;
using namespace pcl;
using namespace pcl::gpu;
using namespace Eigen;
using namespace pcl::console;
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
std::uint16_t t_gamma[2048];
Scene::Ptr scene_;
Camera::Ptr camera_;
// timestamps and displays the elapsed time between them as
// a fraction and time used [for profiling]
void
-display_tic_toc (vector<double> &tic_toc,const string &fun_name)
+display_tic_toc (std::vector<double> &tic_toc,const std::string &fun_name)
{
std::size_t tic_toc_size = tic_toc.size ();
// Save in local frame
range_likelihood_->getPointCloud (pc_out,false,camera_->pose ());
// TODO: what to do when there are more than one simulated view?
- std::cout << pc_out->points.size() << " points written to file\n";
+ std::cout << pc_out->size() << " points written to file\n";
pcl::PCDWriter writer;
//writer.write (point_cloud_fname, *pc_out, false); /// ASCII
//pcl::concatenateFields (points, colors, *merged_ptr); why error?
for (std::size_t i = 0; i < colors.size (); ++i)
- merged_ptr->points[i].rgba = colors.points[i].rgba;
+ (*merged_ptr)[i].rgba = colors[i].rgba;
return merged_ptr;
}
return PolygonMesh::Ptr ();
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = (int)triangles.size();
+ cloud.width = triangles.size();
cloud.height = 1;
triangles.download(cloud.points);
viewer_pose_ = kinfu.getCameraPose();
ScopeTimeT time ("PointCloud Extraction");
- std::cout << "\nGetting cloud... " << flush;
+ std::cout << "\nGetting cloud... " << std::flush;
valid_combined_ = false;
kinfu.volume().fetchNormals (extracted, normals_device_);
pcl::gpu::mergePointNormal (extracted, normals_device_, combined_device_);
combined_device_.download (combined_ptr_->points);
- combined_ptr_->width = (int)combined_ptr_->points.size ();
+ combined_ptr_->width = combined_ptr_->size ();
combined_ptr_->height = 1;
valid_combined_ = true;
else
{
extracted.download (cloud_ptr_->points);
- cloud_ptr_->width = (int)cloud_ptr_->points.size ();
+ cloud_ptr_->width = cloud_ptr_->size ();
cloud_ptr_->height = 1;
}
{
kinfu.colorVolume().fetchColors(extracted, point_colors_device_);
point_colors_device_.download(point_colors_ptr_->points);
- point_colors_ptr_->width = (int)point_colors_ptr_->points.size ();
+ point_colors_ptr_->width = point_colors_ptr_->size ();
point_colors_ptr_->height = 1;
}
else
point_colors_ptr_->points.clear();
}
- std::size_t points_size = valid_combined_ ? combined_ptr_->points.size () : cloud_ptr_->points.size ();
- std::cout << "Done. Cloud size: " << points_size / 1000 << "K" << std::endl;
+ const auto size = valid_combined_ ? combined_ptr_->size () : cloud_ptr_->size ();
+ std::cout << "Done. Cloud size: " << size / 1000 << "K" << std::endl;
cloud_viewer_.removeAllPointClouds ();
if (valid_combined_)
showMesh(KinfuTracker& kinfu, bool /*integrate_colors*/)
{
ScopeTimeT time ("Mesh Extraction");
- std::cout << "\nGetting mesh... " << flush;
+ std::cout << "\nGetting mesh... " << std::flush;
if (!marching_cubes_)
marching_cubes_ = MarchingCubes::Ptr( new MarchingCubes() );
}
void
- toggleEvaluationMode(const string& eval_folder, const string& match_file = string())
+ toggleEvaluationMode(const std::string& eval_folder, const std::string& match_file = std::string())
{
evaluation_ptr_ = Evaluation::Ptr( new Evaluation(eval_folder) );
if (!match_file.empty())
// download tsdf volume
{
ScopeTimeT time ("tsdf volume download");
- std::cout << "Downloading TSDF volume from device ... " << flush;
+ std::cout << "Downloading TSDF volume from device ... " << std::flush;
// kinfu_.volume().downloadTsdfAndWeighs (tsdf_volume_.volumeWriteable (), tsdf_volume_.weightsWriteable ());
kinfu_.volume ().downloadTsdfAndWeighsLocal ();
// tsdf_volume_.setHeader (Eigen::Vector3i (pcl::device::VOLUME_X, pcl::device::VOLUME_Y, pcl::device::VOLUME_Z), kinfu_.volume().getSize ());
}
{
ScopeTimeT time ("converting");
- std::cout << "Converting volume to TSDF cloud ... " << flush;
+ std::cout << "Converting volume to TSDF cloud ... " << std::flush;
// tsdf_volume_.convertToTsdfCloud (tsdf_cloud_ptr_);
kinfu_.volume ().convertToTsdfCloud (tsdf_cloud_ptr_);
std::cout << "done [" << tsdf_cloud_ptr_->size () << " points]" << std::endl << std::endl;
std::cout << std::endl << "Volume scan: " << (app->scan_volume_ ? "enabled" : "disabled") << std::endl << std::endl;
break;
case (int)'v': case (int)'V':
- std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << flush;
+ std::cout << "Saving TSDF volume to tsdf_volume.dat ... " << std::flush;
// app->tsdf_volume_.save ("tsdf_volume.dat", true);
app->kinfu_.volume ().save ("tsdf_volume.dat", true);
//std::cout << "done [" << app->tsdf_volume_.size () << " voxels]" << std::endl;
std::cout << "done [" << app->app->kinfu_.volume ().size () << " voxels]" << std::endl;
- std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << flush;
+ std::cout << "Saving TSDF volume cloud to tsdf_cloud.pcd ... " << std::flush;
pcl::io::savePCDFile<pcl::PointXYZI> ("tsdf_cloud.pcd", *app->tsdf_cloud_ptr_, true);
std::cout << "done [" << app->tsdf_cloud_ptr_->size () << " points]" << std::endl;
break;
{
if (format == KinFuApp::PCD_BIN)
{
- std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << flush;
+ std::cout << "Saving point cloud to 'cloud_bin.pcd' (binary)... " << std::flush;
pcl::io::savePCDFile ("cloud_bin.pcd", *cloud_prt, true);
}
else
if (format == KinFuApp::PCD_ASCII)
{
- std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.pcd' (ASCII)... " << std::flush;
pcl::io::savePCDFile ("cloud.pcd", *cloud_prt, false);
}
else /* if (format == KinFuApp::PLY) */
{
- std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << flush;
+ std::cout << "Saving point cloud to 'cloud.ply' (ASCII)... " << std::flush;
pcl::io::savePLYFileASCII ("cloud.ply", *cloud_prt);
}
if (format == KinFuApp::MESH_PLY)
{
- std::cout << "Saving mesh to to 'mesh.ply'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.ply'... " << std::flush;
pcl::io::savePLYFile("mesh.ply", mesh);
}
else /* if (format == KinFuApp::MESH_VTK) */
{
- std::cout << "Saving mesh to to 'mesh.vtk'... " << flush;
+ std::cout << "Saving mesh to to 'mesh.vtk'... " << std::flush;
pcl::io::saveVTKFile("mesh.vtk", mesh);
}
std::cout << "Done" << std::endl;
#include "tsdf_volume.h"
#include "tsdf_volume.hpp"
-using namespace std;
namespace pc = pcl::console;
using PointT = pcl::PointXYZ;
using VoxelT = float;
using WeightT = short;
-string cloud_file = "cloud.pcd";
-string volume_file = "tsdf_volume.dat";
+std::string cloud_file = "cloud.pcd";
+std::string volume_file = "tsdf_volume.dat";
double min_trunc_dist = 30.0f;
// write into point cloud structure
device_cloud_buffer.download (cloud->points);
- cloud->width = (int)cloud->points.size ();
+ cloud->width = cloud->size ();
cloud->height = 1;
return true;
// integrate depth in device volume
- pc::print_highlight ("Converting depth map to volume ... "); std::cout << flush;
+ pc::print_highlight ("Converting depth map to volume ... "); std::cout << std::flush;
device_volume->createFromDepth (depth, intr);
// get volume from device
// generating TSDF cloud
- pc::print_highlight ("Generating tsdf volume cloud ... "); std::cout << flush;
+ pc::print_highlight ("Generating tsdf volume cloud ... "); std::cout << std::flush;
pcl::PointCloud<pcl::PointXYZI>::Ptr tsdf_cloud (new pcl::PointCloud<pcl::PointXYZI>);
volume->convertToTsdfCloud (tsdf_cloud);
pc::print_info ("done [%d points]\n", tsdf_cloud->size());
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_volume (new pcl::PointCloud<pcl::PointXYZ>);
if (extract_cloud_volume)
{
- pc::print_highlight ("Generating cloud from volume ... "); std::cout << flush;
+ pc::print_highlight ("Generating cloud from volume ... "); std::cout << std::flush;
if (!device_volume->getCloud (cloud_volume))
{
pc::print_error ("Cloudn't get cloud from device volume!\n");
pc::print_error ("Cloudn't save the volume to file %s.\n", volume_file.c_str());
// TSDF point cloud
- string tsdf_cloud_file (pcl::getFilenameWithoutExtension(volume_file) + "_cloud.pcd");
+ std::string tsdf_cloud_file (pcl::getFilenameWithoutExtension(volume_file) + "_cloud.pcd");
pc::print_info ("Saving volume cloud to "); pc::print_value ("%s", tsdf_cloud_file.c_str()); pc::print_info (" ... ");
if (pcl::io::savePCDFile (tsdf_cloud_file, *tsdf_cloud, true) < 0)
{
// point cloud from volume
if (extract_cloud_volume)
{
- string cloud_volume_file (pcl::getFilenameWithoutExtension(cloud_file) + "_from_volume.pcd");
+ std::string cloud_volume_file (pcl::getFilenameWithoutExtension(cloud_file) + "_from_volume.pcd");
pc::print_info ("Saving cloud from volume to "); pc::print_value ("%s", cloud_volume_file.c_str()); pc::print_info (" ... ");
if (pcl::io::savePCDFile (cloud_volume_file, *cloud_volume, true) < 0)
{
/** \brief Helper function that reads a camera file outputted by Kinfu */
bool readCamPoseFile(std::string filename, pcl::TextureMapping<pcl::PointXYZ>::Camera &cam)
{
- ifstream myReadFile;
- myReadFile.open(filename.c_str (), ios::in);
+ std::ifstream myReadFile;
+ myReadFile.open(filename.c_str (), std::ios::in);
if(!myReadFile.is_open ())
{
PCL_ERROR ("Error opening file %d\n", filename.c_str ());
polygon_1[i] = triangles.polygons[i];
}
mesh.tex_polygons.push_back(polygon_1);
- PCL_INFO ("\tInput mesh contains %d faces and %d vertices\n", mesh.tex_polygons[0].size (), cloud->points.size ());
+ PCL_INFO("\tInput mesh contains %zu faces and %zu vertices\n",
+ mesh.tex_polygons[0].size(),
+ static_cast<std::size_t>(cloud->size()));
PCL_INFO ("...Done.\n");
// Load textures and cameras poses and intrinsics
my_cams.push_back (cam);
}
}
- PCL_INFO ("\tLoaded %d textures.\n", my_cams.size ());
+ PCL_INFO ("\tLoaded %zu textures.\n", my_cams.size ());
PCL_INFO ("...Done.\n");
// Display cameras to user
PCL_INFO ("Sorting faces by cameras done.\n");
for(std::size_t i = 0 ; i <= my_cams.size() ; ++i)
{
- PCL_INFO ("\tSub mesh %d contains %d faces and %d UV coordinates.\n", i, mesh.tex_polygons[i].size (), mesh.tex_coordinates[i].size ());
+ PCL_INFO ("\tSub mesh %zu contains %zu faces and %zu UV coordinates.\n", i, mesh.tex_polygons[i].size (), mesh.tex_coordinates[i].size ());
}
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs})
-
-add_subdirectory(test)
#include "cuda.h"
-using namespace std;
using namespace thrust;
namespace pcl
#include "utils/boxutils.hpp"
#include "utils/bitonic_sort.hpp"
#include "octree_iterator.hpp"
+#include <tuple>
namespace pcl { namespace device { namespace knn_search
{
OctreeIterator iterator;
- __device__ __forceinline__ Warp_knnSearch(const Batch& batch_arg, int query_index_arg)
+ __device__ __forceinline__ Warp_knnSearch(const Batch& batch_arg, const int query_index_arg)
: batch(batch_arg), query_index(query_index_arg), min_distance(std::numeric_limits<float>::max()), min_idx(0), iterator(batch.octree) { }
__device__ __forceinline__ void launch(bool active)
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
- int node_idx = *iterator;
- int code = batch.octree.codes[node_idx];
+ const int node_idx = *iterator;
+ const int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
}
//need to go to next level
- int node = batch.octree.nodes[node_idx];
- int children_mask = node & 0xFF;
- bool isLeaf = children_mask == 0;
+ const int node = batch.octree.nodes[node_idx];
+ const int children_mask = node & 0xFF;
+ const bool isLeaf = children_mask == 0;
if (isLeaf)
{
}
//goto next level
- int first = node >> 8;
- int len = __popc(children_mask);
+ const int first = node >> 8;
+ const int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
- __device__ __forceinline__ void processLeaf(int node_idx)
+ __device__ __forceinline__ void processLeaf(const int node_idx)
{
int mask = __ballot_sync(0xFFFFFFFF, node_idx != -1);
- unsigned int laneId = Warp::laneId();
- unsigned int warpId = Warp::id();
+ const unsigned int laneId = Warp::laneId();
__shared__ volatile int per_warp_buffer[KernelPolicy::WARPS_COUNT];
while(mask)
{
- int active_lane = __ffs(mask) - 1; //[0..31]
- mask &= ~(1 << active_lane);
+ const int active_lane = __ffs(mask) - 1; //[0..31]
+ mask &= ~(1 << active_lane);
- volatile int* warp_buffer = &per_warp_buffer[warpId];
-
- //broadcast beg
+ //broadcast beg and end
+ int fbeg, fend;
if (active_lane == laneId)
- *warp_buffer = batch.octree.begs[node_idx];
- int beg = *warp_buffer;
+ {
+ fbeg = batch.octree.begs[node_idx];
+ fend = batch.octree.ends[node_idx];
+ }
+ const int beg = __shfl_sync(0xFFFFFFFF, fbeg, active_lane);
+ const int end = __shfl_sync(0xFFFFFFFF, fend, active_lane);
- //broadcast end
- if (active_lane == laneId)
- *warp_buffer = batch.octree.ends[node_idx];
- int end = *warp_buffer;
-
- float3 active_query;
- volatile float* warp_buffer_float = (float*)&per_warp_buffer[warpId];
//broadcast warp_query
- if (active_lane == laneId)
- *warp_buffer_float = query.x;
- active_query.x = *warp_buffer_float;
+ const float3 active_query = make_float3(
+ __shfl_sync(0xFFFFFFFF, query.x, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.y, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.z, active_lane)
+ );
- if (active_lane == laneId)
- *warp_buffer_float = query.y;
- active_query.y = *warp_buffer_float;
-
- if (active_lane == laneId)
- *warp_buffer_float = query.z;
- active_query.z = *warp_buffer_float;
+ const auto nearestPoint = NearestWarpKernel<KernelPolicy::CTA_SIZE>(beg, batch.points_step, end - beg, active_query);
- //broadcast query_index
if (active_lane == laneId)
- *warp_buffer = query_index;
- float active_query_index = *warp_buffer;
-
- float dist;
- int offset = NearestWarpKernel<KernelPolicy::CTA_SIZE>(batch.points + beg, batch.points_step, end - beg, active_query, dist);
-
- if (active_lane == laneId)
- if (min_distance > dist)
+ if (min_distance > nearestPoint.second)
{
- min_distance = dist;
- min_idx = beg + offset;
- }
+ min_distance = nearestPoint.second;
+ min_idx = beg + nearestPoint.first;
+ }
}
}
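The broadcasts above replace the old volatile shared-memory round-trips with register shuffles; a minimal sketch of the pattern, assuming all 32 lanes of the warp participate (the helper below is illustrative, not part of the diff):

// Broadcast a float3 held by src_lane to every lane of the warp without
// touching shared memory. Needs compute capability >= 3.0; on CUDA >= 9 the
// participation mask must be passed explicitly.
__device__ __forceinline__ float3
broadcast_float3(const float3& v, const int src_lane)
{
  return make_float3(__shfl_sync(0xFFFFFFFF, v.x, src_lane),
                     __shfl_sync(0xFFFFFFFF, v.y, src_lane),
                     __shfl_sync(0xFFFFFFFF, v.z, src_lane));
}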
- template<int CTA_SIZE>
- __device__ __forceinline__ int NearestWarpKernel(const float* points, int points_step, int length, const float3& active_query, float& dist)
- {
- __shared__ volatile float dist2[CTA_SIZE];
- __shared__ volatile int index[CTA_SIZE];
-
- int tid = threadIdx.x;
- dist2[tid] = std::numeric_limits<float>::max();
-
- //serial step
- for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE)
- {
- float dx = points[idx ] - active_query.x;
- float dy = points[idx + points_step ] - active_query.y;
- float dz = points[idx + points_step * 2] - active_query.z;
-
- float d2 = dx * dx + dy * dy + dz * dz;
-
- if (dist2[tid] > d2)
- {
- dist2[tid] = d2;
- index[tid] = idx;
- }
- }
- //parallel step
- unsigned int lane = Warp::laneId();
-
- float mind2 = dist2[tid];
-
- if (lane < 16)
- {
- float next = dist2[tid + 16];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 16];
- }
- }
-
- if (lane < 8)
- {
- float next = dist2[tid + 8];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 8];
- }
- }
-
- if (lane < 4)
- {
- float next = dist2[tid + 4];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 4];
- }
- }
-
- if (lane < 2)
- {
- float next = dist2[tid + 2];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 2];
- }
- }
-
- if (lane < 1)
- {
- float next = dist2[tid + 1];
- if (mind2 > next)
- {
- dist2[tid] = mind2 = next;
- index[tid] = index[tid + 1];
- }
- }
-
- dist = sqrt(dist2[tid - lane]);
- return index[tid - lane];
- }
+ template <int CTA_SIZE>
+ __device__ std::pair<int, float>
+ NearestWarpKernel(const int beg,
+ const int field_step,
+ const int length,
+ const float3& active_query)
+
+ {
+ int index = 0;
+ float dist2 = std::numeric_limits<float>::max();
+
+ // serial step
+ for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE) {
+ const float dx = batch.points[beg + idx] - active_query.x;
+ const float dy = batch.points[beg + idx + field_step] - active_query.y;
+ const float dz = batch.points[beg + idx + field_step * 2] - active_query.z;
+
+ const float d2 = dx * dx + dy * dy + dz * dz;
+
+ if (dist2 > d2) {
+ dist2 = d2;
+ index = idx;
+ }
+ }
+
+ // find minimum distance among warp threads
+ constexpr unsigned FULL_MASK = 0xFFFFFFFF;
+ static_assert(KernelPolicy::WARP_SIZE <= 8*sizeof(unsigned int), "WARP_SIZE exceeds size of bit_offset.");
+
+ for (unsigned int bit_offset = KernelPolicy::WARP_SIZE / 2; bit_offset > 0;
+ bit_offset /= 2) {
+ const float next = __shfl_down_sync(FULL_MASK, dist2, bit_offset);
+ const int next_index = __shfl_down_sync(FULL_MASK, index, bit_offset);
+
+ if (dist2 > next) {
+ dist2 = next;
+ index = next_index;
+ }
+ }
+
+ // retrieve index and distance
+ index = __shfl_sync(FULL_MASK, index, 0);
+ const float dist = sqrt(__shfl_sync(FULL_MASK, dist2, 0));
+
+ return std::make_pair(index, dist);
+ }
};
__global__ void KernelKNN(const Batch batch)
{
- int query_index = blockIdx.x * blockDim.x + threadIdx.x;
+ const int query_index = blockIdx.x * blockDim.x + threadIdx.x;
- bool active = query_index < batch.queries_num;
+ const bool active = query_index < batch.queries_num;
if (__all_sync(0xFFFFFFFF, active == false))
return;
using namespace pcl::gpu;
using namespace pcl::device;
-using namespace std;
namespace pcl
{
int beg = host_octree.begs[node_idx];
int end = host_octree.ends[node_idx];
- end = beg + min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
+ end = beg + std::min<int>((int)out.size() + end - beg, max_nn) - (int)out.size();
out.insert(out.end(), host_octree.indices.begin() + beg, host_octree.indices.begin() + end);
if (out.size() == (std::size_t)max_nn)
struct DirectQuery
{
PtrSz<PointType> queries;
- __device__ __forceinline__ float3 fetch(int query_index) const
+ __device__ __forceinline__ float3 fetch(const int query_index) const
{
- PointType q = queries.data[query_index];
+ const PointType& q = queries.data[query_index];
return make_float3(q.x, q.y, q.z);
}
};
struct IndicesQuery : public DirectQuery
{
const int* queries_indices;
- __device__ __forceinline__ float3 fetch(int query_index) const
+ __device__ __forceinline__ float3 fetch(const int query_index) const
{
- PointType q = queries[queries_indices[query_index]];
+ const PointType& q = queries[queries_indices[query_index]];
return make_float3(q.x, q.y, q.z);
}
};
struct SharedRadius
{
float radius;
- __device__ __forceinline__ float getRadius(int /*index*/) const { return radius; }
- __device__ __forceinline__ float bradcastRadius2(float* /*ptr*/, bool /*active*/, float& /*radius_reg*/) const
- {
- return radius * radius;
- }
+ __device__ __forceinline__ float getRadius(const int /*index*/) const { return radius; }
};
struct IndividualRadius
{
const float* radiuses;
- __device__ __forceinline__ float getRadius(int index) const { return radiuses[index]; }
- __device__ __forceinline__ float bradcastRadius2(float* ptr, bool active, float& radius_reg) const
- {
- if (active)
- *ptr = radius_reg * radius_reg;
- return *ptr;
- }
+ __device__ __forceinline__ float getRadius(const int index) const { return radiuses[index]; }
};
struct KernelPolicy
float3 query;
float radius;
- __device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, int query_index_arg)
+ __device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, const int query_index_arg)
: batch(batch_arg), iterator(/**/batch.octree/*storage.paths*/), found_count(0), query_index(query_index_arg){}
__device__ __forceinline__ void launch(bool active)
{
using namespace pcl::gpu;
- int node_idx = *iterator;
- int code = batch.octree.codes[node_idx];
+ const int node_idx = *iterator;
+ const int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
}
//need to go to next level
- int node = batch.octree.nodes[node_idx];
- int children_mask = node & 0xFF;
- bool isLeaf = children_mask == 0;
+ const int node = batch.octree.nodes[node_idx];
+ const int children_mask = node & 0xFF;
+ const bool isLeaf = children_mask == 0;
if (isLeaf)
{
}
//goto next level
- int first = node >> 8;
- int len = __popc(children_mask);
+ const int first = node >> 8;
+ const int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
while(mask)
{
- unsigned int laneId = Warp::laneId();
- unsigned int warpId = Warp::id();
+ const unsigned int laneId = Warp::laneId();
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
//broadcast active_found_count
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = found_count;
- int active_found_count = storage.per_warp_buffer[warpId];
-
- int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
+ const int active_found_count = __shfl_sync(0xFFFFFFFF, found_count, active_lane);
- //broadcast beg
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = batch.octree.begs[node_idx];
- int beg = storage.per_warp_buffer[warpId];
+ const int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
- //broadcast end
+ //broadcast beg and end
+ int fbeg, fend;
if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = batch.octree.ends[node_idx];
- int end = storage.per_warp_buffer[warpId];
+ {
+ fbeg = batch.octree.begs[node_idx];
+ fend = batch.octree.ends[node_idx];
+ }
+ const int beg = __shfl_sync(0xFFFFFFFF, fbeg, active_lane);
+ const int end = __shfl_sync(0xFFFFFFFF, fend, active_lane);
//broadcast active_query_index
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = query_index;
- int active_query_index = storage.per_warp_buffer[warpId];
+ const int active_query_index = __shfl_sync(0xFFFFFFFF, query_index, active_lane);
int length = end - beg;
int *out = batch.output + active_query_index * batch.max_results + active_found_count;
- int length_left = batch.max_results - active_found_count;
+ const int length_left = batch.max_results - active_found_count;
- int test = __any_sync(0xFFFFFFFF, active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
+ const int test = __any_sync(0xFFFFFFFF, active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
if (test)
- {
- float3 active_query;
+ {
+ //broadcast warp_radius
+ const float radius2 = __shfl_sync(0xFFFFFFFF, radius * radius, active_lane);
//broadcast warp_query
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = __float_as_int(query.x);
- active_query.x = __int_as_float(storage.per_warp_buffer[warpId]);
-
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = __float_as_int(query.y);
- active_query.y = __int_as_float(storage.per_warp_buffer[warpId]);
-
- if (active_lane == laneId)
- storage.per_warp_buffer[warpId] = __float_as_int(query.z);
- active_query.z = __int_as_float(storage.per_warp_buffer[warpId]);
-
- float radius2 = batch.bradcastRadius2((float*)&storage.per_warp_buffer[warpId], (active_lane == laneId), radius);
+ const float3 active_query = make_float3(
+ __shfl_sync(0xFFFFFFFF, query.x, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.y, active_lane),
+ __shfl_sync(0xFFFFFFFF, query.z, active_lane)
+ );
- length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
+ length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
}
else
{
}
}
- __device__ __forceinline__ int TestWarpKernel(int beg, const float3& active_query, float radius2, int length, int* out, int length_left)
+ __device__ __forceinline__ int TestWarpKernel(const int beg, const float3& active_query, const float radius2, const int length, int* out, const int length_left)
{
unsigned int idx = Warp::laneId();
- int last_threadIdx = threadIdx.x - idx + 31;
+ const int last_threadIdx = threadIdx.x - idx + 31;
int total_new = 0;
if (idx < length)
{
- float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
- float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
- float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
+ const float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
+ const float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
+ const float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
- float d2 = dx * dx + dy * dy + dz * dz;
+ const float d2 = dx * dx + dy * dy + dz * dz;
if (d2 < radius2)
take = 1;
storage.cta_buffer[threadIdx.x] = take;
- int offset = scan_warp<exclusive>(storage.cta_buffer);
+ const int offset = scan_warp<exclusive>(storage.cta_buffer);
//ensure that we copy
- bool out_of_bounds = (offset + total_new) >= length_left;
+ const bool out_of_bounds = (offset + total_new) >= length_left;
if (take && !out_of_bounds)
out[offset] = batch.indices[beg + idx];
- int new_nodes = storage.cta_buffer[last_threadIdx];
+ const int new_nodes = storage.cta_buffer[last_threadIdx];
idx += Warp::STRIDE;
template<typename BatchType>
__global__ void KernelRS(const BatchType batch)
{
- int query_index = blockIdx.x * blockDim.x + threadIdx.x;
+ const int query_index = blockIdx.x * blockDim.x + threadIdx.x;
- bool active = query_index < batch.queries.size;
+ const bool active = query_index < batch.queries.size;
if (__all_sync(0xFFFFFFFF, active == false))
return;
#include<cassert>
using namespace pcl::device;
-using namespace std;
//////////////////////////////////////////////////////////////////////////////////////
//////////////// Octree Host Interface implementation ////////////////////////////////
#ifndef PCL_GPU_EMULATION
#define PCL_GPU_EMULATION
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
#include "utils/warp_reduce.hpp"
namespace pcl
{
static __forceinline__ __device__ int Ballot(int predicate, volatile int* cta_buffer)
{
- (void)cta_buffer;
+ pcl::utils::ignore(cta_buffer);
return __ballot(predicate);
}
};
}
}
-#endif /* PCL_GPU_EMULATION */
\ No newline at end of file
+#endif /* PCL_GPU_EMULATION */
+++ /dev/null
-if(NOT BUILD_TESTS)
- return()
-endif()
-
-set(the_test_target test_gpu_octree)
-
-get_filename_component(DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
-get_filename_component(INC "${DIR}/../../../octree/include" REALPATH)
-include_directories("${INC}")
-
-file(GLOB test_src *.cpp *.hpp)
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-
-#ifndef _PCL_TEST_GPU_OCTREE_DATAGEN_
-#define _PCL_TEST_GPU_OCTREE_DATAGEN_
-
-#include <vector>
-#include <algorithm>
-#include <iostream>
-#include <Eigen/StdVector>
-#include <cstdlib>
-
-
-#if defined (_WIN32) || defined(_WIN64)
- EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(pcl::PointXYZ)
-#endif
-
-
-namespace pcl
-{
-
-namespace gpu
-{
-
-struct DataGenerator
-{
- using PointType = Octree::PointType;
-
- std::size_t data_size;
- std::size_t tests_num;
-
- float cube_size;
- float max_radius;
-
- float shared_radius;
-
- std::vector<PointType> points;
- std::vector<PointType> queries;
- std::vector<float> radiuses;
- std::vector< std::vector<int> > bfresutls;
-
- std::vector<int> indices;
-
- DataGenerator() : data_size(871000), tests_num(10000), cube_size(1024.f)
- {
- max_radius = cube_size/15.f;
- shared_radius = cube_size/20.f;
- }
-
- void operator()()
- {
- srand (0);
-
- points.resize(data_size);
- for(std::size_t i = 0; i < data_size; ++i)
- {
- points[i].x = ((float)rand())/RAND_MAX * cube_size;
- points[i].y = ((float)rand())/RAND_MAX * cube_size;
- points[i].z = ((float)rand())/RAND_MAX * cube_size;
- }
-
-
- queries.resize(tests_num);
- radiuses.resize(tests_num);
- for (std::size_t i = 0; i < tests_num; ++i)
- {
- queries[i].x = ((float)rand())/RAND_MAX * cube_size;
- queries[i].y = ((float)rand())/RAND_MAX * cube_size;
- queries[i].z = ((float)rand())/RAND_MAX * cube_size;
- radiuses[i] = ((float)rand())/RAND_MAX * max_radius;
- };
-
- for(std::size_t i = 0; i < tests_num/2; ++i)
- indices.push_back(i*2);
- }
-
- void bruteForceSearch(bool log = false, float radius = -1.f)
- {
- if (log)
- std::cout << "BruteForceSearch";
-
- int value100 = std::min<int>(tests_num, 50);
- int step = tests_num/value100;
-
- bfresutls.resize(tests_num);
- for(std::size_t i = 0; i < tests_num; ++i)
- {
- if (log && i % step == 0)
- {
- std::cout << ".";
- std::cout.flush();
- }
-
- std::vector<int>& curr_res = bfresutls[i];
- curr_res.clear();
-
- float query_radius = radius > 0 ? radius : radiuses[i];
- const PointType& query = queries[i];
-
- for(std::size_t ind = 0; ind < points.size(); ++ind)
- {
- const PointType& point = points[ind];
-
- float dx = query.x - point.x;
- float dy = query.y - point.y;
- float dz = query.z - point.z;
-
- if (dx*dx + dy*dy + dz*dz < query_radius * query_radius)
- curr_res.push_back(ind);
- }
-
- std::sort(curr_res.begin(), curr_res.end());
- }
- if (log)
- std::cout << "Done" << std::endl;
- }
-
- void printParams() const
- {
- std::cout << "Points number = " << data_size << std::endl;
- std::cout << "Queries number = " << tests_num << std::endl;
- std::cout << "Cube size = " << cube_size << std::endl;
- std::cout << "Max radius = " << max_radius << std::endl;
- std::cout << "Shared radius = " << shared_radius << std::endl;
- }
-
- template<typename Dst>
- struct ConvPoint
- {
- Dst operator()(const PointType& src) const
- {
- Dst dst;
- dst.x = src.x;
- dst.y = src.y;
- dst.z = src.z;
- return dst;
- }
- };
-};
-
-} // namespace gpu
-} // namespace pcl
-
-#endif /* _PCL_TEST_GPU_OCTREE_DATAGEN_ */
-
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#if defined _MSC_VER
- #pragma warning (disable : 4996 4530)
-#endif
-
-#include <gtest/gtest.h>
-
-#include<iostream>
-#include<algorithm>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-
-#include <pcl/point_cloud.h>
-#include <pcl/octree/octree.h>
-
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-#include <pcl/common/time.h>
-#include "data_source.hpp"
-
-using namespace pcl::gpu;
-using namespace std;
-
-using pcl::ScopeTime;
-
-#if defined HAVE_OPENCV
- #include "opencv2/contrib/contrib.hpp"
-#endif
-
-//TEST(PCL_OctreeGPU, DISABLED_performance)
-TEST(PCL_OctreeGPU, performance)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/15.f;
- data.shared_radius = data.cube_size/15.f;
- data.printParams();
-
- //const int k = 32;
-
- std::cout << "sizeof(pcl::gpu::Octree::PointType): " << sizeof(pcl::gpu::Octree::PointType) << std::endl;
- //std::cout << "k = " << k << std::endl;
- //generate data
- data();
-
- //prepare device cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
- //prepare queries_device
- pcl::gpu::Octree::Queries queries_device;
- pcl::gpu::Octree::Radiuses radiuses_device;
- queries_device.upload(data.queries);
- radiuses_device.upload(data.radiuses);
-
- //prepare host cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
- cloud_host->width = data.points.size();
- cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
-
- float host_octree_resolution = 25.f;
-
- std::cout << "[!] Host octree resolution: " << host_octree_resolution << std::endl << std::endl;
-
- std::cout << "====== Build performance =====" << std::endl;
- // build device octree
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- {
- ScopeTime up("gpu-build");
- octree_device.build();
- }
- {
- ScopeTime up("gpu-download");
- octree_device.internalDownload();
- }
-
- //build host octree
- pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
- octree_host.setInputCloud (cloud_host);
- {
- ScopeTime t("host-build");
- octree_host.addPointsFromInputCloud();
- }
-
- // build opencv octree
-#ifdef HAVE_OPENCV
- cv::Octree octree_opencv;
- const static int opencv_octree_points_per_leaf = 32;
- std::vector<cv::Point3f> opencv_points(data.points.size());
- std::transform(data.points.begin(), data.points.end(), opencv_points.begin(), DataGenerator::ConvPoint<cv::Point3f>());
-
- {
- ScopeTime t("opencv-build");
- octree_opencv.buildTree(opencv_points, 10, opencv_octree_points_per_leaf);
- }
-#endif
-
- //// Radius search performance ///
-
- const int max_answers = 500;
- float dist;
- int inds;
-
- //host buffers
- std::vector<int> indeces;
- std::vector<float> pointRadiusSquaredDistance;
-#ifdef HAVE_OPENCV
- std::vector<cv::Point3f> opencv_results;
-#endif
-
- //reserve
- indeces.reserve(data.data_size);
- pointRadiusSquaredDistance.reserve(data.data_size);
-#ifdef HAVE_OPENCV
- opencv_results.reserve(data.data_size);
-#endif
-
- //device buffers
- pcl::gpu::DeviceArray<int> bruteforce_results_device, buffer(cloud_device.size());
- pcl::gpu::NeighborIndices result_device(queries_device.size(), max_answers);
-
- //pcl::gpu::Octree::BatchResult distsKNN_device(queries_device.size() * k);
- //pcl::gpu::Octree::BatchResultSqrDists indsKNN_device(queries_device.size() * k);
-
- std::cout << "====== Separate radius for each query =====" << std::endl;
-
- {
- ScopeTime up("gpu--radius-search-batch-all");
- octree_device.radiusSearch(queries_device, radiuses_device, max_answers, result_device);
- }
-
- {
- ScopeTime up("gpu-radius-search-{host}-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], indeces, max_answers);
- }
-
- {
- ScopeTime up("host-radius-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
- data.radiuses[i], indeces, pointRadiusSquaredDistance, max_answers);
- }
-
- {
- ScopeTime up("gpu_bruteforce-radius-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.radiuses[i], bruteforce_results_device, buffer);
- }
-
- std::cout << "====== Shared radius (" << data.shared_radius << ") =====" << std::endl;
-
- {
- ScopeTime up("gpu-radius-search-batch-all");
- octree_device.radiusSearch(queries_device, data.shared_radius, max_answers, result_device);
- }
-
- {
- ScopeTime up("gpu-radius-search-{host}-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_device.radiusSearchHost(data.queries[i], data.shared_radius, indeces, max_answers);
- }
-
- {
- ScopeTime up("host-radius-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
- data.radiuses[i], indeces, pointRadiusSquaredDistance, max_answers);
- }
-
- {
- ScopeTime up("gpu-radius-bruteforce-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.shared_radius, bruteforce_results_device, buffer);
- }
-
- std::cout << "====== Approx nearest search =====" << std::endl;
-
- {
- ScopeTime up("gpu-approx-nearest-batch-all");
- octree_device.approxNearestSearch(queries_device, result_device);
- }
-
- {
- ScopeTime up("gpu-approx-nearest-search-{host}-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_device.approxNearestSearchHost(data.queries[i], inds, dist);
- }
-
- {
- ScopeTime up("host-approx-nearest-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.approxNearestSearch(data.queries[i], inds, dist);
- }
-
- /* std::cout << "====== knn search ( k fixed to " << k << " ) =====" << std::endl;
- {
- ScopeTime up("gpu-knn-batch-all");
- octree_device.nearestKSearchBatch(queries_device, k, distsKNN_device, indsKNN_device);
- }
-
- {
- ScopeTime up("host-knn-search-all");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.nearestKSearch(data.queries[i], k, indeces, pointRadiusSquaredDistance);
- }*/
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#if defined _MSC_VER
- #pragma warning (disable : 4996 4530)
-#endif
-
-#include <gtest/gtest.h>
-
-#include<iostream>
-#include<fstream>
-#include<algorithm>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-#include <pcl/point_cloud.h>
-#include <pcl/octree/octree_search.h>
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-#include <pcl/gpu/utils/safe_call.hpp>
-
-#include "data_source.hpp"
-
-using namespace pcl::gpu;
-using namespace std;
-
-//TEST(PCL_OctreeGPU, DISABLED_approxNearesSearch)
-TEST(PCL_OctreeGPU, approxNearesSearch)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/30.f;
- data.shared_radius = data.cube_size/30.f;
- data.printParams();
-
- const float host_octree_resolution = 25.f;
-
- //generate
- data();
-
- //prepare device cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
-
- //prepare host cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
- cloud_host->width = data.points.size();
- cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
-
- //gpu build
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- octree_device.build();
-
- //build host octree
- pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
- octree_host.setInputCloud (cloud_host);
- octree_host.addPointsFromInputCloud();
-
- //upload queries
- pcl::gpu::Octree::Queries queries_device;
- queries_device.upload(data.queries);
-
-
- //prepare output buffers on device
- pcl::gpu::NeighborIndices result_device(data.tests_num, 1);
- std::vector<int> result_host_pcl(data.tests_num);
- std::vector<int> result_host_gpu(data.tests_num);
- std::vector<float> dists_pcl(data.tests_num);
- std::vector<float> dists_gpu(data.tests_num);
-
- //search GPU shared
- octree_device.approxNearestSearch(queries_device, result_device);
-
- std::vector<int> downloaded;
- result_device.data.download(downloaded);
-
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- octree_host.approxNearestSearch(data.queries[i], result_host_pcl[i], dists_pcl[i]);
- octree_device.approxNearestSearchHost(data.queries[i], result_host_gpu[i], dists_gpu[i]);
- }
-
- ASSERT_EQ ( ( downloaded == result_host_gpu ), true );
-
- int count_gpu_better = 0;
- int count_pcl_better = 0;
- float diff_pcl_better = 0;
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- float diff = dists_pcl[i] - dists_gpu[i];
- bool gpu_better = diff > 0;
-
- ++(gpu_better ? count_gpu_better : count_pcl_better);
-
- if (!gpu_better)
- diff_pcl_better +=std::abs(diff);
- }
-
- diff_pcl_better /=count_pcl_better;
-
- std::cout << "count_gpu_better: " << count_gpu_better << std::endl;
- std::cout << "count_pcl_better: " << count_pcl_better << std::endl;
- std::cout << "avg_diff_pcl_better: " << diff_pcl_better << std::endl;
-
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#include <gtest/gtest.h>
-
-#include <iostream>
-#include <numeric>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-
-#include <pcl/point_cloud.h>
-
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-
-#include "data_source.hpp"
-
-using namespace std;
-using namespace pcl::gpu;
-
-
-//TEST (PCL_GPU, DISABLED_bruteForceRadiusSeachGPU)
-TEST (PCL_GPU, bruteForceRadiusSeachGPU)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 100;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/15.f;
- data.shared_radius = data.cube_size/20.f;
- data.printParams();
-
- //generate
- data();
-
- // brute force radius search
- data.bruteForceSearch();
-
- //prepare gpu cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
- pcl::gpu::DeviceArray<int> results_device, buffer(cloud_device.size());
-
- std::vector<int> results_host;
- std::vector<std::size_t> sizes;
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.radiuses[i], results_device, buffer);
-
- results_device.download(results_host);
- std::sort(results_host.begin(), results_host.end());
-
- ASSERT_EQ ( (results_host == data.bfresutls[i]), true );
- sizes.push_back(results_device.size());
- }
-
- float avg_size = std::accumulate(sizes.begin(), sizes.end(), (std::size_t)0) * (1.f/sizes.size());;
-
- std::cout << "avg_result_size = " << avg_size << std::endl;
- ASSERT_GT(avg_size, 5);
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#include <numeric>
-#include <algorithm>
-#include <vector>
-
-#include <gtest/gtest.h>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
-#include <pcl/octree/octree_search.h>
-
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-#include <pcl/gpu/containers/initialization.h>
-
-#include "data_source.hpp"
-
-using namespace std;
-using namespace pcl;
-using namespace pcl::gpu;
-
-//TEST(PCL_OctreeGPU, DISABLED_hostRadiusSearch)
-TEST(PCL_OctreeGPU, hostRadiusSearch)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/15.f;
- data.shared_radius = data.cube_size/20.f;
- data.printParams();
-
- //generate
- data();
-
- //prepare device cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
- //prepare host cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
- cloud_host->width = data.points.size();
- cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
-
- // build device octree
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- octree_device.build();
-
-
-
- // build host octree
- float resolution = 25.f;
- std::cout << "[!]Octree resolution: " << resolution << std::endl;
- pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(resolution);
- octree_host.setInputCloud (cloud_host);
- octree_host.addPointsFromInputCloud ();
-
- //perform bruteForceSearch
- data.bruteForceSearch(true);
-
- std::vector<int> sizes;
- sizes.reserve(data.tests_num);
- octree_device.internalDownload();
-
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- //search host on octree that was built on device
- std::vector<int> results_host_gpu; //host search
- octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], results_host_gpu);
-
- //search host
- std::vector<float> dists;
- std::vector<int> results_host;
- octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z), data.radiuses[i], results_host, dists);
-
- std::sort(results_host_gpu.begin(), results_host_gpu.end());
- std::sort(results_host.begin(), results_host.end());
-
- ASSERT_EQ ( (results_host_gpu == results_host ), true );
- ASSERT_EQ ( (results_host_gpu == data.bfresutls[i]), true );
- sizes.push_back(results_host.size());
- }
-
- float avg_size = std::accumulate(sizes.begin(), sizes.end(), 0) * (1.f/sizes.size());;
-
- std::cout << "avg_result_size = " << avg_size << std::endl;
- ASSERT_GT(avg_size, 5);
-}
-
-
-int main (int argc, char** argv)
-{
- const int device = 0;
- pcl::gpu::setDevice(device);
- pcl::gpu::printShortCudaDeviceInfo(device);
- testing::InitGoogleTest (&argc, argv);
- return (RUN_ALL_TESTS ());
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#if defined _MSC_VER
- #pragma warning (disable : 4996 4530)
-#endif
-
-#include <gtest/gtest.h>
-
-#include<iostream>
-#include<fstream>
-#include<algorithm>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-#include <pcl/point_cloud.h>
-#include <pcl/octree/octree_search.h>
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-#include <pcl/common/time.h>
-#include "data_source.hpp"
-
-using namespace pcl::gpu;
-using namespace std;
-
-
-struct PriorityPair
-{
- int index;
- float dist2;
-
- bool operator<(const PriorityPair& other) const { return dist2 < other.dist2; }
-
- bool operator==(const PriorityPair& other) const { return dist2 == other.dist2 && index == other.index; }
-};
-
-//TEST(PCL_OctreeGPU, DISABLED_exactNeighbourSearch)
-TEST(PCL_OctreeGPU, exactNeighbourSearch)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/30.f;
- data.shared_radius = data.cube_size/30.f;
- data.printParams();
-
- const float host_octree_resolution = 25.f;
- const int k = 1; // only this is supported
-
- //generate
- data();
-
- //prepare device cloud
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
- //prepare host cloud
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
- cloud_host->width = data.points.size();
- cloud_host->height = 1;
- cloud_host->points.resize (cloud_host->width * cloud_host->height);
- std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
-
- //gpu build
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- octree_device.build();
-
- //build host octree
- pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
- octree_host.setInputCloud (cloud_host);
- octree_host.addPointsFromInputCloud();
-
- //upload queries
- pcl::gpu::Octree::Queries queries_device;
- queries_device.upload(data.queries);
-
- //prepare output buffers on device
- pcl::gpu::NeighborIndices result_device(data.tests_num, k);
-
- //prepare output buffers on host
- std::vector<vector< int> > result_host(data.tests_num);
- std::vector<vector<float> > dists_host(data.tests_num);
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- result_host[i].reserve(k);
- dists_host[i].reserve(k);
- }
-
- //search GPU shared
- {
- pcl::ScopeTime time("1nn-gpu");
- octree_device.nearestKSearchBatch(queries_device, k, result_device);
- }
-
- std::vector<int> downloaded, downloaded_cur;
- result_device.data.download(downloaded);
-
- {
- pcl::ScopeTime time("1nn-cpu");
- for(std::size_t i = 0; i < data.tests_num; ++i)
- octree_host.nearestKSearch(data.queries[i], k, result_host[i], dists_host[i]);
- }
-
- //verify results
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- //std::cout << i << std::endl;
- std::vector<int>& results_host_cur = result_host[i];
- std::vector<float>& dists_host_cur = dists_host[i];
-
- int beg = i * k;
- int end = beg + k;
-
- downloaded_cur.assign(downloaded.begin() + beg, downloaded.begin() + end);
-
- std::vector<PriorityPair> pairs_host;
- std::vector<PriorityPair> pairs_gpu;
- for(int n = 0; n < k; ++n)
- {
- PriorityPair host;
- host.index = results_host_cur[n];
- host.dist2 = dists_host_cur[n];
- pairs_host.push_back(host);
-
- PriorityPair gpu;
- gpu.index = downloaded_cur[n];
-
- float dist = (data.queries[i].getVector3fMap() - data.points[gpu.index].getVector3fMap()).norm();
- gpu.dist2 = dist * dist;
- pairs_gpu.push_back(gpu);
- }
-
- std::sort(pairs_host.begin(), pairs_host.end());
- std::sort(pairs_gpu.begin(), pairs_gpu.end());
-
- while (pairs_host.size ())
- {
- ASSERT_EQ ( pairs_host.back().index , pairs_gpu.back().index );
- EXPECT_NEAR ( pairs_host.back().dist2 , pairs_gpu.back().dist2, 1e-2 );
-
- pairs_host.pop_back();
- pairs_gpu.pop_back();
- }
- }
-}
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Copyright (c) 2011, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
- */
-
-#include <gtest/gtest.h>
-
-#include <iostream>
-#include <fstream>
-#include <numeric>
-
-#if defined _MSC_VER
- #pragma warning (disable: 4521)
-#endif
-
-#include <pcl/point_cloud.h>
-
-#if defined _MSC_VER
- #pragma warning (default: 4521)
-#endif
-
-#include <pcl/gpu/octree/octree.hpp>
-#include <pcl/gpu/containers/device_array.h>
-
-#include "data_source.hpp"
-
-using namespace std;
-using namespace pcl::gpu;
-
-//TEST(PCL_OctreeGPU, DISABLED_batchRadiusSearch)
-TEST(PCL_OctreeGPU, batchRadiusSearch)
-{
- DataGenerator data;
- data.data_size = 871000;
- data.tests_num = 10000;
- data.cube_size = 1024.f;
- data.max_radius = data.cube_size/30.f;
- data.shared_radius = data.cube_size/30.f;
- data.printParams();
-
- const int max_answers = 333;
-
- //generate
- data();
-
- //prepare gpu cloud
-
- pcl::gpu::Octree::PointCloud cloud_device;
- cloud_device.upload(data.points);
-
- //gpu build
- pcl::gpu::Octree octree_device;
- octree_device.setCloud(cloud_device);
- octree_device.build();
-
- //upload queries
- pcl::gpu::Octree::Queries queries_device;
- pcl::gpu::Octree::Radiuses radiuses_device;
- queries_device.upload(data.queries);
- radiuses_device.upload(data.radiuses);
-
- //prepare output buffers on device
-
- pcl::gpu::NeighborIndices result_device1(queries_device.size(), max_answers);
- pcl::gpu::NeighborIndices result_device2(queries_device.size(), max_answers);
- pcl::gpu::NeighborIndices result_device3(data.indices.size(), max_answers);
-
- //prepare output buffers on host
- std::vector< std::vector<int> > host_search1(data.tests_num);
- std::vector< std::vector<int> > host_search2(data.tests_num);
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- host_search1[i].reserve(max_answers);
- host_search2[i].reserve(max_answers);
- }
-
- //search GPU shared
- octree_device.radiusSearch(queries_device, data.shared_radius, max_answers, result_device1);
-
- //search GPU individual
- octree_device.radiusSearch(queries_device, radiuses_device, max_answers, result_device2);
-
- //search GPU shared with indices
- pcl::gpu::Octree::Indices indices;
- indices.upload(data.indices);
- octree_device.radiusSearch(queries_device, indices, data.shared_radius, max_answers, result_device3);
-
- //search CPU
- octree_device.internalDownload();
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- octree_device.radiusSearchHost(data.queries[i], data.shared_radius, host_search1[i], max_answers);
- octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], host_search2[i], max_answers);
- }
-
- //download results
- std::vector<int> sizes1;
- std::vector<int> sizes2;
- std::vector<int> sizes3;
- result_device1.sizes.download(sizes1);
- result_device2.sizes.download(sizes2);
- result_device3.sizes.download(sizes3);
-
- std::vector<int> downloaded_buffer1, downloaded_buffer2, downloaded_buffer3, results_batch;
- result_device1.data.download(downloaded_buffer1);
- result_device2.data.download(downloaded_buffer2);
- result_device3.data.download(downloaded_buffer3);
-
- //data.bruteForceSearch();
-
- //verify results
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- std::vector<int>& results_host = host_search1[i];
-
- int beg = i * max_answers;
- int end = beg + sizes1[i];
-
- results_batch.assign(downloaded_buffer1.begin() + beg, downloaded_buffer1.begin() + end);
-
- std::sort(results_batch.begin(), results_batch.end());
- std::sort(results_host.begin(), results_host.end());
-
- if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
- results_host.resize(max_answers);
-
- ASSERT_EQ ( ( results_batch == results_host ), true );
-
- //vector<int>& results_bf = data.bfresutls[i];
- //ASSERT_EQ ( ( results_bf == results_batch), true );
- //ASSERT_EQ ( ( results_bf == results_host ), true );
- }
-
- float avg_size1 = std::accumulate(sizes1.begin(), sizes1.end(), 0) * (1.f/sizes1.size());
-
- std::cout << "avg_result_size1 = " << avg_size1 << std::endl;
- ASSERT_GT(avg_size1, 5);
-
-
- //verify results
- for(std::size_t i = 0; i < data.tests_num; ++i)
- {
- std::vector<int>& results_host = host_search2[i];
-
- int beg = i * max_answers;
- int end = beg + sizes2[i];
-
- results_batch.assign(downloaded_buffer2.begin() + beg, downloaded_buffer2.begin() + end);
-
- std::sort(results_batch.begin(), results_batch.end());
- std::sort(results_host.begin(), results_host.end());
-
- if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
- results_host.resize(max_answers);
-
- ASSERT_EQ ( ( results_batch == results_host ), true );
-
- //vector<int>& results_bf = data.bfresutls[i];
- //ASSERT_EQ ( ( results_bf == results_batch), true );
- //ASSERT_EQ ( ( results_bf == results_host ), true );
- }
-
- float avg_size2 = std::accumulate(sizes2.begin(), sizes2.end(), 0) * (1.f/sizes2.size());
-
- std::cout << "avg_result_size2 = " << avg_size2 << std::endl;
- ASSERT_GT(avg_size2, 5);
-
-
- //verify results
- for(std::size_t i = 0; i < data.tests_num; i+=2)
- {
- std::vector<int>& results_host = host_search1[i];
-
- int beg = i/2 * max_answers;
- int end = beg + sizes3[i/2];
-
- results_batch.assign(downloaded_buffer3.begin() + beg, downloaded_buffer3.begin() + end);
-
- std::sort(results_batch.begin(), results_batch.end());
- std::sort(results_host.begin(), results_host.end());
-
- if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
- results_host.resize(max_answers);
-
- ASSERT_EQ ( ( results_batch == results_host ), true );
-
- //vector<int>& results_bf = data.bfresutls[i];
- //ASSERT_EQ ( ( results_bf == results_batch), true );
- //ASSERT_EQ ( ( results_bf == results_host ), true );
- }
-
- float avg_size3 = std::accumulate(sizes3.begin(), sizes3.end(), 0) * (1.f/sizes3.size());
-
- std::cout << "avg_result_size3 = " << avg_size3 << std::endl;
- ASSERT_GT(avg_size3, 5);
-}
-
#include "internal.h"
#include "cuda_async_copy.h"
-using namespace std;
-
const int MAX_CLUST_SIZE = 25000;
const float CLUST_TOL = 0.05f;
-pcl::gpu::people::RDFBodyPartsDetector::RDFBodyPartsDetector( const std::vector<string>& tree_files, int rows, int cols)
+pcl::gpu::people::RDFBodyPartsDetector::RDFBodyPartsDetector( const std::vector<std::string>& tree_files, int rows, int cols)
: max_cluster_size_(MAX_CLUST_SIZE), cluster_tolerance_(CLUST_TOL)
{
PCL_DEBUG("[pcl::gpu::people::RDFBodyPartsDetector::RDFBodyPartsDetector] : (D) : Constructor called\n");
for(std::size_t k = 0; k < dst_labels_.size(); ++k)
{
- const PointXYZ& p = cloud.points[k];
+ const PointXYZ& p = cloud[k];
int cc = dst_labels_[k];
means[cc].x += p.x;
means[cc].y += p.y;
for(std::size_t k = 0; k < dst_labels_.size(); ++k)
{
- const PointXYZ& p = cloud.points[k];
+ const PointXYZ& p = cloud[k];
int cc = dst_labels_[k];
means[cc].x += p.x;
means[cc].y += p.y;
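Note (editorial): the hunks above replace cloud.points[k] with cloud[k]. pcl::PointCloud forwards operator[], size() and begin()/end() to its internal points vector, so both spellings read the same element; a minimal sketch with an illustrative function name:

    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>

    float sum_x(const pcl::PointCloud<pcl::PointXYZ>& cloud)
    {
      float sum = 0.f;
      for (std::size_t i = 0; i < cloud.size(); ++i)  // size() == points.size()
        sum += cloud[i].x;                            // same element as cloud.points[i].x
      return sum;
    }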
#include <vector>
#include "internal.h"
-using namespace std;
using namespace pcl::gpu::people;
pcl::RGB pcl::gpu::people::getLColor (unsigned char l)
{
colormap_out.resize(cloud_in.size());
for(std::size_t i = 0; i < cloud_in.size (); i++)
- colormap_out.points[i] = getLColor(cloud_in.points[i]);
+ colormap_out[i] = getLColor(cloud_in[i]);
colormap_out.width = cloud_in.width;
colormap_out.height = cloud_in.height;
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
-using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
{
assert(!trees.empty());
- unsigned int numTrees = static_cast<int> (trees.size ());
+ unsigned int numTrees = static_cast<unsigned int> (trees.size ());
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
- for( int ti = 0; ti < numTrees; ++ti )
+ for(unsigned int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
- for( int ti = 0; ti < numTrees; ++ti )
+ for(unsigned int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
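Note (editorial): the change above keeps the loop index in the same unsigned type as numTrees, so the condition ti < numTrees compares like-signed values and avoids the usual mixed signed/unsigned warning (e.g. -Wsign-compare). A minimal sketch of the same idea with illustrative names:

    #include <vector>

    template <typename Tree>
    std::size_t count_trees(const std::vector<Tree>& trees)
    {
      const unsigned int numTrees = static_cast<unsigned int>(trees.size());
      std::size_t visited = 0;
      for (unsigned int ti = 0; ti < numTrees; ++ti)  // same signedness on both sides
        ++visited;                                    // stand-in for per-tree work
      return visited;
    }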
#include <algorithm>
#include "NCV.hpp"
-using namespace std;
-
//==============================================================================
//
//==============================================================================
-static void stdDebugOutput(const string &msg)
+static void stdDebugOutput(const std::string &msg)
{
std::cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
-void ncvDebugOutput(const string &msg)
+void ncvDebugOutput(const std::string &msg)
{
debugOutputHandler(msg);
}
size = alignUp(size, this->_alignment);
this->currentSize += size;
- this->_maxSize = max(this->_maxSize, this->currentSize);
+ this->_maxSize = std::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
}
this->currentSize += alignUp(size, this->_alignment);
- this->_maxSize = max(this->_maxSize, this->currentSize);
+ this->_maxSize = std::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
#ifndef PCL_GPU_PEOPLE__NCV_HPP_
#define PCL_GPU_PEOPLE__NCV_HPP_
-#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
+#if (defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
#define NCV_EXPORTS __declspec(dllexport)
#else
#define NCV_EXPORTS
}
else
{
- for (Ncv32u i=0; i<max(numDetGold, numDetections) && bPass; i++)
+ for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i])
{
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
- NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, max(szTmpBufIntegral, szTmpBufSqIntegral));
+ NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
-using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
float y1 = (b - sqrt (det)) / a;
float y2 = (b + sqrt (det)) / a;
- min = min (static_cast<int> (std::floor (y1)), static_cast<int> (std::floor (y2)));
- max = max (static_cast<int> (std::ceil (y1)), static_cast<int> (std::ceil (y2)));
- minY = min (rows - 1, max (0, min));
- maxY = max (min (rows - 1, max), 0);
+ min = std::min (static_cast<int> (std::floor (y1)), static_cast<int> (std::floor (y2)));
+ max = std::max (static_cast<int> (std::ceil (y1)), static_cast<int> (std::ceil (y2)));
+ minY = std::min (rows - 1, std::max (0, min));
+ maxY = std::max (std::min (rows - 1, max), 0);
}
b = squared_radius * coeff6 - q.x * q.z;
float x1 = (b - sqrt (det)) / a;
float x2 = (b + sqrt (det)) / a;
- min = min (static_cast<int> (std::floor (x1)), static_cast<int> (std::floor (x2)));
- max = max (static_cast<int> (std::ceil (x1)), static_cast<int> (std::ceil (x2)));
- minX = min (cols- 1, max (0, min));
- maxX = max (min (cols - 1, max), 0);
+ min = std::min (static_cast<int> (std::floor (x1)), static_cast<int> (std::floor (x2)));
+ max = std::max (static_cast<int> (std::ceil (x1)), static_cast<int> (std::ceil (x2)));
+ minX = std::min (cols- 1, std::max (0, min));
+ maxX = std::max (std::min (cols - 1, max), 0);
}
}
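Note (editorial): the std:: qualification above matters because the function keeps local ints named min and max; once using namespace std is removed, an unqualified min(rows - 1, ...) would resolve to the int variable and fail to compile. A minimal sketch of that shadowing issue, with illustrative names:

    #include <algorithm>

    void clamp_rows(int rows, float y1, float y2, int& minY, int& maxY)
    {
      int min = static_cast<int>(y1);  // local variable shadows any min() in scope
      int max = static_cast<int>(y2);
      // Unqualified min(rows - 1, ...) would try to "call" the int above;
      // the qualified calls always name the <algorithm> function templates.
      minY = std::min(rows - 1, std::max(0, min));
      maxY = std::max(std::min(rows - 1, max), 0);
    }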
cv::Mat huebuf(cloud.height, cloud.width, CV_32F);
float *hue = huebuf.ptr<float>();
- for(std::size_t i = 0; i < cloud.points.size(); ++i)
+ for(std::size_t i = 0; i < cloud.size(); ++i)
{
PointXYZHSV h;
- PointXYZRGB p = cloud.points[i];
+ PointXYZRGB p = cloud[i];
PointXYZRGBtoXYZHSV(p, h);
hue[i] = h.h;
}
int sq_idx = 0;
seed_queue.push_back (i);
- PointXYZRGB p = cloud.points[i];
+ PointXYZRGB p = cloud[i];
float h = hue[i];
while (sq_idx < (int)seed_queue.size ())
{
int index = seed_queue[sq_idx];
- const PointXYZRGB& q = cloud.points[index];
+ const PointXYZRGB& q = cloud[index];
if(!isFinite (q))
continue;
if (mask[idx])
continue;
- if (sqnorm(cloud.points[idx], q) <= squared_radius)
+ if (sqnorm(cloud[idx], q) <= squared_radius)
{
float h_l = hue[idx];
cloud_out.width = input_gray.width;
cloud_out.height = input_gray.height;
- cloud_out.points.resize (input_gray.points.size ());
+ cloud_out.points.resize (input_gray.size ());
PCL_ASSERT_ERROR_PRINT_RETURN(!gpu_allocator.isCounting(),"retcode=NCV_NULL_PTR", NCV_NULL_PTR);
{
for(const int &index : inlier_index.indices) // iterate over all the indices in that plane
{
- P_l_host_.points[index].probs[pcl::gpu::people::Background] = 1.f; // set background at max
+ P_l_host_[index].probs[pcl::gpu::people::Background] = 1.f; // set background at max
}
}
}
pcl::gpu::people::OrganizedPlaneDetector::copyHostLabelProbability(HostLabelProbability& src,
HostLabelProbability& dst)
{
- if(src.points.size() != dst.points.size())
+ if(src.size() != dst.size())
{
PCL_ERROR("[pcl::gpu::people::OrganizedPlaneDetector::copyHostLabelProbability] : (E) : Sizes don't match\n");
return -1;
}
- for(std::size_t hist = 0; hist < src.points.size(); hist++)
+ for(std::size_t hist = 0; hist < src.size(); hist++)
{
for(int label = 0; label < pcl::gpu::people::NUM_LABELS; label++)
{
- dst.points[hist].probs[label] = src.points[hist].probs[label];
+ dst[hist].probs[label] = src[hist].probs[label];
}
}
return 1;
pcl::gpu::people::OrganizedPlaneDetector::copyAndClearHostLabelProbability(HostLabelProbability& src,
HostLabelProbability& dst)
{
- if(src.points.size() != dst.points.size())
+ if(src.size() != dst.size())
{
PCL_ERROR("[pcl::gpu::people::OrganizedPlaneDetector::copyHostLabelProbability] : (E) : Sizes don't match\n");
return -1;
}
- for(std::size_t hist = 0; hist < src.points.size(); hist++)
+ for(std::size_t hist = 0; hist < src.size(); hist++)
{
for(int label = 0; label < pcl::gpu::people::NUM_LABELS; label++)
{
- dst.points[hist].probs[label] = src.points[hist].probs[label];
- src.points[hist].probs[label] = 0.f;
+ dst[hist].probs[label] = src[hist].probs[label];
+ src[hist].probs[label] = 0.f;
}
}
return 1;
#define CLUST_TOL_SHS 0.05
#define DELTA_HUE_SHS 5
-using namespace std;
using namespace pcl;
using namespace pcl::gpu::people;
const float qnan = std::numeric_limits<float>::quiet_NaN();
- for(std::size_t i = 0; i < cloud->points.size(); ++i)
+ for(std::size_t i = 0; i < cloud->size(); ++i)
{
- cloud_host_.points[i].x = cloud->points[i].x;
- cloud_host_.points[i].y = cloud->points[i].y;
- cloud_host_.points[i].z = cloud->points[i].z;
+ cloud_host_[i].x = (*cloud)[i].x;
+ cloud_host_[i].y = (*cloud)[i].y;
+ cloud_host_[i].z = (*cloud)[i].z;
- bool valid = isFinite(cloud_host_.points[i]);
+ bool valid = isFinite(cloud_host_[i]);
- hue_host_.points[i] = !valid ? qnan : device::computeHue(cloud->points[i].rgba);
- depth_host_.points[i] = !valid ? 0 : static_cast<unsigned short>(cloud_host_.points[i].z * 1000); //m -> mm
+ hue_host_[i] = !valid ? qnan : device::computeHue((*cloud)[i].rgba);
+ depth_host_[i] = !valid ? 0 : static_cast<unsigned short>(cloud_host_[i].z * 1000); //m -> mm
}
cloud_device_.upload(cloud_host_.points, cloud_host_.width);
hue_device_.upload(hue_host_.points, hue_host_.width);
std::fill(flowermat_host_.points.begin(), flowermat_host_.points.end(), 0);
{
//ScopeTime time("shs");
- shs5(cloud_host_, seed, &flowermat_host_.points[0]);
+ shs5(cloud_host_, seed, &flowermat_host_[0]);
}
int cols = cloud_device_.cols();
const float qnan = std::numeric_limits<float>::quiet_NaN();
- for(std::size_t i = 0; i < cloud->points.size(); ++i)
+ for(std::size_t i = 0; i < cloud->size(); ++i)
{
- cloud_host_color_.points[i].x = cloud_host_.points[i].x = cloud->points[i].x;
- cloud_host_color_.points[i].y = cloud_host_.points[i].y = cloud->points[i].y;
- cloud_host_color_.points[i].z = cloud_host_.points[i].z = cloud->points[i].z;
- cloud_host_color_.points[i].rgba = cloud->points[i].rgba;
+ cloud_host_color_[i].x = cloud_host_[i].x = (*cloud)[i].x;
+ cloud_host_color_[i].y = cloud_host_[i].y = (*cloud)[i].y;
+ cloud_host_color_[i].z = cloud_host_[i].z = (*cloud)[i].z;
+ cloud_host_color_[i].rgba = (*cloud)[i].rgba;
- bool valid = isFinite(cloud_host_.points[i]);
+ bool valid = isFinite(cloud_host_[i]);
- hue_host_.points[i] = !valid ? qnan : device::computeHue(cloud->points[i].rgba);
- depth_host_.points[i] = !valid ? 0 : static_cast<unsigned short>(cloud_host_.points[i].z * 1000); //m -> mm
+ hue_host_[i] = !valid ? qnan : device::computeHue((*cloud)[i].rgba);
+ depth_host_[i] = !valid ? 0 : static_cast<unsigned short>(cloud_host_[i].z * 1000); //m -> mm
}
cloud_device_.upload(cloud_host_.points, cloud_host_.width);
hue_device_.upload(hue_host_.points, hue_host_.width);
std::fill(flowermat_host_.points.begin(), flowermat_host_.points.end(), 0);
{
//ScopeTime time("shs");
- shs5(cloud_host_, seed, &flowermat_host_.points[0]);
+ shs5(cloud_host_, seed, &flowermat_host_[0]);
}
int cols = cloud_device_.cols();
pcl::device::Intr intr(fx_, fy_, cx_, cy_);
intr.setDefaultPPIfIncorrect(cloud.width, cloud.height);
- const float *hue = &hue_host_.points[0];
+ const float *hue = &hue_host_[0];
double squared_radius = CLUST_TOL_SHS * CLUST_TOL_SHS;
std::vector< std::vector<int> > storage(100);
while (sq_idx < (int)seed_queue.size ())
{
int index = seed_queue[sq_idx];
- const PointT& q = cloud.points[index];
+ const PointT& q = cloud[index];
if(!pcl::isFinite (q))
continue;
if (mask[idx])
continue;
- if (sqnorm(cloud.points[idx], q) <= squared_radius)
+ if (sqnorm(cloud[idx], q) <= squared_radius)
{
float h_l = hue[idx];
people_detector_.depth_device1_.download(depth_host_.points, c);
}
- depth_view_.showShortImage(&depth_host_.points[0], depth_host_.width, depth_host_.height, 0, 5000, true);
+ depth_view_.showShortImage(&depth_host_[0], depth_host_.width, depth_host_.height, 0, 5000, true);
depth_view_.spinOnce(1, true);
if (write_)
depth_host_.points.resize(w *h);
depth_host_.width = w;
depth_host_.height = h;
- std::copy(data, data + w * h, &depth_host_.points[0]);
+ std::copy(data, data + w * h, &depth_host_[0]);
//getting image
w = image_wrapper->getWidth();
for(std::size_t i = 0; i < rgba_host_.size(); ++i)
{
const unsigned char *pixel = &rgb_host_[i * 3];
- pcl::RGB& rgba = rgba_host_.points[i];
+ pcl::RGB& rgba = rgba_host_[i];
rgba.r = pixel[0];
rgba.g = pixel[1];
rgba.b = pixel[2];
}
- image_device_.upload(&rgba_host_.points[0], s, h, w);
+ image_device_.upload(&rgba_host_[0], s, h, w);
}
data_ready_cond_.notify_one();
}
using namespace pcl::visualization;
using namespace pcl::console;
using namespace pcl::gpu;
-using namespace std;
using PointT = pcl::PointXYZRGBA;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-string
+std::string
make_name(int counter, const char* suffix)
{
char buf[4096];
return buf;
}
-string
+std::string
make_ext_name(int counter1, int counter2, const char* suffix)
{
char buf[4096];
void
writeXMLFile(std::string& filename) const
{
- filebuf fb;
- fb.open (filename.c_str(), ios::out);
+ std::filebuf fb;
+ fb.open (filename.c_str(), std::ios::out);
ostream os(&fb);
people_detector_.person_attribs_->writePersonXMLConfig(os);
fb.close();
void
readXMLFile(std::string& filename) const
{
- filebuf fb;
- fb.open (filename.c_str(), ios::in);
+ std::filebuf fb;
+ fb.open (filename.c_str(), std::ios::in);
istream is(&fb);
people_detector_.person_attribs_->readPersonXMLConfig(is);
fb.close();
// loading trees
using pcl::gpu::people::RDFBodyPartsDetector;
- std::vector<string> names_vector(treeFilenames, treeFilenames + numTrees);
+ std::vector<std::string> names_vector(treeFilenames, treeFilenames + numTrees);
PCL_DEBUG("[Main] : (D) : Trees collected\n");
RDFBodyPartsDetector::Ptr rdf(new RDFBodyPartsDetector(names_vector));
PCL_DEBUG("[Main] : (D) : Loaded files into rdf\n");
// Create a bool vector of processed point indices, and initialize it to false
// cloud is a DeviceArray<PointType>
- std::vector<bool> processed (host_cloud_->points.size (), false);
+ std::vector<bool> processed (host_cloud_->size (), false);
int max_answers;
- if(max_pts_per_cluster > host_cloud_->points.size())
- max_answers = host_cloud_->points.size();
+ if(max_pts_per_cluster > host_cloud_->size())
+ max_answers = host_cloud_->size();
else
max_answers = max_pts_per_cluster;
std::cout << "Max_answers: " << max_answers << std::endl;
queries_device_buffer.create(max_answers);
// Process all points in the cloud
- for (std::size_t i = 0; i < host_cloud_->points.size (); ++i)
+ for (std::size_t i = 0; i < host_cloud_->size (); ++i)
{
// if we already processed this point continue with the next one
if (processed[i])
// Create the query queue on the host
pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
// Push the starting point in the vector
- queries_host.push_back (host_cloud_->points[i]);
+ queries_host.push_back ((*host_cloud_)[i]);
// Clear vector
r.indices.clear();
// Push the starting point in
if(processed[data[i]])
continue;
processed[data[i]] = true;
- queries_host.push_back (host_cloud_->points[data[i]]);
+ queries_host.push_back ((*host_cloud_)[data[i]]);
found_points++;
r.indices.push_back(data[i]);
}
if(processed[data[qp_r + qp * max_answers]])
continue;
processed[data[qp_r + qp * max_answers]] = true;
- queries_host.push_back (host_cloud_->points[data[qp_r + qp * max_answers]]);
+ queries_host.push_back ((*host_cloud_)[data[qp_r + qp * max_answers]]);
found_points++;
r.indices.push_back(data[qp_r + qp * max_answers]);
}
tree_->build();
}
/*
- if(tree_->cloud_.size() != host_cloud.points.size ())
+ if(tree_->cloud_.size() != host_cloud.size ())
{
PCL_ERROR("[pcl::gpu::EuclideanClusterExtraction] size of host cloud and device cloud don't match!\n");
return;
// Create a bool vector of processed point indices, and initialize it to false
// cloud is a DeviceArray<PointType>
- std::vector<bool> processed (host_cloud_->points.size (), false);
+ std::vector<bool> processed (host_cloud_->size (), false);
int max_answers;
- if(max_pts_per_cluster > host_cloud_->points.size ())
- max_answers = static_cast<int> (host_cloud_->points.size ());
+ if(max_pts_per_cluster > host_cloud_->size ())
+ max_answers = static_cast<int> (host_cloud_->size ());
else
max_answers = max_pts_per_cluster;
pcl::PointIndices r;
// Process all points in the cloud
- for (std::size_t i = 0; i < host_cloud_->points.size (); ++i)
+ for (std::size_t i = 0; i < host_cloud_->size (); ++i)
{
// if we already processed this point continue with the next one
if (processed[i])
pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
// Buffer in a new PointXYZ type
- PointT t = host_cloud_->points[i];
+ PointT t = (*host_cloud_)[i];
PointXYZ p;
p.x = t.x; p.y = t.y; p.z = t.z;
if(processed[data[qp_r + qp * max_answers]])
continue;
// Only add if label matches the original label
- if(host_cloud_->points[i].label == host_cloud_->points[data[qp_r + qp * max_answers]].label)
+ if((*host_cloud_)[i].label == (*host_cloud_)[data[qp_r + qp * max_answers]].label)
{
processed[data[qp_r + qp * max_answers]] = true;
- PointT t_l = host_cloud_->points[data[qp_r + qp * max_answers]];
+ PointT t_l = (*host_cloud_)[data[qp_r + qp * max_answers]];
PointXYZ p_l;
p_l.x = t_l.x; p_l.y = t_l.y; p_l.z = t_l.z;
queries_host.push_back (p_l);
tree_->build();
}
/*
- if(tree_->cloud_.size() != host_cloud.points.size ())
+ if(tree_->cloud_.size() != host_cloud.size ())
{
PCL_ERROR("[pcl::gpu::EuclideanClusterExtraction] size of host cloud and device cloud don't match!\n");
return;
// Create a bool vector of processed point indices, and initialize it to false
// cloud is a DeviceArray<PointType>
- std::vector<bool> processed (host_cloud_->points.size (), false);
+ std::vector<bool> processed (host_cloud_->size (), false);
- int max_answers = host_cloud_->points.size();
+ const auto max_answers = host_cloud_->size();
// Process all points in the indices vector
for (std::size_t k = 0; k < indices_in.indices.size (); ++k)
processed[i] = true;
PointXYZRGB p;
- p = host_cloud_->points[i];
+ p = (*host_cloud_)[i];
PointXYZHSV h;
PointXYZRGBtoXYZHSV(p, h);
// Create the query queue on the host
pcl::PointCloud<pcl::PointXYZ>::VectorType queries_host;
// Push the starting point in the vector
- queries_host.push_back (host_cloud_->points[i]);
+ queries_host.push_back ((*host_cloud_)[i]);
unsigned int found_points = queries_host.size ();
unsigned int previous_found_points = 0;
continue;
PointXYZRGB p_l;
- p_l = host_cloud_->points[data[qp_r + qp * max_answers]];
+ p_l = (*host_cloud_)[data[qp_r + qp * max_answers]];
PointXYZHSV h_l;
PointXYZRGBtoXYZHSV(p_l, h_l);
if (std::abs(h_l.h - h.h) < delta_hue)
{
processed[data[qp_r + qp * max_answers]] = true;
- queries_host.push_back (host_cloud_->points[data[qp_r + qp * max_answers]]);
+ queries_host.push_back ((*host_cloud_)[data[qp_r + qp * max_answers]]);
found_points++;
}
}
tree_->build();
}
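// Minimal sketch of the hue test used in the loop above, assuming
// pcl::PointXYZRGBtoXYZHSV from <pcl/point_types_conversion.h>: a neighbour is
// added to the segment only if its hue differs from the seed hue by less than
// delta_hue.
#include <pcl/point_types.h>
#include <pcl/point_types_conversion.h>
#include <cmath>

inline bool
hueCloseEnough (const pcl::PointXYZRGB &seed, const pcl::PointXYZRGB &candidate, float delta_hue)
{
  pcl::PointXYZHSV seed_hsv, candidate_hsv;
  pcl::PointXYZRGBtoXYZHSV (seed, seed_hsv);
  pcl::PointXYZRGBtoXYZHSV (candidate, candidate_hsv);
  return std::abs (candidate_hsv.h - seed_hsv.h) < delta_hue;
}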
/*
- if(tree_->cloud_.size() != host_cloud.points.size ())
+ if(tree_->cloud_.size() != host_cloud.size ())
{
PCL_ERROR("[pcl::gpu::SeededHueSegmentation] size of host cloud and device cloud don't match!\n");
return;
bool
pcl::device::FacetStream::canSplit() const
{
- return facet_count * 3 < verts_inds.cols();
+ return static_cast<Eigen::Index>(facet_count * 3) < verts_inds.cols();
}
struct pcl::gpu::PseudoConvexHull3D::Impl
#include <thrust/gather.h>
using namespace thrust;
-using namespace std;
namespace pcl
{
#include <pcl/common/time.h>
using namespace pcl::gpu;
-using namespace std;
-
int loadCloud(const std::string& file, pcl::PointCloud<pcl::PointXYZ>& cloud)
{
if (result != -1)
return result;
- string name = file.substr(0, file.find_last_of("."));
+ std::string name = file.substr(0, file.find_last_of("."));
result = pcl::io::loadPCDFile(name + ".pcd", cloud);
if (result != -1)
#ifndef PCL_GPU_DEVICE_EMULATION_HPP_
#define PCL_GPU_DEVICE_EMULATION_HPP_
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
#include <pcl/gpu/device/warp_reduce.hpp>
namespace pcl
{
static __forceinline__ __device__ int ballot(int predicate, volatile int* cta_buffer)
{
- (void)cta_buffer;
+ pcl::utils::ignore(cta_buffer);
return __ballot(predicate);
}
};
}
}
-#endif /* PCL_GPU_DEVICE_EMULATION_HPP_ */
\ No newline at end of file
+#endif /* PCL_GPU_DEVICE_EMULATION_HPP_ */
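// The pcl::utils::ignore helper used in these hunks (declared in
// <pcl/common/utils.h>) replaces the old "(void)arg;" idiom. It boils down to a
// variadic no-op along these lines, shown here as a sketch in a local namespace:
namespace example
{
  // Marks any number of arguments as used, silencing unused-parameter warnings;
  // the empty body compiles away entirely.
  template <typename... T> void
  ignore (const T&...) {}
} // namespace example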
#ifndef __PCL_CUDA_SAFE_CALL_HPP__
#define __PCL_CUDA_SAFE_CALL_HPP__
-#include "cuda_runtime_api.h"
+#include <cuda_runtime_api.h>
#include <pcl/gpu/containers/initialization.h>
#if defined(__GNUC__)
namespace octree
{
-using namespace std;
-
/** \brief @b ColorCoding class
* \note This class encodes 8-bit color information for octree-based point cloud compression.
* \note
{
// get color information from points
const int& idx = indexVector_arg[i];
- const char* idxPointPtr = reinterpret_cast<const char*> (&inputCloud_arg->points[idx]);
+ const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
// add color information
{
// get color information from point
const int& idx = indexVector_arg[i];
- const char* idxPointPtr = reinterpret_cast<const char*> (&inputCloud_arg->points[idx]);
+ const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
// add color information
for (std::size_t i = 0; i < len; i++)
{
const int& idx = indexVector_arg[i];
- const char* idxPointPtr = reinterpret_cast<const char*> (&inputCloud_arg->points[idx]);
+ const char* idxPointPtr = reinterpret_cast<const char*> (&(*inputCloud_arg)[idx]);
const int& colorInt = *reinterpret_cast<const int*> (idxPointPtr+rgba_offset_arg);
// extract color components and do XOR encoding with predicted average color
colorInt = (avgRed << 0) | (avgGreen << 8) | (avgBlue << 16);
}
- char* idxPointPtr = reinterpret_cast<char*> (&outputCloud_arg->points[beginIdx_arg + i]);
+ char* idxPointPtr = reinterpret_cast<char*> (&(*outputCloud_arg)[beginIdx_arg + i]);
int& pointColor = *reinterpret_cast<int*> (idxPointPtr+rgba_offset_arg);
// assign color to point from point cloud
pointColor=colorInt;
// iterate over points
for (std::size_t i = 0; i < pointCount; i++)
{
- char* idxPointPtr = reinterpret_cast<char*> (&outputCloud_arg->points[beginIdx_arg + i]);
+ char* idxPointPtr = reinterpret_cast<char*> (&(*outputCloud_arg)[beginIdx_arg + i]);
int& pointColor = *reinterpret_cast<int*> (idxPointPtr+rgba_offset_arg);
// assign color to point from point cloud
pointColor = defaultColor_;
if (!do_voxel_grid_enDecoding_)
{
point_count_data_vector_.clear ();
- point_count_data_vector_.reserve (cloud_arg->points.size ());
+ point_count_data_vector_.reserve (cloud_arg->size ());
}
// initialize color encoding
color_coder_.initializeEncoding ();
- color_coder_.setPointCount (static_cast<unsigned int> (cloud_arg->points.size ()));
+ color_coder_.setPointCount (static_cast<unsigned int> (cloud_arg->size ()));
color_coder_.setVoxelCount (static_cast<unsigned int> (this->leaf_count_));
// initialize point encoding
point_coder_.initializeEncoding ();
- point_coder_.setPointCount (static_cast<unsigned int> (cloud_arg->points.size ()));
+ point_coder_.setPointCount (static_cast<unsigned int> (cloud_arg->size ()));
// serialize octree
if (i_frame_)
// assign point cloud properties
output_->height = 1;
- output_->width = static_cast<std::uint32_t> (cloud_arg->points.size ());
+ output_->width = cloud_arg->size ();
output_->is_dense = false;
if (b_show_statistics_)
if (!do_voxel_grid_enDecoding_)
{
// get current cloud size
- std::size_t cloudSize = output_->points.size ();
+ const auto cloudSize = output_->size ();
// get amount of point to be decoded
pointCount = *point_count_data_vector_iterator_;
{
if (data_with_color_)
// decode color information
- color_coder_.decodePoints (output_, output_->points.size () - pointCount,
- output_->points.size (), point_color_offset_);
+ color_coder_.decodePoints (output_, output_->size () - pointCount,
+ output_->size (), point_color_offset_);
else
// set default color information
- color_coder_.setDefaultColor (output_, output_->points.size () - pointCount,
- output_->points.size (), point_color_offset_);
+ color_coder_.setDefaultColor (output_, output_->size () - pointCount,
+ output_->size (), point_color_offset_);
}
}
}
// Ensure we have an organized point cloud
assert((width>1) && (height>1));
- assert(width*height == cloud_arg->points.size());
+ assert(width*height == cloud_arg->size());
float maxDepth = 0;
float focalLength = 0;
for (int y = -centerY; y < centerY; ++y )
for (int x = -centerX; x < centerX; ++x )
{
- const PointT& point = cloud_arg->points[it++];
+ const PointT& point = (*cloud_arg)[it++];
if (pcl::isFinite (point))
{
#pragma once
+#include <cstdint> // uint8_t, uint16_t
#include <vector>
-#include <pcl/common/common.h>
-#include <pcl/common/io.h>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
namespace pcl
{
typename std::vector<std::uint16_t>& disparityData_arg,
typename std::vector<std::uint8_t>&)
{
- std::size_t cloud_size = cloud_arg.points.size ();
+ const auto cloud_size = cloud_arg.size ();
// Clear image data
disparityData_arg.clear ();
for (std::size_t i = 0; i < cloud_size; ++i)
{
// Get point from cloud
- const PointT& point = cloud_arg.points[i];
+ const PointT& point = cloud_arg[i];
if (pcl::isFinite (point))
{
if (pixel_depth)
{
- // Inverse depth decoding
- float depth = focalLength_arg / pixel_depth;
-
// Generate new points
- newPoint.x = static_cast<float> (x) * depth * fl_const;
- newPoint.y = static_cast<float> (y) * depth * fl_const;
- newPoint.z = depth;
+ newPoint.x = static_cast<float> (x) * pixel_depth * fl_const;
+ newPoint.y = static_cast<float> (y) * pixel_depth * fl_const;
+ newPoint.z = pixel_depth;
}
else
typename std::vector<std::uint16_t>& disparityData_arg,
typename std::vector<std::uint8_t>& rgbData_arg)
{
- std::size_t cloud_size = cloud_arg.points.size ();
+ const auto cloud_size = cloud_arg.size ();
// Reset output vectors
disparityData_arg.clear ();
for (std::size_t i = 0; i < cloud_size; ++i)
{
- const PointT& point = cloud_arg.points[i];
+ const PointT& point = cloud_arg[i];
if (pcl::isFinite (point))
{
const float& pixel_depth = depthData_arg[i];
- if (pixel_depth==pixel_depth)
+ if (pixel_depth)
{
- float depth = focalLength_arg / pixel_depth;
-
// Define point location
- newPoint.z = depth;
- newPoint.x = static_cast<float> (x) * depth * fl_const;
- newPoint.y = static_cast<float> (y) * depth * fl_const;
+ newPoint.z = pixel_depth;
+ newPoint.x = static_cast<float> (x) * pixel_depth * fl_const;
+ newPoint.y = static_cast<float> (y) * pixel_depth * fl_const;
if (hasColor)
{
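// The reconstruction above is a plain pinhole back-projection: with the pixel
// offset (x, y) measured from the image centre, the depth z, and fl_const
// assumed to be the reciprocal of the focal length f (in pixels), the point is
// X = x * z / f, Y = y * z / f, Z = z. A self-contained sketch of that mapping:
#include <pcl/point_types.h>

inline pcl::PointXYZ
backProject (int x, int y, float depth, float focal_length)
{
  pcl::PointXYZ p;
  p.x = static_cast<float> (x) * depth / focal_length;
  p.y = static_cast<float> (y) * depth / focal_length;
  p.z = depth;
  return p;
}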
#pragma once
+#include <algorithm>
#include <cstdio>
#include <cstring>
#include <iostream>
// retrieve point from cloud
const int& idx = indexVector_arg[i];
- const PointT& idxPoint = inputCloud_arg->points[idx];
+ const PointT& idxPoint = (*inputCloud_arg)[idx];
// differentially encode point coordinates and truncate overflow
- diffX = static_cast<unsigned char> (max (-127, min<int>(127, static_cast<int> ((idxPoint.x - referencePoint_arg[0]) / pointCompressionResolution_))));
- diffY = static_cast<unsigned char> (max (-127, min<int>(127, static_cast<int> ((idxPoint.y - referencePoint_arg[1]) / pointCompressionResolution_))));
- diffZ = static_cast<unsigned char> (max (-127, min<int>(127, static_cast<int> ((idxPoint.z - referencePoint_arg[2]) / pointCompressionResolution_))));
+ diffX = static_cast<unsigned char> (std::max (-127, std::min<int>(127, static_cast<int> ((idxPoint.x - referencePoint_arg[0]) / pointCompressionResolution_))));
+ diffY = static_cast<unsigned char> (std::max (-127, std::min<int>(127, static_cast<int> ((idxPoint.y - referencePoint_arg[1]) / pointCompressionResolution_))));
+ diffZ = static_cast<unsigned char> (std::max (-127, std::min<int>(127, static_cast<int> ((idxPoint.z - referencePoint_arg[2]) / pointCompressionResolution_))));
// store information in differential point vector
pointDiffDataVector_.push_back (diffX);
const unsigned char& diffZ = static_cast<unsigned char> (*(pointDiffDataVectorIterator_++));
// retrieve point from point cloud
- PointT& point = outputCloud_arg->points[beginIdx_arg + i];
+ PointT& point = (*outputCloud_arg)[beginIdx_arg + i];
// decode point position
point.x = static_cast<float> (referencePoint_arg[0] + diffX * pointCompressionResolution_);
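// Simplified sketch of the differential coordinate coding above: each coordinate
// is quantized relative to a reference point at the coder resolution, clamped to
// a single byte, and reconstructed as reference + diff * resolution (a signed
// byte is used here for readability; the coder stores the truncated value in its
// differential point vector).
#include <algorithm>

inline signed char
encodeCoordinate (float value, float reference, float resolution)
{
  const int diff = static_cast<int> ((value - reference) / resolution);
  return static_cast<signed char> (std::max (-127, std::min (127, diff)));
}

inline float
decodeCoordinate (signed char diff, float reference, float resolution)
{
  return reference + static_cast<float> (diff) * resolution;
}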
#include <pcl/io/file_io.h>
#include <pcl/PCLPointField.h>
#include <pcl/common/io.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
namespace pcl
PCL_DEPRECATED(1, 12, "use parameterless setInputFields<PointT>() instead")
inline void setInputFields (const PointT p)
{
- (void) p;
+ pcl::utils::ignore(p);
setInputFields<PointT> ();
}
#pragma once
#include <pcl/pcl_macros.h>
-#include <pcl/common/io.h>
-#include <pcl/io/boost.h>
-#include <cmath>
-#include <sstream>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/PolygonMesh.h>
#include <pcl/TextureMesh.h>
#pragma once
-#include <pcl/pcl_macros.h>
-#include <pcl/common/io.h>
+#include <pcl/conversions.h> // for fromPCLPointCloud2, toPCLPointCloud2
+#include <pcl/point_cloud.h> // for PointCloud
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/io/boost.h>
#include <cmath>
#include <sstream>
-#include <pcl/PolygonMesh.h>
-#include <pcl/TextureMesh.h>
+#include <Eigen/Geometry> // for Quaternionf
namespace pcl
{
#define PCL_LZF_IMAGE_IO_HPP_
#include <pcl/console/print.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/io/debayer.h>
#include <cstddef>
{
for (std::uint32_t u = 0; u < cloud.width; ++u, ++point_idx, depth_idx += 2)
{
- PointT &pt = cloud.points[point_idx];
+ PointT &pt = cloud[point_idx];
unsigned short val;
memcpy (&val, &uncompressed_data[depth_idx], sizeof (unsigned short));
if (val == 0)
shared(cloud, constant_x, constant_y, uncompressed_data) \
num_threads(num_threads)
#else
- (void) num_threads; // suppress warning if OMP is not present
+ pcl::utils::ignore(num_threads); // suppress warning if OMP is not present
#endif
for (int i = 0; i < static_cast< int> (cloud.size ()); ++i)
{
int u = i % cloud.width;
int v = i / cloud.width;
- PointT &pt = cloud.points[i];
+ PointT &pt = cloud[i];
int depth_idx = 2*i;
unsigned short val;
memcpy (&val, &uncompressed_data[depth_idx], sizeof (unsigned short));
for (std::size_t i = 0; i < cloud.size (); ++i, ++rgb_idx)
{
- PointT &pt = cloud.points[i];
+ PointT &pt = cloud[i];
pt.b = color_b[rgb_idx];
pt.g = color_g[rgb_idx];
shared(cloud, color_b, color_g, color_r) \
num_threads(num_threads)
#else
- (void) num_threads; // suppress warning if OMP is not present
+ pcl::utils::ignore(num_threads); // suppress warning if OMP is not present
#endif//_OPENMP
for (long int i = 0; i < cloud.size (); ++i)
{
- PointT &pt = cloud.points[i];
+ PointT &pt = cloud[i];
pt.b = color_b[i];
pt.g = color_g[i];
int v = color_v[i] - 128;
int u = color_u[i] - 128;
- PointT &pt1 = cloud.points[y_idx + 0];
+ PointT &pt1 = cloud[y_idx + 0];
pt1.r = CLIP_CHAR (color_y[y_idx + 0] + ((v * 18678 + 8192 ) >> 14));
pt1.g = CLIP_CHAR (color_y[y_idx + 0] + ((v * -9519 - u * 6472 + 8192) >> 14));
pt1.b = CLIP_CHAR (color_y[y_idx + 0] + ((u * 33292 + 8192 ) >> 14));
- PointT &pt2 = cloud.points[y_idx + 1];
+ PointT &pt2 = cloud[y_idx + 1];
pt2.r = CLIP_CHAR (color_y[y_idx + 1] + ((v * 18678 + 8192 ) >> 14));
pt2.g = CLIP_CHAR (color_y[y_idx + 1] + ((v * -9519 - u * 6472 + 8192) >> 14));
pt2.b = CLIP_CHAR (color_y[y_idx + 1] + ((u * 33292 + 8192 ) >> 14));
shared(cloud, color_u, color_v, color_y, wh2) \
num_threads(num_threads)
#else
- (void) num_threads; //suppress warning if OMP is not present
+ pcl::utils::ignore(num_threads); //suppress warning if OMP is not present
#endif//_OPENMP
for (int i = 0; i < wh2; ++i)
{
int v = color_v[i] - 128;
int u = color_u[i] - 128;
- PointT &pt1 = cloud.points[y_idx + 0];
+ PointT &pt1 = cloud[y_idx + 0];
pt1.r = CLIP_CHAR (color_y[y_idx + 0] + ((v * 18678 + 8192 ) >> 14));
pt1.g = CLIP_CHAR (color_y[y_idx + 0] + ((v * -9519 - u * 6472 + 8192) >> 14));
pt1.b = CLIP_CHAR (color_y[y_idx + 0] + ((u * 33292 + 8192 ) >> 14));
- PointT &pt2 = cloud.points[y_idx + 1];
+ PointT &pt2 = cloud[y_idx + 1];
pt2.r = CLIP_CHAR (color_y[y_idx + 1] + ((v * 18678 + 8192 ) >> 14));
pt2.g = CLIP_CHAR (color_y[y_idx + 1] + ((v * -9519 - u * 6472 + 8192) >> 14));
pt2.b = CLIP_CHAR (color_y[y_idx + 1] + ((u * 33292 + 8192 ) >> 14));
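// The fixed-point constants above are the usual analog YUV -> RGB weights scaled
// by 2^14, with +8192 providing round-to-nearest before the >> 14:
//   R = Y + 1.140 * V              (1.140 * 16384 ~= 18678)
//   G = Y - 0.395 * U - 0.581 * V  (0.395 * 16384 ~= 6472, 0.581 * 16384 ~= 9519)
//   B = Y + 2.032 * U              (2.032 * 16384 ~= 33292)
// A floating-point sketch of the same per-pixel conversion:
inline void
yuvToRgb (int y, int u, int v, unsigned char &r, unsigned char &g, unsigned char &b)
{
  const int uc = u - 128, vc = v - 128;   // centre the chroma channels
  const auto clip = [] (float x) { return static_cast<unsigned char> (x > 255.f ? 255.f : (x < 0.f ? 0.f : x)); };
  r = clip (static_cast<float> (y) + 1.140f * vc);
  g = clip (static_cast<float> (y) - 0.395f * uc - 0.581f * vc);
  b = clip (static_cast<float> (y) + 2.032f * uc);
}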
int rgb_idx = 0;
for (std::size_t i = 0; i < cloud.size (); ++i, rgb_idx += 3)
{
- PointT &pt = cloud.points[i];
+ PointT &pt = cloud[i];
pt.b = rgb_buffer[rgb_idx + 2];
pt.g = rgb_buffer[rgb_idx + 1];
default(none) \
num_threads(num_threads)
#else
- (void) num_threads; //suppress warning if OMP is not present
+ pcl::utils::ignore(num_threads); //suppress warning if OMP is not present
#endif//_OPENMP
for (long int i = 0; i < cloud.size (); ++i)
{
- PointT &pt = cloud.points[i];
+ PointT &pt = cloud[i];
long int rgb_idx = 3*i;
pt.b = rgb_buffer[rgb_idx + 2];
pt.g = rgb_buffer[rgb_idx + 1];
#include <fcntl.h>
#include <string>
#include <cstdlib>
+#include <pcl/common/io.h> // for getFields, ...
#include <pcl/console/print.h>
#include <pcl/io/boost.h>
#include <pcl/io/low_level_io.h>
if (nr_points != std::numeric_limits<int>::max ())
oss << "POINTS " << nr_points << "\n";
else
- oss << "POINTS " << cloud.points.size () << "\n";
+ oss << "POINTS " << cloud.size () << "\n";
return (oss.str ());
}
}
fields.resize (nri);
- data_size = cloud.points.size () * fsize;
+ data_size = cloud.size () * fsize;
// Prepare the map
#ifdef _WIN32
// Copy the data
char *out = &map[0] + data_idx;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
int nrj = 0;
for (const auto &field : fields)
{
- memcpy (out, reinterpret_cast<const char*> (&cloud.points[i]) + field.offset, fields_sizes[nrj]);
+ memcpy (out, reinterpret_cast<const char*> (&point) + field.offset, fields_sizes[nrj]);
out += fields_sizes[nrj++];
}
}
fields.resize (nri);
// Compute the size of data
- data_size = cloud.points.size () * fsize;
+ data_size = cloud.size () * fsize;
// If the data is to large the two 32 bit integers used to store the
// compressed and uncompressed size will overflow.
for (std::size_t i = 0; i < pters.size (); ++i)
{
pters[i] = &only_valid_data[toff];
- toff += static_cast<std::size_t>(fields_sizes[i]) * cloud.points.size();
+ toff += static_cast<std::size_t>(fields_sizes[i]) * cloud.size();
}
// Go over all the points, and copy the data in the appropriate places
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
for (std::size_t j = 0; j < fields.size (); ++j)
{
- memcpy (pters[j], reinterpret_cast<const char*> (&cloud.points[i]) + fields[j].offset, fields_sizes[j]);
+ memcpy (pters[j], reinterpret_cast<const char*> (&point) + fields[j].offset, fields_sizes[j]);
// Increment the pointer
pters[j] += fields_sizes[j];
}
return (-1);
}
- if (cloud.width * cloud.height != cloud.points.size ())
+ if (cloud.width * cloud.height != cloud.size ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeASCII] Number of points different than width * height!");
return (-1);
stream.precision (precision);
stream.imbue (std::locale::classic ());
// Iterate through the points
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
for (std::size_t d = 0; d < fields.size (); ++d)
{
case pcl::PCLPointField::INT8:
{
std::int8_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::int8_t), sizeof (std::int8_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::int8_t), sizeof (std::int8_t));
stream << boost::numeric_cast<std::int32_t>(value);
break;
}
case pcl::PCLPointField::UINT8:
{
std::uint8_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::uint8_t), sizeof (std::uint8_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::uint8_t), sizeof (std::uint8_t));
stream << boost::numeric_cast<std::uint32_t>(value);
break;
}
case pcl::PCLPointField::INT16:
{
std::int16_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::int16_t), sizeof (std::int16_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::int16_t), sizeof (std::int16_t));
stream << boost::numeric_cast<std::int16_t>(value);
break;
}
case pcl::PCLPointField::UINT16:
{
std::uint16_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::uint16_t), sizeof (std::uint16_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::uint16_t), sizeof (std::uint16_t));
stream << boost::numeric_cast<std::uint16_t>(value);
break;
}
case pcl::PCLPointField::INT32:
{
std::int32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::int32_t), sizeof (std::int32_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::int32_t), sizeof (std::int32_t));
stream << boost::numeric_cast<std::int32_t>(value);
break;
}
case pcl::PCLPointField::UINT32:
{
std::uint32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (std::uint32_t), sizeof (std::uint32_t));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (std::uint32_t), sizeof (std::uint32_t));
stream << boost::numeric_cast<std::uint32_t>(value);
break;
}
if ("rgb" == fields[d].name)
{
std::uint32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (float), sizeof (float));
stream << boost::numeric_cast<std::uint32_t>(value);
break;
}
float value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (float), sizeof (float));
if (std::isnan (value))
stream << "nan";
else
case pcl::PCLPointField::FLOAT64:
{
double value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[i]) + fields[d].offset + c * sizeof (double), sizeof (double));
+ memcpy (&value, reinterpret_cast<const char*> (&point) + fields[d].offset + c * sizeof (double), sizeof (double));
if (std::isnan (value))
stream << "nan";
else
int nrj = 0;
for (const auto &field : fields)
{
- memcpy (out, reinterpret_cast<const char*> (&cloud.points[index]) + field.offset, fields_sizes[nrj]);
+ memcpy (out, reinterpret_cast<const char*> (&cloud[index]) + field.offset, fields_sizes[nrj]);
out += fields_sizes[nrj++];
}
}
return (-1);
}
- if (cloud.width * cloud.height != cloud.points.size ())
+ if (cloud.width * cloud.height != cloud.size ())
{
throw pcl::IOException ("[pcl::PCDWriter::writeASCII] Number of points different than width * height!");
return (-1);
case pcl::PCLPointField::INT8:
{
std::int8_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::int8_t), sizeof (std::int8_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::int8_t), sizeof (std::int8_t));
stream << boost::numeric_cast<std::int32_t>(value);
break;
}
case pcl::PCLPointField::UINT8:
{
std::uint8_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::uint8_t), sizeof (std::uint8_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::uint8_t), sizeof (std::uint8_t));
stream << boost::numeric_cast<std::uint32_t>(value);
break;
}
case pcl::PCLPointField::INT16:
{
std::int16_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::int16_t), sizeof (std::int16_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::int16_t), sizeof (std::int16_t));
stream << boost::numeric_cast<std::int16_t>(value);
break;
}
case pcl::PCLPointField::UINT16:
{
std::uint16_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::uint16_t), sizeof (std::uint16_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::uint16_t), sizeof (std::uint16_t));
stream << boost::numeric_cast<std::uint16_t>(value);
break;
}
case pcl::PCLPointField::INT32:
{
std::int32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::int32_t), sizeof (std::int32_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::int32_t), sizeof (std::int32_t));
stream << boost::numeric_cast<std::int32_t>(value);
break;
}
case pcl::PCLPointField::UINT32:
{
std::uint32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (std::uint32_t), sizeof (std::uint32_t));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (std::uint32_t), sizeof (std::uint32_t));
stream << boost::numeric_cast<std::uint32_t>(value);
break;
}
if ("rgb" == fields[d].name)
{
std::uint32_t value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (float), sizeof (float));
stream << boost::numeric_cast<std::uint32_t>(value);
}
else
{
float value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (float), sizeof (float));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (float), sizeof (float));
if (std::isnan (value))
stream << "nan";
else
case pcl::PCLPointField::FLOAT64:
{
double value;
- memcpy (&value, reinterpret_cast<const char*> (&cloud.points[index]) + fields[d].offset + c * sizeof (double), sizeof (double));
+ memcpy (&value, reinterpret_cast<const char*> (&cloud[index]) + fields[d].offset + c * sizeof (double), sizeof (double));
if (std::isnan (value))
stream << "nan";
else
template <typename PointT> bool
pcl::io::PointCloudImageExtractor<PointT>::extract (const PointCloud& cloud, pcl::PCLImage& img) const
{
- if (!cloud.isOrganized () || cloud.points.size () != cloud.width * cloud.height)
+ if (!cloud.isOrganized () || cloud.size () != cloud.width * cloud.height)
return (false);
bool result = this->extractImpl (cloud, img);
if (paint_nans_with_black_ && result)
{
std::size_t size = img.encoding == "mono16" ? 2 : 3;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
if (!pcl::isFinite (cloud[i]))
std::memset (&img.data[i * size], 0, size);
}
img.step = img.width * sizeof (unsigned char) * 3;
img.data.resize (img.step * img.height);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
float x;
float y;
float z;
- pcl::getFieldValue<PointT, float> (cloud.points[i], offset_x, x);
- pcl::getFieldValue<PointT, float> (cloud.points[i], offset_y, y);
- pcl::getFieldValue<PointT, float> (cloud.points[i], offset_z, z);
+ pcl::getFieldValue<PointT, float> (cloud[i], offset_x, x);
+ pcl::getFieldValue<PointT, float> (cloud[i], offset_y, y);
+ pcl::getFieldValue<PointT, float> (cloud[i], offset_z, z);
img.data[i * 3 + 0] = static_cast<unsigned char>((x + 1.0) * 127);
img.data[i * 3 + 1] = static_cast<unsigned char>((y + 1.0) * 127);
img.data[i * 3 + 2] = static_cast<unsigned char>((z + 1.0) * 127);
img.step = img.width * sizeof (unsigned char) * 3;
img.data.resize (img.step * img.height);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
std::uint32_t val;
- pcl::getFieldValue<PointT, std::uint32_t> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, std::uint32_t> (cloud[i], offset, val);
img.data[i * 3 + 0] = (val >> 16) & 0x0000ff;
img.data[i * 3 + 1] = (val >> 8) & 0x0000ff;
img.data[i * 3 + 2] = (val) & 0x0000ff;
img.step = img.width * sizeof (unsigned short);
img.data.resize (img.step * img.height);
unsigned short* data = reinterpret_cast<unsigned short*>(&img.data[0]);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
std::uint32_t val;
- pcl::getFieldValue<PointT, std::uint32_t> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, std::uint32_t> (cloud[i], offset, val);
data[i] = static_cast<unsigned short>(val);
}
break;
std::srand(std::time(nullptr));
std::map<std::uint32_t, std::size_t> colormap;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
std::uint32_t val;
- pcl::getFieldValue<PointT, std::uint32_t> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, std::uint32_t> (cloud[i], offset, val);
if (colormap.count (val) == 0)
{
colormap[val] = i * 3;
std::map<std::uint32_t, std::size_t> colormap;
// First pass: find unique labels
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
// If we need to paint NaN points with black do not waste colors on them
- if (paint_nans_with_black_ && !pcl::isFinite (cloud.points[i]))
+ if (paint_nans_with_black_ && !pcl::isFinite (point))
continue;
std::uint32_t val;
- pcl::getFieldValue<PointT, std::uint32_t> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, std::uint32_t> (point, offset, val);
labels.insert (val);
}
}
// Second pass: copy colors from the LUT
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
std::uint32_t val;
- pcl::getFieldValue<PointT, std::uint32_t> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, std::uint32_t> (cloud[i], offset, val);
memcpy (&img.data[i * 3], GlasbeyLUT::data () + colormap[val] * 3, 3);
}
{
float min = std::numeric_limits<float>::infinity();
float max = -std::numeric_limits<float>::infinity();
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
float val;
- pcl::getFieldValue<PointT, float> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, float> (point, offset, val);
if (val < min)
min = val;
if (val > max)
data_min = min;
}
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
float val;
- pcl::getFieldValue<PointT, float> (cloud.points[i], offset, val);
+ pcl::getFieldValue<PointT, float> (cloud[i], offset, val);
if (scaling_method_ == SCALING_NO)
{
data[i] = val;
// Set the coordinates of the pcl::PointCloud (if the pcl::PointCloud supports coordinates)
if (x_idx != -1 && y_idx != -1 && z_idx != -1)
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
double coordinate[3];
polydata->GetPoint (i, coordinate);
- pcl::setFieldValue<PointT, float> (cloud.points[i], x_idx, coordinate[0]);
- pcl::setFieldValue<PointT, float> (cloud.points[i], y_idx, coordinate[1]);
- pcl::setFieldValue<PointT, float> (cloud.points[i], z_idx, coordinate[2]);
+ pcl::setFieldValue<PointT, float> (cloud[i], x_idx, coordinate[0]);
+ pcl::setFieldValue<PointT, float> (cloud[i], y_idx, coordinate[1]);
+ pcl::setFieldValue<PointT, float> (cloud[i], z_idx, coordinate[2]);
}
}
vtkFloatArray* normals = vtkFloatArray::SafeDownCast (polydata->GetPointData ()->GetNormals ());
if (normal_x_idx != -1 && normal_y_idx != -1 && normal_z_idx != -1 && normals)
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
float normal[3];
normals->GetTupleValue (i, normal);
- pcl::setFieldValue<PointT, float> (cloud.points[i], normal_x_idx, normal[0]);
- pcl::setFieldValue<PointT, float> (cloud.points[i], normal_y_idx, normal[1]);
- pcl::setFieldValue<PointT, float> (cloud.points[i], normal_z_idx, normal[2]);
+ pcl::setFieldValue<PointT, float> (cloud[i], normal_x_idx, normal[0]);
+ pcl::setFieldValue<PointT, float> (cloud[i], normal_y_idx, normal[1]);
+ pcl::setFieldValue<PointT, float> (cloud[i], normal_z_idx, normal[2]);
}
}
if (rgb_idx != -1 && colors)
{
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
unsigned char color[3];
colors->GetTupleValue (i, color);
pcl::RGB rgb;
rgb.r = color[0]; rgb.g = color[1]; rgb.b = color[2];
- pcl::setFieldValue<PointT, std::uint32_t> (cloud.points[i], rgb_idx, rgb.rgba);
+ pcl::setFieldValue<PointT, std::uint32_t> (cloud[i], rgb_idx, rgb.rgba);
}
}
}
pcl::for_each_type<typename pcl::traits::fieldList<PointT>::type>(pcl::detail::FieldAdder<PointT>(fields));
// Coordinates (always must have coordinates)
- vtkIdType nr_points = cloud.points.size ();
+ vtkIdType nr_points = cloud.size ();
vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New ();
points->SetNumberOfPoints (nr_points);
// Get a pointer to the beginning of the data array
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/TextureMesh.h>
-#include <pcl/PolygonMesh.h>
#include <pcl/io/file_io.h>
namespace pcl
{
+ struct PolygonMesh;
+
class PCL_EXPORTS MTLReader
{
public:
{
// Fill r/g/b data, assuming that the order is BGRA
pcl::RGB rgb;
- memcpy (&rgb, reinterpret_cast<const char*> (&cloud->points[k]) + rgba_index, sizeof (RGB));
+ memcpy (&rgb, reinterpret_cast<const char*> (&(*cloud)[k]) + rgba_index, sizeof (RGB));
image_map[k].nRed = static_cast<XnUInt8> (rgb.r);
image_map[k].nGreen = static_cast<XnUInt8> (rgb.g);
image_map[k].nBlue = static_cast<XnUInt8> (rgb.b);
#include <pcl/io/ply/io_operators.h>
#include <pcl/pcl_macros.h>
-#include <fstream>
-#include <iostream>
#include <istream>
#include <memory>
-#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
#include <pcl/console/print.h>
#include <string>
-#include <vector>
#include <pcl/io/point_cloud_image_extractors.h>
namespace pcl
#include <pcl/io/hdl_grabber.h>
#include <pcl/io/grabber.h>
#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
#include <boost/asio.hpp>
#include <string>
*
*/
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/io/ascii_io.h>
#include <istream>
#include <fstream>
Eigen::Quaternionf& orientation, int& file_version, int& data_type,
unsigned int& data_idx, const int offset)
{
- (void)offset; //offset is not used for ascii file implementation
+ pcl::utils::ignore(offset); //offset is not used for ascii file implementation
boost::filesystem::path fpath = file_name;
*/
#include <pcl/io/auto_io.h>
-#include <pcl/io/boost.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/ifs_io.h>
int col = static_cast<int> (uv.u * COLOR_WIDTH);
int pixel = row * COLOR_WIDTH + col;
if (pixel >=0 && pixel < COLOR_WIDTH * COLOR_HEIGHT)
- memcpy (&xyzrgba_cloud->points[i].rgba, &color_data_[pixel * 3], 3);
+ memcpy (&(*xyzrgba_cloud)[i].rgba, &color_data_[pixel * 3], 3);
}
point_cloud_rgba_signal_->operator () (xyzrgba_cloud);
point.depth = (*depth_buffer_)[i];
if (std::isnan (point.depth))
{
- cloud.points[i].x = nan;
- cloud.points[i].y = nan;
- cloud.points[i].z = nan;
+ cloud[i].x = nan;
+ cloud[i].y = nan;
+ cloud[i].z = nan;
}
else
{
projection_->get3DCoordinates (&point, &vertex, 1);
- cloud.points[i].x = vertex.x;
- cloud.points[i].y = vertex.y;
- cloud.points[i].z = vertex.z;
+ cloud[i].x = vertex.x;
+ cloud[i].y = vertex.y;
+ cloud[i].z = vertex.z;
}
point.point.x += 1;
++i;
#include <pcl/pcl_config.h>
#include <pcl/io/dinast_grabber.h>
-using namespace std;
-
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::DinastGrabber::DinastGrabber (const int device_position)
: image_width_ (320)
double s_theta = sin (theta_colati);
double c_ksai = static_cast<double> (x - 160) / r1;
double s_ksai = static_cast<double> (y - 120) / r1;
- cloud->points[depth_idx].x = static_cast<float> ((dist * s_theta * c_ksai) / 500.0 + 0.5);
- cloud->points[depth_idx].y = static_cast<float> ((dist * s_theta * s_ksai) / 500.0 + 0.5);
- cloud->points[depth_idx].z = static_cast<float> (dist * c_theta);
- if (cloud->points[depth_idx].z < 0.01f)
- cloud->points[depth_idx].z = 0.01f;
- cloud->points[depth_idx].z /= 500.0f;
- cloud->points[depth_idx].intensity = static_cast<float> (pixel);
+ (*cloud)[depth_idx].x = static_cast<float> ((dist * s_theta * c_ksai) / 500.0 + 0.5);
+ (*cloud)[depth_idx].y = static_cast<float> ((dist * s_theta * s_ksai) / 500.0 + 0.5);
+ (*cloud)[depth_idx].z = static_cast<float> (dist * c_theta);
+ if ((*cloud)[depth_idx].z < 0.01f)
+ (*cloud)[depth_idx].z = 0.01f;
+ (*cloud)[depth_idx].z /= 500.0f;
+ (*cloud)[depth_idx].intensity = static_cast<float> (pixel);
// Get rid of the noise
- if(cloud->points[depth_idx].z > 0.8f || cloud->points[depth_idx].z < 0.02f)
+ if((*cloud)[depth_idx].z > 0.8f || (*cloud)[depth_idx].z < 0.02f)
{
- cloud->points[depth_idx].x = std::numeric_limits<float>::quiet_NaN ();
- cloud->points[depth_idx].y = std::numeric_limits<float>::quiet_NaN ();
- cloud->points[depth_idx].z = std::numeric_limits<float>::quiet_NaN ();
- cloud->points[depth_idx].intensity = static_cast<float> (pixel);
+ (*cloud)[depth_idx].x = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud)[depth_idx].y = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud)[depth_idx].z = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud)[depth_idx].intensity = static_cast<float> (pixel);
}
}
}
// Copy data in point cloud (and convert millimeters in meters)
for (std::size_t i = 0; i < pointMap.size (); i += 3)
{
- cloud.points[i / 3].x = pointMap[i] / 1000.0;
- cloud.points[i / 3].y = pointMap[i + 1] / 1000.0;
- cloud.points[i / 3].z = pointMap[i + 2] / 1000.0;
+ cloud[i / 3].x = pointMap[i] / 1000.0;
+ cloud[i / 3].y = pointMap[i + 1] / 1000.0;
+ cloud[i / 3].z = pointMap[i + 2] / 1000.0;
}
return (true);
// Copy data in point cloud (and convert millimeters in meters)
for (std::size_t i = 0; i < pointMap.size (); i += 3)
{
- cloud->points[i / 3].x = pointMap[i] / 1000.0;
- cloud->points[i / 3].y = pointMap[i + 1] / 1000.0;
- cloud->points[i / 3].z = pointMap[i + 2] / 1000.0;
+ (*cloud)[i / 3].x = pointMap[i] / 1000.0;
+ (*cloud)[i / 3].y = pointMap[i + 1] / 1000.0;
+ (*cloud)[i / 3].z = pointMap[i + 2] / 1000.0;
}
}
#include <pcl/io/lzf_image_io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/memory.h>
-#include <pcl/pcl_config.h>
-#include <pcl/pcl_macros.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
return (true);
#else
- PCL_ERROR ("[pcl::ImageGrabber::loadNextCloudVTK] Attempted to read image files, but PCL was not built with VTK [no -DPCL_BUILT_WITH_VTK]. \n");
- return (false);
+ pcl::utils::ignore(idx, blob, origin, orientation);
+ PCL_ERROR ("[pcl::ImageGrabber::loadNextCloudVTK] Attempted to read image files, but PCL was not built with VTK [no -DPCL_BUILT_WITH_VTK]. \n");
+ return false;
#endif //PCL_BUILT_WITH_VTK
-
}
bool
#include <vector>
#include <cstdlib>
-#include <cstdio>
#include <cassert>
+#include <cstring> // for memcpy
+#include <iterator> // for back_inserter
// user defined I/O callback methods for libPNG
// no bit pattern traps. Since the only platform that is both non-POSIX
// and fails to support both assumptions is windows 64 bit, we make a
// special workaround for it.
-#if defined (WIN32) && defined (_M_X64) && defined (_MSC_VER)
+#if defined (_WIN32) && defined (_M_X64) && defined (_MSC_VER)
// workaround for missing POSIX compliance
unsigned _int64 off;
#else
*/
#include <pcl/io/obj_io.h>
#include <fstream>
-#include <iostream>
#include <pcl/common/io.h>
#include <pcl/io/boost.h>
#include <pcl/console/time.h>
{
for (int u = -centerX; u < centerX; ++u, ++depth_idx)
{
- pcl::PointXYZ& pt = cloud->points[depth_idx];
+ pcl::PointXYZ& pt = (*cloud)[depth_idx];
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
depth_map[depth_idx] == depth_image->getNoSampleValue () ||
{
for (int u = -centerX; u < centerX; ++u, color_idx += 3, ++depth_idx)
{
- pcl::PointXYZRGB& pt = cloud->points[depth_idx];
+ pcl::PointXYZRGB& pt = (*cloud)[depth_idx];
/// @todo Different values for these cases
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
{
for (int u = -centerX; u < centerX; ++u, color_idx += 3, ++depth_idx)
{
- pcl::PointXYZRGBA& pt = cloud->points[depth_idx];
+ pcl::PointXYZRGBA& pt = (*cloud)[depth_idx];
/// @todo Different values for these cases
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
{
for (int u = -centerX; u < centerX; ++u, ++depth_idx)
{
- pcl::PointXYZI& pt = cloud->points[depth_idx];
+ pcl::PointXYZI& pt = (*cloud)[depth_idx];
/// @todo Different values for these cases
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
{
for (unsigned u = 0; u < depth_width_; ++u, ++depth_idx)
{
- pcl::PointXYZ& pt = cloud->points[depth_idx];
+ pcl::PointXYZ& pt = (*cloud)[depth_idx];
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
depth_map[depth_idx] == depth_image->getNoSampleValue () ||
pt.x = pt.y = pt.z = bad_point;
pt.b = pt.g = pt.r = 0;
pt.a = 255; // point has no color info -> alpha = max => transparent
- cloud->points.assign (cloud->points.size (), pt);
+ cloud->points.assign (cloud->size (), pt);
}
// fill in XYZ values
{
for (unsigned u = 0; u < depth_width_; ++u, ++value_idx, point_idx += step)
{
- PointT& pt = cloud->points[point_idx];
+ PointT& pt = (*cloud)[point_idx];
/// @todo Different values for these cases
// Check for invalid measurements
{
for (unsigned xIdx = 0; xIdx < image_width_; ++xIdx, point_idx += step, value_idx += 3)
{
- PointT& pt = cloud->points[point_idx];
+ PointT& pt = (*cloud)[point_idx];
color.Red = rgb_buffer[value_idx];
color.Green = rgb_buffer[value_idx + 1];
{
for (unsigned u = 0; u < depth_width_; ++u, ++depth_idx)
{
- pcl::PointXYZI& pt = cloud->points[depth_idx];
+ pcl::PointXYZI& pt = (*cloud)[depth_idx];
/// @todo Different values for these cases
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
#ifdef HAVE_OPENNI
#include <pcl/io/openni_camera/openni_depth_image.h>
-#include <sstream>
#include <limits>
-#include <iostream>
-
-using namespace std;
namespace openni_wrapper
{
#include <pcl/io/openni_camera/openni_depth_image.h>
#include <pcl/io/openni_camera/openni_ir_image.h>
#include <pcl/io/openni_camera/openni_image.h>
-#include <iostream>
-#include <limits>
-#include <sstream>
#include <map>
#include <vector>
#include "XnVersion.h"
openni_wrapper::OpenNIDevice::Ptr
openni_wrapper::OpenNIDriver::getDeviceByIndex (unsigned index) const
{
- using namespace std;
-
if (index >= device_context_.size ())
THROW_OPENNI_EXCEPTION ("Device index out of range. Only %d devices connected but device %d requested.", device_context_.size (), index);
auto device = device_context_[index].device.lock ();
#ifdef HAVE_OPENNI
#include <pcl/io/openni_camera/openni_image_bayer_grbg.h>
-#include <sstream>
-#include <iostream>
#include <pcl/io/debayer.h>
#define AVG(a,b) static_cast<unsigned char>((int(a) + int(b)) >> 1)
#define AVG3(a,b,c) static_cast<unsigned char>((int(a) + int(b) + int(c)) / 3)
#define AVG4(a,b,c,d) static_cast<unsigned char>((int(a) + int(b) + int(c) + int(d)) >> 2)
#define WAVG4(a,b,c,d,x,y) static_cast<unsigned char>( ( (int(a) + int(b)) * int(x) + (int(c) + int(d)) * int(y) ) / ( (int(x) + (int(y))) << 1 ) )
-using namespace std;
//////////////////////////////////////////////////////////////////////////////
openni_wrapper::ImageBayerGRBG::ImageBayerGRBG (pcl::shared_ptr<xn::ImageMetaData> image_meta_data, DebayeringMethod method) noexcept
#define CLIP_CHAR(c) static_cast<unsigned char> ((c)>255?255:(c)<0?0:(c))
-using namespace std;
namespace openni_wrapper
{
#ifdef HAVE_OPENNI
#include <pcl/io/openni_camera/openni_ir_image.h>
-#include <sstream>
-#include <limits>
-#include <iostream>
-
-using namespace std;
namespace openni_wrapper
{
{
for (unsigned int u = 0; u < depth_width_; ++u, ++depth_idx)
{
- pcl::PointXYZ& pt = cloud->points[depth_idx];
+ pcl::PointXYZ& pt = (*cloud)[depth_idx];
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
depth_map[depth_idx] == depth_image->getNoSampleValue () ||
pt.x = pt.y = pt.z = bad_point;
pt.b = pt.g = pt.r = 0;
pt.a = 0; // point has no color info -> alpha = min => transparent
- cloud->points.assign (cloud->points.size (), pt);
+ cloud->points.assign (cloud->size (), pt);
}
// fill in XYZ values
{
for (unsigned int u = 0; u < depth_width_; ++u, ++value_idx, point_idx += step)
{
- PointT& pt = cloud->points[point_idx];
+ PointT& pt = (*cloud)[point_idx];
/// @todo Different values for these cases
// Check for invalid measurements
{
for (unsigned xIdx = 0; xIdx < image_width_; ++xIdx, point_idx += step, value_idx += 3)
{
- PointT& pt = cloud->points[point_idx];
+ PointT& pt = (*cloud)[point_idx];
pt.r = rgb_buffer[value_idx];
pt.g = rgb_buffer[value_idx + 1];
{
for (unsigned int u = 0; u < depth_width_; ++u, ++depth_idx)
{
- pcl::PointXYZI& pt = cloud->points[depth_idx];
+ pcl::PointXYZI& pt = (*cloud)[depth_idx];
/// @todo Different values for these cases
// Check for invalid measurements
if (depth_map[depth_idx] == 0 ||
#include <pcl/io/pcd_io.h>
#include <pcl/io/tar.h>
#include <pcl/memory.h>
-#include <pcl/pcl_config.h>
-#include <pcl/pcl_macros.h>
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
///////////////////////////////////////////////////////////////////////////////////////////
//////////////////////// GrabberImplementation //////////////////////
#include <string>
#include <cstdlib>
#include <pcl/io/boost.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/common/io.h>
#include <pcl/io/low_level_io.h>
#include <pcl/io/lzf.h>
#include <cstring>
#include <cerrno>
-#include <boost/version.hpp>
-
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::PCDWriter::setLockingPermissions (const std::string &file_name,
boost::interprocess::file_lock &lock)
{
- (void)file_name;
- (void)lock;
-#ifndef WIN32
+ pcl::utils::ignore(file_name, lock);
+#ifndef _WIN32
#ifndef NO_MANDATORY_LOCKING
// Attempt to lock the file.
// For mandatory locking, the filesystem must be mounted with the "mand" option in Linux (see http://www.hackinglinuxexposed.com/articles/20030623.html)
pcl::PCDWriter::resetLockingPermissions (const std::string &file_name,
boost::interprocess::file_lock &lock)
{
- (void)file_name;
- (void)lock;
-#ifndef WIN32
+ pcl::utils::ignore(file_name, lock);
+#ifndef _WIN32
#ifndef NO_MANDATORY_LOCKING
- (void)file_name;
namespace fs = boost::filesystem;
try
{
#include <pcl/io/ply/ply_parser.h>
+#include <fstream> // for ifstream
+#include <sstream> // for istringstream
+
bool pcl::io::ply::ply_parser::parse (const std::string& filename)
{
std::ifstream istream (filename.c_str (), std::ios::in | std::ios::binary);
// ascii
if (format == ascii_format)
{
- for (const auto element_ptr: elements)
+ for (const auto &element_ptr: elements)
{
auto& element = *(element_ptr.get ());
for (std::size_t element_index = 0; element_index < element.count; ++element_index)
stringstream.unsetf (std::ios_base::skipws);
stringstream >> std::ws;
- for (const auto property_ptr: element.properties)
+ for (const auto &property_ptr: element.properties)
{
auto& property = *(property_ptr.get ());
if (!property.parse (*this, format, stringstream))
istream.open (filename.c_str (), std::ios::in | std::ios::binary);
istream.seekg (data_start);
- for (const auto element_ptr: elements)
+ for (const auto &element_ptr: elements)
{
auto& element = *(element_ptr.get ());
for (std::size_t element_index = 0; element_index < element.count; ++element_index)
{
if (element.begin_element_callback)
element.begin_element_callback ();
- for (const auto property_ptr: element.properties)
+ for (const auto &property_ptr: element.properties)
{
auto& property = *(property_ptr.get ());
if (!property.parse (*this, format, istream))
*
*/
-#include <fcntl.h>
#include <pcl/point_types.h>
#include <pcl/common/io.h>
#include <pcl/io/ply_io.h>
#include <cstdlib>
#include <fstream>
#include <functional>
-#include <map>
-#include <sstream>
#include <string>
#include <tuple>
void
pcl::io::savePNGFile (const std::string& file_name, const pcl::PointCloud<unsigned char>& cloud)
{
- saveCharPNGFile(file_name, &cloud.points[0], cloud.width, cloud.height, 1);
+ saveCharPNGFile(file_name, &cloud[0], cloud.width, cloud.height, 1);
}
void
pcl::io::savePNGFile (const std::string& file_name, const pcl::PointCloud<unsigned short>& cloud)
{
- saveShortPNGFile(file_name, &cloud.points[0], cloud.width, cloud.height, 1);
+ saveShortPNGFile(file_name, &cloud[0], cloud.width, cloud.height, 1);
}
void
cloud->width = sp.width ();
cloud->height = sp.height ();
cloud->is_dense = false;
- cloud->points.resize ( points.size () );
+ cloud->points.resize ( points.size () ); // 'points' here is the librealsense vertex buffer, not the cloud
const auto cloud_vertices_ptr = points.get_vertices ();
const auto cloud_texture_ptr = points.get_texture_coordinates ();
#pragma omp parallel for \
default(none) \
shared(cloud, cloud_vertices_ptr, mapColorFunc)
- for (int index = 0; index < cloud->points.size (); ++index)
+ for (int index = 0; index < cloud->size (); ++index)
{
const auto ptr = cloud_vertices_ptr + index;
const auto uvptr = cloud_texture_ptr + index;
- auto& p = cloud->points[index];
+ auto& p = (*cloud)[index];
p.x = ptr->x;
p.y = ptr->y;
xyz_cloud->header.stamp = timestamp;
xyz_cloud->is_dense = false;
for (int i = 0; i < SIZE; i++)
- convertPoint (vertices[i], xyz_cloud->points[i]);
+ convertPoint (vertices[i], (*xyz_cloud)[i]);
}
if (need_xyzrgba_)
pcl::copyPointCloud (*xyz_cloud, *xyzrgba_cloud);
for (int i = 0; i < HEIGHT; i++)
{
- pcl::PointXYZRGBA* cloud_row = &xyzrgba_cloud->points[i * WIDTH];
+ pcl::PointXYZRGBA* cloud_row = &(*xyzrgba_cloud)[i * WIDTH];
std::uint32_t* color_row = &d[i * data.pitches[0] / sizeof (std::uint32_t)];
for (int j = 0; j < WIDTH; j++)
memcpy (&cloud_row[j].rgba, &color_row[j], sizeof (std::uint32_t));
for (int i = 0; i < HEIGHT; i++)
{
PXCPoint3DF32* vertices_row = &vertices[i * WIDTH];
- pcl::PointXYZRGBA* cloud_row = &xyzrgba_cloud->points[i * WIDTH];
+ pcl::PointXYZRGBA* cloud_row = &(*xyzrgba_cloud)[i * WIDTH];
std::uint32_t* color_row = &d[i * data.pitches[0] / sizeof (std::uint32_t)];
for (int j = 0; j < WIDTH; j++)
{
#include <pcl/point_types.h>
#include <pcl/io/vtk_io.h>
#include <fstream>
-#include <iostream>
#include <pcl/common/io.h>
//////////////////////////////////////////////////////////////////////////////////////////////
// First get the xyz information
pcl::PointCloud<pcl::PointXYZ>::Ptr xyz_cloud (new pcl::PointCloud<pcl::PointXYZ> ());
xyz_cloud->points.resize (nr_points);
- xyz_cloud->width = static_cast<std::uint32_t> (xyz_cloud->points.size ());
+ xyz_cloud->width = xyz_cloud->size ();
xyz_cloud->height = 1;
xyz_cloud->is_dense = true;
double point_xyz[3];
for (vtkIdType i = 0; i < mesh_points->GetNumberOfPoints (); i++)
{
mesh_points->GetPoint (i, &point_xyz[0]);
- xyz_cloud->points[i].x = static_cast<float> (point_xyz[0]);
- xyz_cloud->points[i].y = static_cast<float> (point_xyz[1]);
- xyz_cloud->points[i].z = static_cast<float> (point_xyz[2]);
+ (*xyz_cloud)[i].x = static_cast<float> (point_xyz[0]);
+ (*xyz_cloud)[i].y = static_cast<float> (point_xyz[1]);
+ (*xyz_cloud)[i].z = static_cast<float> (point_xyz[2]);
}
// And put it in the mesh cloud
pcl::toPCLPointCloud2 (*xyz_cloud, mesh.cloud);
{
pcl::PointCloud<pcl::RGB>::Ptr rgb_cloud (new pcl::PointCloud<pcl::RGB> ());
rgb_cloud->points.resize (nr_points);
- rgb_cloud->width = static_cast<std::uint32_t> (rgb_cloud->points.size ());
+ rgb_cloud->width = rgb_cloud->size ();
rgb_cloud->height = 1;
rgb_cloud->is_dense = true;
{
pcl::PointCloud<pcl::Normal>::Ptr normal_cloud (new pcl::PointCloud<pcl::Normal> ());
normal_cloud->resize (nr_points);
- normal_cloud->width = static_cast<std::uint32_t> (xyz_cloud->points.size ());
+ normal_cloud->width = xyz_cloud->size ();
normal_cloud->height = 1;
normal_cloud->is_dense = true;
{
float normal[3];
normals->GetTupleValue (i, normal);
- normal_cloud->points[i].normal_x = normal[0];
- normal_cloud->points[i].normal_y = normal[1];
- normal_cloud->points[i].normal_z = normal[2];
+ (*normal_cloud)[i].normal_x = normal[0];
+ (*normal_cloud)[i].normal_y = normal[1];
+ (*normal_cloud)[i].normal_z = normal[2];
}
pcl::PCLPointCloud2 normal_cloud2;
#include <iostream>
#include <pcl/common/io.h>
#include <pcl/io/pcd_io.h>
-
-using namespace std;
+#include <string>
/* ---[ */
int
pcl::PCLPointCloud2 cloud;
Eigen::Vector4f origin; Eigen::Quaternionf orientation;
- if (pcl::io::loadPCDFile (string (argv[1]), cloud, origin, orientation) < 0)
+ if (pcl::io::loadPCDFile (std::string (argv[1]), cloud, origin, orientation) < 0)
{
std::cerr << "Unable to load " << argv[1] << std::endl;
return (-1);
if (type == 0)
{
std::cerr << "Saving file " << argv[2] << " as ASCII." << std::endl;
- w.writeASCII (string (argv[2]), cloud, origin, orientation, (argc == 5) ? atoi (argv[4]) : 7);
+ w.writeASCII (std::string (argv[2]), cloud, origin, orientation, (argc == 5) ? atoi (argv[4]) : 7);
}
else if (type == 1)
{
std::cerr << "Saving file " << argv[2] << " as binary." << std::endl;
- w.writeBinary (string (argv[2]), cloud, origin, orientation);
+ w.writeBinary (std::string (argv[2]), cloud, origin, orientation);
}
else if (type == 2)
{
std::cerr << "Saving file " << argv[2] << " as binary_compressed." << std::endl;
- w.writeBinaryCompressed (string (argv[2]), cloud, origin, orientation);
+ w.writeBinaryCompressed (std::string (argv[2]), cloud, origin, orientation);
}
}
/* ]--- */
#include <memory>
#include <thread>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace pcl::console;
void
writeToDisk (const typename PointCloud<PointT>::ConstPtr& cloud)
{
- stringstream ss;
+ std::stringstream ss;
std::string time = boost::posix_time::to_iso_string (boost::posix_time::microsec_clock::local_time ());
ss << "frame-" << time << ".pcd";
writer_.writeBinaryCompressed (ss.str (), *cloud);
*
*/
-#include <pcl/common/common.h>
#include <pcl/io/pcd_io.h>
/** @brief PCL point object */
*
*/
-#include <pcl/io/boost.h>
#include <pcl/io/ply/ply_parser.h>
#include <cstdlib>
*
*/
-#include <pcl/io/boost.h>
#include <pcl/io/ply/ply_parser.h>
#include <cstdlib>
*
*/
-#include <pcl/io/boost.h>
#include <pcl/io/ply/ply_parser.h>
#include <cstdlib>
std::vector<int> nn_idx (1);
std::vector<float> nn_dists (1);
- indices.resize (cloud_in->points.size ());
- for (std::size_t i = 0; i < cloud_in->points.size (); ++i)
+ indices.resize (cloud_in->size ());
+ for (std::size_t i = 0; i < cloud_in->size (); ++i)
{
tree.nearestKSearchT ((*cloud_in)[i], 1, nn_idx, nn_dists);
indices[i] = nn_idx[0];
std::vector<int> nn_idx (1);
std::vector<float> nn_dists (1);
- indices.resize (cloud_in->points.size ());
- for (std::size_t i = 0; i < cloud_in->points.size (); ++i)
+ indices.resize (cloud_in->size ());
+ for (std::size_t i = 0; i < cloud_in->size (); ++i)
{
tree.nearestKSearch (*cloud_in, i, 1, nn_idx, nn_dists);
indices[i] = nn_idx[0];
#ifndef PCL_KDTREE_KDTREE_IMPL_FLANN_H_
#define PCL_KDTREE_KDTREE_IMPL_FLANN_H_
-#include <cstdio>
-
#include <flann/flann.hpp>
#include <pcl/kdtree/kdtree_flann.h>
return;
}
- int original_no_of_points = static_cast<int> (cloud.points.size ());
+ const auto original_no_of_points = cloud.size ();
cloud_.reset (new float[original_no_of_points * dim_], std::default_delete<float[]> ());
float* cloud_ptr = cloud_.get ();
index_mapping_.reserve (original_no_of_points);
identity_mapping_ = true;
- for (int cloud_index = 0; cloud_index < original_no_of_points; ++cloud_index)
+ for (std::size_t cloud_index = 0; cloud_index < original_no_of_points; ++cloud_index)
{
// Check if the point is invalid
- if (!point_representation_->isValid (cloud.points[cloud_index]))
+ if (!point_representation_->isValid (cloud[cloud_index]))
{
identity_mapping_ = false;
continue;
index_mapping_.push_back (cloud_index);
- point_representation_->vectorize (cloud.points[cloud_index], cloud_ptr);
+ point_representation_->vectorize (cloud[cloud_index], cloud_ptr);
cloud_ptr += dim_;
}
}
for (const int &index : indices)
{
// Check if the point is invalid
- if (!point_representation_->isValid (cloud.points[index]))
+ if (!point_representation_->isValid (cloud[index]))
continue;
// map from 0 - N -> indices [0] - indices [N]
index_mapping_.push_back (index); // If the returned index should be for the indices vector
- point_representation_->vectorize (cloud.points[index], cloud_ptr);
+ point_representation_->vectorize (cloud[index], cloud_ptr);
cloud_ptr += dim_;
}
}
/** \brief Search for k-nearest neighbors for the given query point.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
nearestKSearch (const PointCloud &cloud, int index, int k,
std::vector<int> &k_indices, std::vector<float> &k_sqr_distances) const
{
- assert (index >= 0 && index < static_cast<int> (cloud.points.size ()) && "Out-of-bounds error in nearestKSearch!");
- return (nearestKSearch (cloud.points[index], k, k_indices, k_sqr_distances));
+ assert (index >= 0 && index < static_cast<int> (cloud.size ()) && "Out-of-bounds error in nearestKSearch!");
+ return (nearestKSearch (cloud[index], k, k_indices, k_sqr_distances));
}
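// Usage sketch for the k-nearest-neighbour overloads above, assuming a
// pcl::KdTreeFLANN<pcl::PointXYZ> whose input cloud has already been set and a
// finite query point; results come back closest-first with squared distances.
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_types.h>
#include <iostream>
#include <vector>

inline void
nearestNeighbourExample (const pcl::KdTreeFLANN<pcl::PointXYZ> &tree, const pcl::PointXYZ &query)
{
  constexpr int k = 10;
  std::vector<int> k_indices (k);
  std::vector<float> k_sqr_distances (k);
  const int found = tree.nearestKSearch (query, k, k_indices, k_sqr_distances);
  for (int i = 0; i < found; ++i)
    std::cout << "neighbour " << k_indices[i] << " at squared distance " << k_sqr_distances[i] << "\n";
}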
/** \brief Search for k-nearest neighbors for the given query point.
/** \brief Search for k-nearest neighbors for the given query point (zero-copy).
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] index a \a valid index representing a \a valid query point in the dataset given
* by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
{
if (indices_ == nullptr)
{
- assert (index >= 0 && index < static_cast<int> (input_->points.size ()) && "Out-of-bounds error in nearestKSearch!");
- return (nearestKSearch (input_->points[index], k, k_indices, k_sqr_distances));
+ assert (index >= 0 && index < static_cast<int> (input_->size ()) && "Out-of-bounds error in nearestKSearch!");
+ return (nearestKSearch ((*input_)[index], k, k_indices, k_sqr_distances));
}
assert (index >= 0 && index < static_cast<int> (indices_->size ()) && "Out-of-bounds error in nearestKSearch!");
- return (nearestKSearch (input_->points[(*indices_)[index]], k, k_indices, k_sqr_distances));
+
+ return (nearestKSearch ((*input_)[(*indices_)[index]], k, k_indices, k_sqr_distances));
}
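// Editor's note: for context, a minimal usage sketch of the k-NN interface whose
// documentation and asserts are adjusted above; demo_knn, the query point and k are
// made up for illustration:

#include <vector>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_types.h>

inline void
demo_knn (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::KdTreeFLANN<pcl::PointXYZ> tree;
  tree.setInputCloud (cloud);

  const pcl::PointXYZ query (0.0f, 0.0f, 0.0f);
  const int k = 10;
  std::vector<int> k_indices (k);
  std::vector<float> k_sqr_distances (k);
  if (tree.nearestKSearch (query, k, k_indices, k_sqr_distances) > 0)
  {
    // k_indices holds indices into *cloud, sorted by ascending squared distance
  }
}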
/** \brief Search for all the nearest neighbors of the query point in a given radius.
/** \brief Search for all the nearest neighbors of the query point in a given radius.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
std::vector<int> &k_indices, std::vector<float> &k_sqr_distances,
unsigned int max_nn = 0) const
{
- assert (index >= 0 && index < static_cast<int> (cloud.points.size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch(cloud.points[index], radius, k_indices, k_sqr_distances, max_nn));
+ assert (index >= 0 && index < static_cast<int> (cloud.size ()) && "Out-of-bounds error in radiusSearch!");
+ return (radiusSearch(cloud[index], radius, k_indices, k_sqr_distances, max_nn));
}
/** \brief Search for all the nearest neighbors of the query point in a given radius.
/** \brief Search for all the nearest neighbors of the query point in a given radius (zero-copy).
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] index a \a valid index representing a \a valid query point in the dataset given
* by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
{
if (indices_ == nullptr)
{
- assert (index >= 0 && index < static_cast<int> (input_->points.size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch (input_->points[index], radius, k_indices, k_sqr_distances, max_nn));
+ assert (index >= 0 && index < static_cast<int> (input_->size ()) && "Out-of-bounds error in radiusSearch!");
+ return (radiusSearch ((*input_)[index], radius, k_indices, k_sqr_distances, max_nn));
}
assert (index >= 0 && index < static_cast<int> (indices_->size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch (input_->points[(*indices_)[index]], radius, k_indices, k_sqr_distances, max_nn));
+ return (radiusSearch ((*input_)[(*indices_)[index]], radius, k_indices, k_sqr_distances, max_nn));
}
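// Editor's note: the radius variant documented above has the same call shape;
// continuing the sketch from the previous note (the 5 cm radius is arbitrary):
//
//   tree.radiusSearch (query, 0.05, k_indices, k_sqr_distances);  // all neighbours within 5 cm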
/** \brief Set the search epsilon precision (error bound) for nearest neighbors searches.
/** \brief Search for k-nearest neighbors for the given query point.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] point a given \a valid (i.e., finite) query point
* \param[in] k the number of neighbors to search for
/** \brief Search for all the nearest neighbors of the query point in a given radius.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] point a given \a valid (i.e., finite) query point
* \param[in] radius the radius of the sphere bounding all of p_q's neighbors
pcl::copyPointCloud (output_temp, output);
// we do not change the denseness
- output.width = int (output.points.size ());
+ output.width = output.size ();
output.height = 1;
output.is_dense = false; // set to false to be sure
#ifndef PCL_HARRIS_KEYPOINT_2D_IMPL_H_
#define PCL_HARRIS_KEYPOINT_2D_IMPL_H_
+#include <pcl/common/point_tests.h>
namespace pcl
{
else
{
std::sort (indices_->begin (), indices_->end (), [this] (int p1, int p2) { return greaterIntensityAtIndices (p1, p2); });
- const float threshold = threshold_ * response_->points[indices_->front ()].intensity;
+ const float threshold = threshold_ * (*response_)[indices_->front ()].intensity;
output.clear ();
output.reserve (response_->size());
std::vector<bool> occupency_map (response_->size (), false);
{
int idx = indices_->at (i);
const PointOutT& point_out = response_->points [idx];
- if (occupency_map[idx] || point_out.intensity < threshold || !isFinite (point_out))
+ if (occupency_map[idx] || point_out.intensity < threshold || !isXYZFinite (point_out))
continue;
#pragma omp critical
// refineCorners (output);
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.size());
+ output.width = output.size();
}
// we don not change the denseness
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#else
#pragma omp parallel for \
default(none) \
shared(output, output_size) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#endif
for (int index = 0; index < output_size; ++index)
out_point.x = in_point.x;
out_point.y = in_point.y;
out_point.z = in_point.z;
- if (isFinite (in_point))
+ if (isXYZFinite (in_point))
{
computeSecondMomentMatrix (index, covar);
float trace = covar [0] + covar [2];
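// Editor's note: the isFinite -> isXYZFinite switch above uses the helper from the
// newly included <pcl/common/point_tests.h>, which limits the finiteness test to the
// x/y/z members of the point. An illustrative stand-in (not the PCL implementation):

#include <cmath>

template <typename PointT>
inline bool
xyz_is_finite (const PointT& p)
{
  return std::isfinite (p.x) && std::isfinite (p.y) && std::isfinite (p.z);
}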
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#else
#pragma omp parallel for \
default(none) \
shared(output, output_size) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#endif
for (int index = 0; index < output_size; ++index)
out_point.y = in_point.y;
out_point.z = in_point.z;
out_point.intensity = 0;
- if (isFinite (in_point))
+ if (isXYZFinite (in_point))
{
computeSecondMomentMatrix (index, covar);
float trace = covar [0] + covar [2];
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#else
#pragma omp parallel for \
default(none) \
shared(output, output_size) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#endif
for (int index = 0; index < output_size; ++index)
out_point.y = in_point.y;
out_point.z = in_point.z;
out_point.intensity = 0;
- if (isFinite (in_point))
+ if (isXYZFinite (in_point))
{
computeSecondMomentMatrix (index, covar);
float trace = covar [0] + covar [2];
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#else
#pragma omp parallel for \
default(none) \
shared(output, output_size) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
#endif
for (int index = 0; index < output_size; ++index)
out_point.y = in_point.y;
out_point.z = in_point.z;
out_point.intensity = 0;
- if (isFinite (in_point))
+ if (isXYZFinite (in_point))
{
computeSecondMomentMatrix (index, covar);
// min egenvalue
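// Editor's note: the private(covar) -> firstprivate(covar) edits in the pragmas above
// change how each thread's copy is created: `private` gives every thread an
// uninitialized (default-constructed) copy, whereas `firstprivate` copy-initializes
// each thread's copy from the variable's value at the start of the parallel region.
// A minimal, self-contained illustration (demo_firstprivate is a made-up name,
// unrelated to the Harris code):

inline float
demo_firstprivate ()
{
  float scale = 2.0f;                               // set before the parallel region
  float results[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  #pragma omp parallel for firstprivate(scale) num_threads(2)
  for (int i = 0; i < 4; ++i)
    results[i] = scale * static_cast<float> (i);    // every thread sees scale == 2.0f
  return results[3];
}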
for (const int &neighbor : neighbors)
{
- if (std::isfinite (normals_->points[neighbor].normal_x))
+ if (std::isfinite ((*normals_)[neighbor].normal_x))
{
// nx, ny, nz, h
- norm1 = _mm_load_ps (&(normals_->points[neighbor].normal_x));
+ norm1 = _mm_load_ps (&((*normals_)[neighbor].normal_x));
// nx, nx, nx, nx
- norm2 = _mm_set1_ps (normals_->points[neighbor].normal_x);
+ norm2 = _mm_set1_ps ((*normals_)[neighbor].normal_x);
// nx * nx, nx * ny, nx * nz, nx * h
norm2 = _mm_mul_ps (norm1, norm2);
vec1 = _mm_add_ps (vec1, norm2);
// ny, ny, ny, ny
- norm2 = _mm_set1_ps (normals_->points[neighbor].normal_y);
+ norm2 = _mm_set1_ps ((*normals_)[neighbor].normal_y);
// ny * nx, ny * ny, ny * nz, ny * h
norm2 = _mm_mul_ps (norm1, norm2);
// accumulate
vec2 = _mm_add_ps (vec2, norm2);
- zz += normals_->points[neighbor].normal_z * normals_->points[neighbor].normal_z;
+ zz += (*normals_)[neighbor].normal_z * (*normals_)[neighbor].normal_z;
++count;
}
}
memset (coefficients, 0, sizeof (float) * 8);
for (std::vector<int>::const_iterator iIt = neighbors.begin(); iIt != neighbors.end(); ++iIt)
{
- if (std::isfinite (normals_->points[*iIt].normal_x))
+ if (std::isfinite ((*normals_)[*iIt].normal_x))
{
- coefficients[0] += normals_->points[*iIt].normal_x * normals_->points[*iIt].normal_x;
- coefficients[1] += normals_->points[*iIt].normal_x * normals_->points[*iIt].normal_y;
- coefficients[2] += normals_->points[*iIt].normal_x * normals_->points[*iIt].normal_z;
+ coefficients[0] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_x;
+ coefficients[1] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_y;
+ coefficients[2] += (*normals_)[*iIt].normal_x * (*normals_)[*iIt].normal_z;
- coefficients[5] += normals_->points[*iIt].normal_y * normals_->points[*iIt].normal_y;
- coefficients[6] += normals_->points[*iIt].normal_y * normals_->points[*iIt].normal_z;
- coefficients[7] += normals_->points[*iIt].normal_z * normals_->points[*iIt].normal_z;
+ coefficients[5] += (*normals_)[*iIt].normal_y * (*normals_)[*iIt].normal_y;
+ coefficients[6] += (*normals_)[*iIt].normal_y * (*normals_)[*iIt].normal_z;
+ coefficients[7] += (*normals_)[*iIt].normal_z * (*normals_)[*iIt].normal_z;
++count;
}
{
typename pcl::PointCloud<PointOutT>::Ptr response (new pcl::PointCloud<PointOutT>);
- response->points.reserve (input_->points.size());
+ response->points.reserve (input_->size());
switch (method_)
{
else
{
output.points.clear ();
- output.points.reserve (response->points.size());
+ output.points.reserve (response->size());
#pragma omp parallel for \
default(none) \
shared(output, response) \
num_threads(threads_)
- for (int idx = 0; idx < static_cast<int> (response->points.size ()); ++idx)
+ for (int idx = 0; idx < static_cast<int> (response->size ()); ++idx)
{
- if (!isFinite (response->points[idx]) ||
- !std::isfinite (response->points[idx].intensity) ||
- response->points[idx].intensity < threshold_)
+ if (!isFinite ((*response)[idx]) ||
+ !std::isfinite ((*response)[idx].intensity) ||
+ (*response)[idx].intensity < threshold_)
continue;
std::vector<int> nn_indices;
bool is_maxima = true;
for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
{
- if (response->points[idx].intensity < response->points[*iIt].intensity)
+ if ((*response)[idx].intensity < (*response)[*iIt].intensity)
{
is_maxima = false;
break;
if (is_maxima)
#pragma omp critical
{
- output.points.push_back (response->points[idx]);
+ output.points.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
refineCorners (output);
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size());
+ output.width = output.size();
output.is_dense = true;
}
}
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
for (int pIdx = 0; pIdx < static_cast<int> (input_->size ()); ++pIdx)
{
#pragma omp parallel \
for default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
for (int pIdx = 0; pIdx < static_cast<int> (input_->size ()); ++pIdx)
{
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar) \
+ firstprivate(covar) \
num_threads(threads_)
for (int pIdx = 0; pIdx < static_cast<int> (input_->size ()); ++pIdx)
{
pcl::HarrisKeypoint3D<PointInT, PointOutT, NormalT>::responseCurvature (PointCloudOut &output) const
{
PointOutT point;
- for (std::size_t idx = 0; idx < input_->points.size(); ++idx)
+ for (std::size_t idx = 0; idx < input_->size(); ++idx)
{
- point.x = input_->points[idx].x;
- point.y = input_->points[idx].y;
- point.z = input_->points[idx].z;
- point.intensity = normals_->points[idx].curvature;
+ point.x = (*input_)[idx].x;
+ point.y = (*input_)[idx].y;
+ point.z = (*input_)[idx].z;
+ point.intensity = (*normals_)[idx].curvature;
output.points.push_back(point);
}
// does not change the order
#pragma omp parallel for \
default(none) \
shared(output) \
- private(covar, covariance_matrix) \
+ firstprivate(covar, covariance_matrix) \
num_threads(threads_)
for (int pIdx = 0; pIdx < static_cast<int> (input_->size ()); ++pIdx)
{
#pragma omp parallel for \
default(none) \
shared(corners) \
- private(nnT, NNT, NNTInv, NNTp, diff) \
+ firstprivate(nnT, NNT, NNTInv, NNTp, diff) \
num_threads(threads_)
for (int cIdx = 0; cIdx < static_cast<int> (corners.size ()); ++cIdx)
{
tree_->radiusSearch (corner, search_radius_, nn_indices, nn_dists);
for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
{
- if (!std::isfinite (normals_->points[*iIt].normal_x))
+ if (!std::isfinite ((*normals_)[*iIt].normal_x))
continue;
- nnT = normals_->points[*iIt].getNormalVector3fMap () * normals_->points[*iIt].getNormalVector3fMap ().transpose();
+ nnT = (*normals_)[*iIt].getNormalVector3fMap () * (*normals_)[*iIt].getNormalVector3fMap ().transpose();
NNT += nnT;
- NNTp += nnT * surface_->points[*iIt].getVector3fMap ();
+ NNTp += nnT * (*surface_)[*iIt].getVector3fMap ();
}
if (invert3x3SymMatrix (NNT, NNTInv) != 0)
corners[cIdx].getVector3fMap () = NNTInv * NNTp;
#include <pcl/keypoints/harris_6d.h>
#include <pcl/common/io.h>
-#include <pcl/filters/passthrough.h>
-#include <pcl/filters/extract_indices.h>
#include <pcl/features/normal_3d.h>
//#include <pcl/features/fast_intensity_gradient.h>
#include <pcl/features/intensity_gradient.h>
unsigned count = 0;
for (const int &neighbor : neighbors)
{
- if (std::isfinite (normals_->points[neighbor].normal_x) && std::isfinite (intensity_gradients_->points[neighbor].gradient [0]))
+ if (std::isfinite ((*normals_)[neighbor].normal_x) && std::isfinite ((*intensity_gradients_)[neighbor].gradient [0]))
{
- coefficients[ 0] += normals_->points[neighbor].normal_x * normals_->points[neighbor].normal_x;
- coefficients[ 1] += normals_->points[neighbor].normal_x * normals_->points[neighbor].normal_y;
- coefficients[ 2] += normals_->points[neighbor].normal_x * normals_->points[neighbor].normal_z;
- coefficients[ 3] += normals_->points[neighbor].normal_x * intensity_gradients_->points[neighbor].gradient [0];
- coefficients[ 4] += normals_->points[neighbor].normal_x * intensity_gradients_->points[neighbor].gradient [1];
- coefficients[ 5] += normals_->points[neighbor].normal_x * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[ 0] += (*normals_)[neighbor].normal_x * (*normals_)[neighbor].normal_x;
+ coefficients[ 1] += (*normals_)[neighbor].normal_x * (*normals_)[neighbor].normal_y;
+ coefficients[ 2] += (*normals_)[neighbor].normal_x * (*normals_)[neighbor].normal_z;
+ coefficients[ 3] += (*normals_)[neighbor].normal_x * (*intensity_gradients_)[neighbor].gradient [0];
+ coefficients[ 4] += (*normals_)[neighbor].normal_x * (*intensity_gradients_)[neighbor].gradient [1];
+ coefficients[ 5] += (*normals_)[neighbor].normal_x * (*intensity_gradients_)[neighbor].gradient [2];
- coefficients[ 6] += normals_->points[neighbor].normal_y * normals_->points[neighbor].normal_y;
- coefficients[ 7] += normals_->points[neighbor].normal_y * normals_->points[neighbor].normal_z;
- coefficients[ 8] += normals_->points[neighbor].normal_y * intensity_gradients_->points[neighbor].gradient [0];
- coefficients[ 9] += normals_->points[neighbor].normal_y * intensity_gradients_->points[neighbor].gradient [1];
- coefficients[10] += normals_->points[neighbor].normal_y * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[ 6] += (*normals_)[neighbor].normal_y * (*normals_)[neighbor].normal_y;
+ coefficients[ 7] += (*normals_)[neighbor].normal_y * (*normals_)[neighbor].normal_z;
+ coefficients[ 8] += (*normals_)[neighbor].normal_y * (*intensity_gradients_)[neighbor].gradient [0];
+ coefficients[ 9] += (*normals_)[neighbor].normal_y * (*intensity_gradients_)[neighbor].gradient [1];
+ coefficients[10] += (*normals_)[neighbor].normal_y * (*intensity_gradients_)[neighbor].gradient [2];
- coefficients[11] += normals_->points[neighbor].normal_z * normals_->points[neighbor].normal_z;
- coefficients[12] += normals_->points[neighbor].normal_z * intensity_gradients_->points[neighbor].gradient [0];
- coefficients[13] += normals_->points[neighbor].normal_z * intensity_gradients_->points[neighbor].gradient [1];
- coefficients[14] += normals_->points[neighbor].normal_z * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[11] += (*normals_)[neighbor].normal_z * (*normals_)[neighbor].normal_z;
+ coefficients[12] += (*normals_)[neighbor].normal_z * (*intensity_gradients_)[neighbor].gradient [0];
+ coefficients[13] += (*normals_)[neighbor].normal_z * (*intensity_gradients_)[neighbor].gradient [1];
+ coefficients[14] += (*normals_)[neighbor].normal_z * (*intensity_gradients_)[neighbor].gradient [2];
- coefficients[15] += intensity_gradients_->points[neighbor].gradient [0] * intensity_gradients_->points[neighbor].gradient [0];
- coefficients[16] += intensity_gradients_->points[neighbor].gradient [0] * intensity_gradients_->points[neighbor].gradient [1];
- coefficients[17] += intensity_gradients_->points[neighbor].gradient [0] * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[15] += (*intensity_gradients_)[neighbor].gradient [0] * (*intensity_gradients_)[neighbor].gradient [0];
+ coefficients[16] += (*intensity_gradients_)[neighbor].gradient [0] * (*intensity_gradients_)[neighbor].gradient [1];
+ coefficients[17] += (*intensity_gradients_)[neighbor].gradient [0] * (*intensity_gradients_)[neighbor].gradient [2];
- coefficients[18] += intensity_gradients_->points[neighbor].gradient [1] * intensity_gradients_->points[neighbor].gradient [1];
- coefficients[19] += intensity_gradients_->points[neighbor].gradient [1] * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[18] += (*intensity_gradients_)[neighbor].gradient [1] * (*intensity_gradients_)[neighbor].gradient [1];
+ coefficients[19] += (*intensity_gradients_)[neighbor].gradient [1] * (*intensity_gradients_)[neighbor].gradient [2];
- coefficients[20] += intensity_gradients_->points[neighbor].gradient [2] * intensity_gradients_->points[neighbor].gradient [2];
+ coefficients[20] += (*intensity_gradients_)[neighbor].gradient [2] * (*intensity_gradients_)[neighbor].gradient [2];
++count;
}
}
typename pcl::PointCloud<PointOutT>::Ptr response (new pcl::PointCloud<PointOutT>);
- response->points.reserve (input_->points.size());
+ response->points.reserve (input_->size());
responseTomasi(*response);
// just return the response
else
{
output.points.clear ();
- output.points.reserve (response->points.size());
+ output.points.reserve (response->size());
#pragma omp parallel for \
default(none) \
num_threads(threads_)
- for (std::size_t idx = 0; idx < response->points.size (); ++idx)
+ for (std::size_t idx = 0; idx < response->size (); ++idx)
{
- if (!isFinite (response->points[idx]) || response->points[idx].intensity < threshold_)
+ if (!isFinite ((*response)[idx]) || (*response)[idx].intensity < threshold_)
continue;
std::vector<int> nn_indices;
bool is_maxima = true;
for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
{
- if (response->points[idx].intensity < response->points[*iIt].intensity)
+ if ((*response)[idx].intensity < (*response)[*iIt].intensity)
{
is_maxima = false;
break;
if (is_maxima)
#pragma omp critical
{
- output.points.push_back (response->points[idx]);
+ output.points.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
refineCorners (output);
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size());
+ output.width = output.size();
output.is_dense = true;
}
}
#pragma omp parallel for \
default(none) \
- private(pointOut, covar, covariance, solver) \
+ firstprivate(pointOut, covar, covariance, solver) \
num_threads(threads_)
for (unsigned pIdx = 0; pIdx < input_->size (); ++pIdx)
{
search.radiusSearch (corner, search_radius_, nn_indices, nn_dists);
for (std::vector<int>::const_iterator iIt = nn_indices.begin(); iIt != nn_indices.end(); ++iIt)
{
- normal = reinterpret_cast<const Eigen::Vector3f*> (&(normals_->points[*iIt].normal_x));
- point = reinterpret_cast<const Eigen::Vector3f*> (&(surface_->points[*iIt].x));
+ normal = reinterpret_cast<const Eigen::Vector3f*> (&((*normals_)[*iIt].normal_x));
+ point = reinterpret_cast<const Eigen::Vector3f*> (&((*surface_)[*iIt].x));
nnT = (*normal) * (normal->transpose());
NNT += nnT;
NNTp += nnT * (*point);
#pragma omp parallel for \
default(none) \
shared(angle_threshold, boundary_estimator, border_radius, edge_points, input) \
- private(u, v) \
+ firstprivate(u, v) \
num_threads(threads_)
- for (int index = 0; index < int (input.points.size ()); index++)
+ for (int index = 0; index < int (input.size ()); index++)
{
edge_points[index] = false;
- PointInT current_point = input.points[index];
+ PointInT current_point = input[index];
if (pcl::isFinite(current_point))
{
if (n_neighbors >= min_neighbors_)
{
- boundary_estimator.getCoordinateSystemOnPlane (normals_->points[index], u, v);
+ boundary_estimator.getCoordinateSystemOnPlane ((*normals_)[index], u, v);
if (boundary_estimator.isBoundaryPoint (input, static_cast<int> (index), nn_indices, u, v, angle_threshold))
edge_points[index] = true;
template<typename PointInT, typename PointOutT, typename NormalT> void
pcl::ISSKeypoint3D<PointInT, PointOutT, NormalT>::getScatterMatrix (const int& current_index, Eigen::Matrix3d &cov_m)
{
- const PointInT& current_point = (*input_).points[current_index];
+ const PointInT& current_point = (*input_)[current_index];
double central_point[3];
memset(central_point, 0, sizeof(double) * 3);
for (int n_idx = 0; n_idx < n_neighbors; n_idx++)
{
- const PointInT& n_point = (*input_).points[nn_indices[n_idx]];
+ const PointInT& n_point = (*input_)[nn_indices[n_idx]];
double neigh_point[3];
memset(neigh_point, 0, sizeof(double) * 3);
for (int index = 0; index < int (input_->size ()); index++)
{
borders[index] = false;
- PointInT current_point = input_->points[index];
+ PointInT current_point = (*input_)[index];
if ((border_radius_ > 0.0) && (pcl::isFinite(current_point)))
{
#else
int tid = 0;
#endif
- PointInT current_point = input_->points[index];
+ PointInT current_point = (*input_)[index];
if ((!borders[index]) && pcl::isFinite(current_point))
{
for (int index = 0; index < int (input_->size ()); index++)
{
feat_max [index] = false;
- PointInT current_point = input_->points[index];
+ PointInT current_point = (*input_)[index];
if ((third_eigen_value_[index] > 0.0) && (pcl::isFinite(current_point)))
{
#pragma omp critical
{
PointOutT p;
- p.getVector3fMap () = input_->points[index].getVector3fMap ();
+ p.getVector3fMap () = (*input_)[index].getVector3fMap ();
output.points.push_back(p);
keypoints_indices_->indices.push_back (index);
}
}
output.header = input_->header;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.height = 1;
// Clear the contents of variables and arrays before the beginning of the next computation.
// Make sure the downsampled cloud still has enough points
const std::size_t min_nr_points = 25;
- if (cloud->points.size () < min_nr_points)
+ if (cloud->size () < min_nr_points)
break;
// Update the KdTree with the downsampled points
// Set final properties
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.header = input_->header;
output.sensor_origin_ = input_->sensor_origin_;
output.sensor_orientation_ = input_->sensor_orientation_;
std::vector<int> extrema_indices, extrema_scales;
findScaleSpaceExtrema (input, tree, diff_of_gauss, extrema_indices, extrema_scales);
- output.points.reserve (output.points.size () + extrema_indices.size ());
+ output.points.reserve (output.size () + extrema_indices.size ());
// Save scale?
if (scale_idx_ != -1)
{
PointOutT keypoint;
const int &keypoint_index = extrema_indices[i_keypoint];
- keypoint.x = input.points[keypoint_index].x;
- keypoint.y = input.points[keypoint_index].y;
- keypoint.z = input.points[keypoint_index].z;
+ keypoint.x = input[keypoint_index].x;
+ keypoint.y = input[keypoint_index].y;
+ keypoint.z = input[keypoint_index].z;
memcpy (reinterpret_cast<char*> (&keypoint) + out_fields_[scale_idx_].offset,
&scales[extrema_scales[i_keypoint]], sizeof (float));
output.points.push_back (keypoint);
for (const int &keypoint_index : extrema_indices)
{
PointOutT keypoint;
- keypoint.x = input.points[keypoint_index].x;
- keypoint.y = input.points[keypoint_index].y;
- keypoint.z = input.points[keypoint_index].z;
+ keypoint.x = input[keypoint_index].x;
+ keypoint.y = input[keypoint_index].y;
+ keypoint.z = input[keypoint_index].z;
output.points.push_back (keypoint);
}
float denominator = 0.0f;
for (std::size_t i_neighbor = 0; i_neighbor < nn_indices.size (); ++i_neighbor)
{
- const float &value = getFieldValue_ (input.points[nn_indices[i_neighbor]]);
+ const float &value = getFieldValue_ (input[nn_indices[i_neighbor]]);
const float &dist_sqr = nn_dist[i_neighbor];
if (dist_sqr <= 9*sigma_sqr)
{
std::vector<std::vector<float> > diffs (scales_.size ());
// The cloud with the smallest scale has no differences
- std::vector<float> aux_diffs (input_->points.size (), 0.0f);
+ std::vector<float> aux_diffs (input_->size (), 0.0f);
diffs[scales_[0].second] = aux_diffs;
cloud_trees_[scales_[0].second]->setInputCloud (clouds_[scales_[0].second]);
{
std::size_t cloud_i = scales_[scale_i].second,
cloud_i_minus_one = scales_[scale_i - 1].second;
- diffs[cloud_i].resize (input_->points.size ());
+ diffs[cloud_i].resize (input_->size ());
PCL_INFO ("cloud_i %u cloud_i_minus_one %u\n", cloud_i, cloud_i_minus_one);
- for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
- diffs[cloud_i][point_i] = cloud_normals_[cloud_i]->points[point_i].getNormalVector3fMap ().dot (
- clouds_[cloud_i]->points[point_i].getVector3fMap () - clouds_[cloud_i_minus_one]->points[point_i].getVector3fMap ());
+ for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
+ diffs[cloud_i][point_i] = (*cloud_normals_[cloud_i])[point_i].getNormalVector3fMap ().dot (
+ (*clouds_[cloud_i])[point_i].getVector3fMap () - (*clouds_[cloud_i_minus_one])[point_i].getVector3fMap ());
// Setup kdtree for this cloud
cloud_trees_[cloud_i]->setInputCloud (clouds_[cloud_i]);
// Find minima and maxima in differences inside the input cloud
typename pcl::search::Search<PointT>::Ptr input_tree = cloud_trees_.back ();
- for (int point_i = 0; point_i < static_cast<int> (input_->points.size ()); ++point_i)
+ for (int point_i = 0; point_i < static_cast<int> (input_->size ()); ++point_i)
{
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// check if point was minimum/maximum over all the scales
if (passed_min || passed_max)
{
- output.points.push_back (input_->points[point_i]);
+ output.points.push_back ((*input_)[point_i]);
keypoints_indices_->indices.push_back (point_i);
}
}
}
output.header = input_->header;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.height = 1;
// debug stuff
// for (std::size_t scale_i = 0; scale_i < scales_.size (); ++scale_i)
// {
// PointCloud<PointXYZI>::Ptr debug_cloud (new PointCloud<PointXYZI> ());
-// debug_cloud->points.resize (input_->points.size ());
+// debug_cloud->points.resize (input_->size ());
// debug_cloud->width = input_->width;
// debug_cloud->height = input_->height;
-// for (std::size_t point_i = 0; point_i < input_->points.size (); ++point_i)
+// for (std::size_t point_i = 0; point_i < input_->size (); ++point_i)
// {
-// debug_cloud->points[point_i].intensity = diffs[scales_[scale_i].second][point_i];
-// debug_cloud->points[point_i].x = input_->points[point_i].x;
-// debug_cloud->points[point_i].y = input_->points[point_i].y;
-// debug_cloud->points[point_i].z = input_->points[point_i].z;
+// (*debug_cloud)[point_i].intensity = diffs[scales_[scale_i].second][point_i];
+// (*debug_cloud)[point_i].x = (*input_)[point_i].x;
+// (*debug_cloud)[point_i].y = (*input_)[point_i].y;
+// (*debug_cloud)[point_i].z = (*input_)[point_i].z;
// }
// char str[512]; sprintf (str, "diffs_%2d.pcd", scale_i);
return false;
}
- if (input_->points.size () != normals_->points.size ())
+ if (input_->size () != normals_->size ())
{
PCL_ERROR ("[pcl::SmoothedSurfacesKeypoints::initCompute] The input cloud and the input normals differ in size\n");
return false;
for (std::size_t cloud_i = 0; cloud_i < clouds_.size (); ++cloud_i)
{
- if (clouds_[cloud_i]->points.size () != input_->points.size ())
+ if (clouds_[cloud_i]->size () != input_->size ())
{
- PCL_ERROR ("[pcl::SmoothedSurfacesKeypoints::initCompute] Cloud %d does not have the same number of points as the input cloud\n", cloud_i);
+ PCL_ERROR("[pcl::SmoothedSurfacesKeypoints::initCompute] Cloud %zu does not have "
+ "the same number of points as the input cloud\n",
+ cloud_i);
return false;
}
- if (cloud_normals_[cloud_i]->points.size () != input_->points.size ())
+ if (cloud_normals_[cloud_i]->size () != input_->size ())
{
- PCL_ERROR ("[pcl::SmoothedSurfacesKeypoints::initCompute] Normals for cloud %d do not have the same number of points as the input cloud\n", cloud_i);
+ PCL_ERROR("[pcl::SmoothedSurfacesKeypoints::initCompute] Normals for cloud %zu "
+ "do not have the same number of points as the input cloud\n",
+ cloud_i);
return false;
}
}
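// Editor's note: besides the reflow, the PCL_ERROR calls above switch %d to %zu,
// the printf-style conversion that matches the std::size_t loop counter cloud_i;
// passing a std::size_t to %d is a type mismatch. For example:
//
//   std::size_t n = clouds.size ();
//   PCL_ERROR ("have %zu clouds\n", n);   // %zu pairs with std::size_t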
// memset (&coefficients[0], 0, sizeof (float) * 6);
// for (std::vector<int>::const_iterator index = neighbors.begin(); index != neighbors.end(); ++index)
// {
-// if (std::isfinite (normals_->points[*index].normal_x))
+// if (std::isfinite ((*normals_)[*index].normal_x))
// {
-// Eigen::Vector3f diff = normals_->points[*index].getNormal3fMap () - nucleus_normal.getNormal3fMap ();
+// Eigen::Vector3f diff = (*normals_)[*index].getNormal3fMap () - nucleus_normal.getNormal3fMap ();
// float c = diff.norm () / t;
// c = -1 * pow (c, 6.0);
// c = std::exp (c);
-// Eigen::Vector3f xyz = surface_->points[*index].getVector3fMap ();
+// Eigen::Vector3f xyz = (*surface_)[*index].getVector3fMap ();
// centroid += c * xyz;
// area += c;
// coefficients[0] += c * (x0 - xyz.x) * (x0 - xyz.x);
// template <typename PointInT, typename PointOutT, typename NormalT, typename IntensityT> bool
// pcl::SUSANKeypoint<PointInT, PointOutT, NormalT, IntensityT>::isWithinSusan2D (int nucleus, int neighbor) const
// {
-// return (std::abs (intensity_ (surface_->points[nucleus]) -
-// intensity_ (surface_->points[neighbor])) <= intensity_threshold_);
+// return (std::abs (intensity_ ((*surface_)[nucleus]) -
+// intensity_ ((*surface_)[neighbor])) <= intensity_threshold_);
// }
// template <typename PointInT, typename PointOutT, typename NormalT, typename IntensityT> bool
// pcl::SUSANKeypoint<PointInT, PointOutT, NormalT, IntensityT>::isWithinSusan3D (int nucleus, int neighbor) const
// {
// Eigen::Vector3f nucleus_normal = normals_->point[nucleus].getVector3fMap ();
-// return (1 - nucleus_normal.dot (normals_->points[*index].getNormalVector3fMap ()) <= angular_threshold_);
+// return (1 - nucleus_normal.dot ((*normals_)[*index].getNormalVector3fMap ()) <= angular_threshold_);
// }
// template <typename PointInT, typename PointOutT, typename NormalT, typename IntensityT> bool
// pcl::SUSANKeypoint<PointInT, PointOutT, NormalT, IntensityT>::isWithinSusanH (int nucleus, int neighbor) const
// {
// Eigen::Vector3f nucleus_normal = normals_->point[nucleus].getVector3fMap ();
-// return ((1 - nucleus_normal.dot (normals_->points[*index].getNormalVector3fMap ()) <= angular_threshold_) ||
-// (std::abs (intensity_ (surface_->points[nucleus]) - intensity_ (surface_->points[neighbor])) <= intensity_threshold_));
+// return ((1 - nucleus_normal.dot ((*normals_)[*index].getNormalVector3fMap ()) <= angular_threshold_) ||
+// (std::abs (intensity_ ((*surface_)[nucleus]) - intensity_ ((*surface_)[neighbor])) <= intensity_threshold_));
// }
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<int> usan; usan.reserve (nn_indices.size () - 1);
for (std::vector<int>::const_iterator index = nn_indices.begin(); index != nn_indices.end(); ++index)
{
- if ((*index != point_index) && std::isfinite (normals_->points[*index].normal_x))
+ if ((*index != point_index) && std::isfinite ((*normals_)[*index].normal_x))
{
// if the point fulfill condition
- if ((std::abs (nucleus_intensity - intensity_ (input_->points[*index])) <= intensity_threshold_) ||
- (1 - nucleus_normal.dot (normals_->points[*index].getNormalVector3fMap ()) <= angular_threshold_))
+ if ((std::abs (nucleus_intensity - intensity_ ((*input_)[*index])) <= intensity_threshold_) ||
+ (1 - nucleus_normal.dot ((*normals_)[*index].getNormalVector3fMap ()) <= angular_threshold_))
{
++area;
- centroid += input_->points[*index].getVector3fMap ();
+ centroid += (*input_)[*index].getVector3fMap ();
usan.push_back (*index);
}
}
auto usan_it = usan.cbegin ();
for (; usan_it != usan.cend (); ++usan_it)
{
- if (!isWithinNucleusCentroid (nucleus, centroid, nc, input_->points[*usan_it]))
+ if (!isWithinNucleusCentroid (nucleus, centroid, nc, (*input_)[*usan_it]))
break;
}
// All points within usan lies on the segment [nucleus centroid]
}
response->height = 1;
- response->width = static_cast<std::uint32_t> (response->size ());
+ response->width = response->size ();
if (!nonmax_)
{
else
{
output.points.clear ();
- output.points.reserve (response->points.size());
+ output.points.reserve (response->size());
- for (int idx = 0; idx < static_cast<int> (response->points.size ()); ++idx)
+ for (int idx = 0; idx < static_cast<int> (response->size ()); ++idx)
{
const PointOutT& point_in = response->points [idx];
const NormalT& normal_in = normals_->points [idx];
- //const float intensity = response->points[idx].intensity;
- const float intensity = intensity_out_ (response->points[idx]);
+ //const float intensity = (*response)[idx].intensity;
+ const float intensity = intensity_out_ ((*response)[idx]);
if (!isFinite (point_in) || !isFinite (normal_in) || (intensity == 0))
continue;
std::vector<int> nn_indices;
bool is_minima = true;
for (std::vector<int>::const_iterator nn_it = nn_indices.begin(); nn_it != nn_indices.end(); ++nn_it)
{
-// if (intensity > response->points[*nn_it].intensity)
- if (intensity > intensity_out_ (response->points[*nn_it]))
+// if (intensity > (*response)[*nn_it].intensity)
+ if (intensity > intensity_out_ ((*response)[*nn_it]))
{
is_minima = false;
break;
}
if (is_minima)
{
- output.points.push_back (response->points[idx]);
+ output.points.push_back ((*response)[idx]);
keypoints_indices_->indices.push_back (idx);
}
}
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size());
+ output.width = output.size();
output.is_dense = true;
}
}
for (std::size_t i = 0; i < indices.size (); ++i)
{
int idx = indices[i];
- if ((response_->points[idx] < second_threshold_) || occupency_map[idx])
+ if (((*response_)[idx] < second_threshold_) || occupency_map[idx])
continue;
PointOutT p;
- p.getVector3fMap () = input_->points[idx].getVector3fMap ();
+ p.getVector3fMap () = (*input_)[idx].getVector3fMap ();
p.intensity = response_->points [idx];
#pragma omp critical
}
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.size());
+ output.width = output.size();
// we don not change the denseness
output.is_dense = input_->is_dense;
}
for (int i = 0; i < static_cast<int>(indices.size ()); ++i)
{
int idx = indices[static_cast<std::size_t>(i)];
- if ((response_->points[idx] < second_threshold_) || occupency_map[idx])
+ if (((*response_)[idx] < second_threshold_) || occupency_map[idx])
continue;
PointOutT p;
- p.getVector3fMap () = input_->points[idx].getVector3fMap ();
+ p.getVector3fMap () = (*input_)[idx].getVector3fMap ();
p.intensity = response_->points [idx];
#pragma omp critical
}
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.size());
+ output.width = output.size();
// we don not change the denseness
output.is_dense = true;
}
detect (&(intensity_data[0]), output.points);
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
/////////////////////////////////////////////////////////////////////////////////////////
detect (&(intensity_data[0]), output.points);
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
}
/////////////////////////////////////////////////////////////////////////////////////////
}
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.is_dense = input.is_dense;
}
#include <pcl/pcl_macros.h>
#include <pcl/keypoints/brisk_2d.h>
#include <pcl/point_types.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/impl/instantiate.hpp>
#if defined(__SSSE3__) && !defined(__i386__)
#include <tmmintrin.h>
int width, int height,
float xf, float yf, float scale)
{
- (void)height;
+ pcl::utils::ignore(height);
assert (!mat.empty ());
// get the position
const int x = int (std::floor (xf));
std::vector<unsigned char>& dstimg,
int dstwidth, int dstheight)
{
- (void)dstheight;
+ pcl::utils::ignore(dstheight);
#if defined(__SSSE3__) && !defined(__i386__)
const unsigned short leftoverCols = static_cast<unsigned short> ((srcwidth % 16) / 2); // take care with border...
const bool noleftover = (srcwidth % 16) == 0; // note: leftoverCols can be zero but this still false...
}
}
#else
- (void) (srcimg);
- (void) (srcwidth);
- (void) (srcheight);
- (void) (dstimg);
- (void) (dstwidth);
+ pcl::utils::ignore(srcimg, srcwidth, srcheight, dstimg, dstwidth);
PCL_ERROR("brisk without SSSE3 support not implemented");
#endif
}
std::vector<unsigned char>& dstimg,
int dstwidth, int dstheight)
{
- (void)dstheight;
+ pcl::utils::ignore(dstheight);
#if defined(__SSSE3__) && !defined(__i386__)
const unsigned short leftoverCols = static_cast<unsigned short> (((srcwidth / 3) * 3) % 15);// take care with border...
p_dest2 = p_dest1 + dstwidth;
}
#else
- (void) (srcimg);
- (void) (srcwidth);
- (void) (srcheight);
- (void) (dstimg);
- (void) (dstwidth);
+ pcl::utils::ignore(srcimg, srcwidth, srcheight, dstimg, dstwidth);
PCL_ERROR("brisk without SSSE3 support not implemented");
#endif
}
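// Editor's note: throughout this changeset the `(void)x;` casts used to silence
// unused-variable/dead-store warnings are replaced by pcl::utils::ignore from
// <pcl/common/utils.h>, which takes any number of arguments. Conceptually it is a
// variadic no-op along these lines (illustrative sketch, not the exact PCL source;
// the demo namespace is made up):

namespace demo
{
  template <typename... T>
  void
  ignore (const T&...) {}
} // namespace demo

// usage: demo::ignore (res, readlen);   // marks both values as intentionally unused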
int y = index/range_image.width,
x = index - y*range_image.width;
- const BorderTraits& border_traits = border_descriptions.points[index].traits;
+ const BorderTraits& border_traits = border_descriptions[index].traits;
if (border_traits[BORDER_TRAIT__SHADOW_BORDER] || border_traits[BORDER_TRAIT__VEIL_POINT])
continue;
int index2 = neighbors_to_check[neighbors_to_check_idx];
if (!range_image.isValid (index2))
continue;
- const BorderTraits& border_traits2 = border_descriptions.points[index2].traits;
+ const BorderTraits& border_traits2 = border_descriptions[index2].traits;
if (border_traits2[BORDER_TRAIT__SHADOW_BORDER] || border_traits2[BORDER_TRAIT__VEIL_POINT])
continue;
int y2 = index2/range_image.width,
interest_image_[index] = 0.0f;
if (!range_image.isValid (index))
continue;
- const BorderTraits& border_traits = border_descriptions.points[index].traits;
+ const BorderTraits& border_traits = border_descriptions[index].traits;
if (border_traits[BORDER_TRAIT__SHADOW_BORDER] || border_traits[BORDER_TRAIT__VEIL_POINT])
continue;
interest_image_[index] = 2.0f;
int index2 = neighbors_to_check[neighbors_to_check_idx];
if (!range_image.isValid (index2))
continue;
- const BorderTraits& border_traits2 = border_descriptions.points[index2].traits;
+ const BorderTraits& border_traits2 = border_descriptions[index2].traits;
if (border_traits2[BORDER_TRAIT__SHADOW_BORDER] || border_traits2[BORDER_TRAIT__VEIL_POINT])
continue;
int y2 = index2/range_image.width,
if (invalid_beams[i] || !range_image.isValid (x2, y2))
continue;
int index2 = y2*width + x2;
- const BorderTraits& neighbor_border_traits = border_descriptions.points[index2].traits;
+ const BorderTraits& neighbor_border_traits = border_descriptions[index2].traits;
if (neighbor_border_traits[BORDER_TRAIT__SHADOW_BORDER] || neighbor_border_traits[BORDER_TRAIT__VEIL_POINT])
{
invalid_beams[i] = true;
if (range_image.isValid (image_x, image_y))
is_interest_point_image_[image_y*width + image_x] = true;
}
- interest_points_->width = static_cast<std::uint32_t> (interest_points_->points.size ());
+ interest_points_->width = interest_points_->size ();
interest_points_->height = 1;
interest_points_->is_dense = true;
}
#pragma once
#include <pcl/common/common.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/ml/stats_estimator.h>
#include <istream>
const float threshold,
unsigned char& branch_index) const override
{
- (void)flag;
+ pcl::utils::ignore(flag);
branch_index = (result > threshold) ? 1 : 0;
}
};
#include <pcl/ml/pairwise_potential.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
namespace pcl {
PCL_INFO("Use X Y Z as input data\n");
// create input data
/*
- for (std::size_t i = 0; i < input_->points.size (); i++)
+ for (std::size_t i = 0; i < input_->size (); i++)
{
DataPoint data (3);
- data[0] = input_->points[i].data[0];
+ data[0] = (*input_)[i].data[0];
std::cout << "x index: " << x_index << std::endl;
float x = 0.0;
- memcpy(&x, &input_->points[0] + fields[x_index].offset, sizeof(float));
+ memcpy(&x, &(*input_)[0] + fields[x_index].offset, sizeof(float));
std::cout << "xxx: " << x << std::endl;
- // memcpy (&x, reinterpret_cast<float*> (&input_->points[0]) + x_index, sizeof
+ // memcpy (&x, reinterpret_cast<float*> (&(*input_)[0]) + x_index, sizeof
// (float));
// int rgba_index = 1;
// pcl::RGB rgb;
// memcpy (&rgb, reinterpret_cast<const char*>
- // (&input_->points[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
+ // (&(*input_)[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
}
// if cluster field name is set, check if field name is valid
else {
for (std::size_t i = 0; i < fields[vfh_idx].count; ++i)
{
- //vfh.second[i] = point.points[0].histogram[i];
+ //vfh.second[i] = point[0].histogram[i];
}
*/
#pragma once
-#include <pcl/common/io.h>
-#include <pcl/console/parse.h>
-#include <pcl/console/print.h>
#include <pcl/memory.h>
-#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
#include <set>
+#include <vector> // for vector
namespace pcl {
#pragma GCC system_header
#endif
-#include <pcl/common/eigen.h>
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
-#include <boost/intrusive/hashtable.hpp>
-
-#include <cassert>
-#include <cmath>
-#include <cstdio>
-#include <cstdlib>
#include <cstring>
-#include <map>
+#include <iostream> // for size_t, operator<<, endl, cout
#include <vector>
namespace pcl {
#pragma once
-#include <pcl/common/common.h>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
#include <istream>
-#include <ostream>
namespace pcl {
#pragma once
-#include <pcl/common/common.h>
+#include <pcl/pcl_exports.h> // for PCL_EXPORTS
#include <istream>
-#include <ostream>
namespace pcl {
#pragma once
-#include <pcl/common/eigen.h>
+#include <pcl/console/print.h> // for PCL_ERROR
#include <pcl/ml/svm.h>
-#include <cctype>
-#include <cerrno>
+#include <cassert> // for assert
#include <cstdio>
#include <cstdlib>
-#include <cstring>
-#include <fstream>
-#include <iostream>
+#include <limits> // for numeric_limits
+#include <string> // for string
#include <vector>
#define Malloc(type, n) static_cast<type*>(malloc((n) * sizeof(type)))
// create input data
/*
- for (std::size_t i = 0; i < input_->points.size (); i++)
+ for (std::size_t i = 0; i < input_->size (); i++)
{
DataPoint data (3);
- data[0] = input_->points[i].data[0];
+ data[0] = (*input_)[i].data[0];
std::cout << "x index: " << x_index << std::endl;
float x = 0.0;
-memcpy (&x, &input_->points[0] + fields[x_index].offset, sizeof(float));
+memcpy (&x, &(*input_)[0] + fields[x_index].offset, sizeof(float));
std::cout << "xxx: " << x << std::endl;
*/
-// memcpy (&x, reinterpret_cast<float*> (&input_->points[0]) + x_index, sizeof (float));
+// memcpy (&x, reinterpret_cast<float*> (&(*input_)[0]) + x_index, sizeof (float));
// int rgba_index = 1;
// pcl::RGB rgb;
// memcpy (&rgb, reinterpret_cast<const char*>
-// (&input_->points[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
+// (&(*input_)[index_vector[i].cloud_point_index]) + rgba_index, sizeof (RGB));
/*
}
for (std::size_t i = 0; i < fields[vfh_idx].count; ++i)
{
- //vfh.second[i] = point.points[0].histogram[i];
+ //vfh.second[i] = point[0].histogram[i];
}
*/
*/
#include <pcl/ml/permutohedral.h>
+#include <pcl/pcl_macros.h> // for pcl_round
+
+#include <map> // for multimap
pcl::Permutohedral::Permutohedral()
: N_(0)
#include <pcl/ml/point_xy_32f.h>
-#include <istream>
-#include <ostream>
-
pcl::PointXY32f
pcl::PointXY32f::randomPoint(const int min_x,
const int max_x,
#include <pcl/ml/point_xy_32i.h>
-#include <istream>
-#include <ostream>
-
pcl::PointXY32i
pcl::PointXY32i::randomPoint(const int min_x,
const int max_x,
#ifndef _LIBSVM_HPP_
#define _LIBSVM_HPP_
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/ml/svm.h>
-#include <cctype>
-#include <cfloat>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
-#include <iostream>
int libsvm_version = LIBSVM_VERSION;
using Qfloat = float;
using schar = signed char;
free(model);
return nullptr;
}
- (void)res; // to inform clang-tidy to ignore the dead-stores
+ pcl::utils::ignore(res); // to inform clang-tidy to ignore the dead-stores
}
// read sv_coef and SV
#include <pcl/ml/svm_wrapper.h>
#include <cassert>
+#include <cmath> // for isfinite
+#include <cstring> // for strrchr
#include <fstream>
char*
{
if (indices_) {
for (const int& index : *indices_) {
- assert((index >= 0) && (index < static_cast<int>(input_->points.size())));
+ assert((index >= 0) && (index < static_cast<int>(input_->size())));
- if (isFinite(input_->points[index])) {
+ if (isFinite((*input_)[index])) {
// add points to octree
this->addPointIdx(index);
}
}
}
else {
- for (std::size_t i = 0; i < input_->points.size(); i++) {
- if (isFinite(input_->points[i])) {
+ for (std::size_t i = 0; i < input_->size(); i++) {
+ if (isFinite((*input_)[i])) {
// add points to octree
this->addPointIdx(static_cast<unsigned int>(i));
}
cloud_arg->push_back(point_arg);
- this->addPointIdx(static_cast<const int>(cloud_arg->points.size()) - 1);
+ this->addPointIdx(static_cast<const int>(cloud_arg->size()) - 1);
}
//////////////////////////////////////////////////////////////////////////////////////////////
cloud_arg->push_back(point_arg);
- this->addPointFromCloud(static_cast<const int>(cloud_arg->points.size()) - 1,
- indices_arg);
+ this->addPointFromCloud(static_cast<const int>(cloud_arg->size()) - 1, indices_arg);
}
//////////////////////////////////////////////////////////////////////////////////////////////
isVoxelOccupiedAtPoint(const int& point_idx_arg) const
{
// retrieve point from input cloud
- const PointT& point = this->input_->points[point_idx_arg];
+ const PointT& point = (*this->input_)[point_idx_arg];
// search for voxel at point in octree
return (this->isVoxelOccupiedAtPoint(point));
deleteVoxelAtPoint(const int& point_idx_arg)
{
// retrieve point from input cloud
- const PointT& point = this->input_->points[point_idx_arg];
+ const PointT& point = (*this->input_)[point_idx_arg];
// delete leaf at point
this->deleteVoxelAtPoint(point);
for (const int& leafIndex : leafIndices) {
- const PointT& point_from_index = input_->points[leafIndex];
+ const PointT& point_from_index = (*input_)[leafIndex];
// generate key
genOctreeKeyforPoint(point_from_index, new_index_key);
{
OctreeKey key;
- assert(point_idx_arg < static_cast<int>(input_->points.size()));
+ assert(point_idx_arg < static_cast<int>(input_->size()));
- const PointT& point = input_->points[point_idx_arg];
+ const PointT& point = (*input_)[point_idx_arg];
// make sure bounding box is big enough
adoptBoundingBoxToPoint(point);
getPointByIndex(const unsigned int index_arg) const
{
// retrieve point from input cloud
- assert(index_arg < static_cast<unsigned int>(input_->points.size()));
- return (this->input_->points[index_arg]);
+ assert(index_arg < static_cast<unsigned int>(input_->size()));
+ return ((*this->input_)[index_arg]);
}
//////////////////////////////////////////////////////////////////////////////////////////////
maxZ = -std::numeric_limits<float>::max();
for (std::size_t i = 0; i < input_->size(); ++i) {
- PointT temp(input_->points[i]);
+ PointT temp((*input_)[i]);
if (transform_func_) // Search for point with
transform_func_(temp);
if (!pcl::isFinite(
{
OctreeKey key;
- assert(pointIdx_arg < static_cast<int>(this->input_->points.size()));
+ assert(pointIdx_arg < static_cast<int>(this->input_->size()));
- const PointT& point = this->input_->points[pointIdx_arg];
+ const PointT& point = (*this->input_)[pointIdx_arg];
if (!pcl::isFinite(point))
return;
void
setOccupiedVoxelsAtPointsFromCloud(PointCloudPtr cloud_arg)
{
- for (std::size_t i = 0; i < cloud_arg->points.size(); i++) {
+ for (const auto& point : *cloud_arg) {
// check for NaNs
- if (isFinite(cloud_arg->points[i])) {
+ if (isFinite(point)) {
// set voxel at point
- this->setOccupiedVoxelAtPoint(cloud_arg->points[i]);
+ this->setOccupiedVoxelAtPoint(point);
}
}
}
{
OctreeKey key;
- assert(pointIdx_arg < static_cast<int>(this->input_->points.size()));
+ assert(pointIdx_arg < static_cast<int>(this->input_->size()));
- const PointT& point = this->input_->points[pointIdx_arg];
+ const PointT& point = (*this->input_)[pointIdx_arg];
// make sure bounding box is big enough
this->adoptBoundingBoxToPoint(point);
getVoxelCentroidAtPoint(const int& point_idx_arg, PointT& voxel_centroid_arg) const
{
// get centroid at point
- return (this->getVoxelCentroidAtPoint(this->input_->points[point_idx_arg],
+ return (this->getVoxelCentroidAtPoint((*this->input_)[point_idx_arg],
voxel_centroid_arg));
}
int& result_index,
float& sqr_distance)
{
- return (approxNearestSearch(cloud.points[query_index], result_index, sqr_distance));
+ return (approxNearestSearch(cloud[query_index], result_index, sqr_distance));
}
/** \brief Search for approx. nearest neighbor at the query point.
std::vector<float>& k_sqr_distances,
unsigned int max_nn = 0)
{
- return (
- radiusSearch(cloud.points[index], radius, k_indices, k_sqr_distances, max_nn));
+ return (radiusSearch(cloud[index], radius, k_indices, k_sqr_distances, max_nn));
}
/** \brief Search for all neighbors of query point that are within a given radius.
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
PCL_INSTANTIATE(OctreePointCloudSingleBufferWithLeafDataTVector, PCL_XYZ_POINT_TYPES)
template<typename ContainerT, typename PointT> void
OutofcoreOctreeBase<ContainerT, PointT>::setLODFilter (const pcl::Filter<pcl::PCLPointCloud2>::Ptr& filter_arg)
{
- lod_filter_ptr_ = filter_arg;
+ lod_filter_ptr_ = std::static_pointer_cast<decltype(lod_filter_ptr_)>(filter_arg);
}
////////////////////////////////////////////////////////////////////////////////
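// Editor's note: the setLODFilter change above converts the generic filter handle to
// the member's shared_ptr type with std::static_pointer_cast, which casts between
// related shared_ptr types while keeping shared ownership of the same object. A
// generic illustration (Base/Derived are made-up types):

#include <memory>

struct Base { virtual ~Base () = default; };
struct Derived : Base { int value = 42; };

inline int
demo_static_pointer_cast ()
{
  std::shared_ptr<Base> base = std::make_shared<Derived> ();
  std::shared_ptr<Derived> derived = std::static_pointer_cast<Derived> (base);
  return derived->value;   // both pointers share one control block
}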
#include <exception>
#include <pcl/common/common.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/visualization/common/common.h>
#include <pcl/outofcore/octree_base_node.h>
#include <pcl/filters/random_sample.h>
PCL_DEBUG ("[pcl::outofocre::OutofcoreOctreeBaseNode::%s] Size of cloud before: %lu\n", __FUNCTION__, dst_blob->width*dst_blob->height );
PCL_DEBUG ("[pcl::outofcore::OutofcoreOctreeBaseNode::%s] Concatenating point cloud\n", __FUNCTION__);
int res = pcl::concatenate (*dst_blob, *tmp_blob, *dst_blob);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 1);
PCL_DEBUG ("[pcl::outofocre::OutofcoreOctreeBaseNode::%s] Size of cloud after: %lu\n", __FUNCTION__, dst_blob->width*dst_blob->height );
//concatenate those points into the returned dst_blob
PCL_DEBUG ("[pcl::outofcore::OutofcoreOctreeBaseNode::%s] Concatenating point cloud in place\n", __FUNCTION__);
std::uint64_t orig_points_in_destination = dst_blob->width*dst_blob->height;
- (void)orig_points_in_destination;
int res = pcl::concatenate (*dst_blob, *tmp_blob_within_bb, *dst_blob);
- (void)res;
+ pcl::utils::ignore(orig_points_in_destination, res);
assert (res == 1);
assert (dst_blob->width*dst_blob->height == indices.size () + orig_points_in_destination);
#include <boost/uuid/uuid_io.hpp>
// PCL
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/PCLPointCloud2.h>
#include <pcl/outofcore/octree_disk_container.h>
//allows operation on POSIX
-#if !defined WIN32
+#if !defined _WIN32
#define _fseeki64 fseeko
#elif defined __MINGW32__
#define _fseeki64 fseeko64
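// Editor's note: the guard above now tests _WIN32, the macro compilers predefine when
// targeting Windows; plain WIN32 is only defined by certain headers or build setups,
// so `#if !defined WIN32` could wrongly select the POSIX branch. The conventional form:
//
//   #if defined(_WIN32)
//     // Windows-specific code
//   #else
//     // POSIX code
//   #endif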
//construct the point cloud for this node
typename pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT>);
- cloud->width = static_cast<std::uint32_t> (writebuff_.size ());
+ cloud->width = writebuff_.size ();
cloud->height = 1;
cloud->points = writebuff_;
// Write ascii for now to debug
int res = writer.writeBinaryCompressed (disk_storage_filename_, *cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
if (force_cache_dealloc)
{
//seek the right length;
int seekret = _fseeki64 (f, idx * sizeof(PointT), SEEK_SET);
- (void)seekret;
+ pcl::utils::ignore(seekret);
assert (seekret == 0);
std::size_t readlen = fread (&temp, 1, sizeof(PointT), f);
- (void)readlen;
+ pcl::utils::ignore(readlen);
assert (readlen == sizeof (PointT));
int res = fclose (f);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
return (temp);
typename pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT> ());
int res = reader.read (disk_storage_filename_, *cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
- for (std::size_t i=0; i < cloud->points.size (); i++)
- dst.push_back (cloud->points[i]);
+ dst.insert(dst.end(), cloud->cbegin(), cloud->cend());
}
////////////////////////////////////////////////////////////////////////////////
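// Editor's note: the readRange change above relies on pcl::PointCloud exposing
// standard cbegin()/cend() iterators over its points, so the copy collapses into a
// single range insert on the destination vector:
//
//   dst.insert (dst.end (), cloud->cbegin (), cloud->cend ());
//
// which lets the destination grow once for the whole range instead of reallocating
// across repeated push_back calls.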
for (std::uint64_t i = 0; i < filesamp; i++)
{
int seekret = _fseeki64 (f, offsets[i] * static_cast<std::uint64_t> (sizeof(PointT)), SEEK_SET);
- (void)seekret;
+ pcl::utils::ignore(seekret);
assert (seekret == 0);
std::size_t readlen = fread (loc, sizeof(PointT), 1, f);
- (void)readlen;
+ pcl::utils::ignore(readlen);
assert (readlen == 1);
dst.push_back (p);
for (std::uint64_t i = 0; i < filesamp; i++)
{
int seekret = _fseeki64 (f, offsets[i] * static_cast<std::uint64_t> (sizeof(PointT)), SEEK_SET);
- (void)seekret;
+ pcl::utils::ignore(seekret);
assert (seekret == 0);
std::size_t readlen = fread (loc, sizeof(PointT), 1, f);
- (void)readlen;
+ pcl::utils::ignore(readlen);
assert (readlen == 1);
dst.push_back (p);
}
int res = fclose (f);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
}
// Open the existing file
pcl::PCDReader reader;
int res = reader.read (disk_storage_filename_, *tmp_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
// Otherwise create the point cloud which will be saved to the pcd file for the first time
else
{
- tmp_cloud->width = static_cast<std::uint32_t> (count + writebuff_.size ());
+ tmp_cloud->width = count + writebuff_.size ();
tmp_cloud->height = 1;
}
for (std::size_t i = 0; i < src.size (); i++)
- tmp_cloud->points.push_back (src[i]);
+ tmp_cloud->push_back (src[i]);
// If there are any points in the write cache writebuff_, a different write cache than this one, concatenate
for (std::size_t i = 0; i < writebuff_.size (); i++)
{
- tmp_cloud->points.push_back (writebuff_[i]);
+ tmp_cloud->push_back (writebuff_[i]);
}
//assume unorganized point cloud
- tmp_cloud->width = static_cast<std::uint32_t> (tmp_cloud->points.size ());
+ tmp_cloud->width = tmp_cloud->size ();
//save and close
PCDWriter writer;
int res = writer.writeBinaryCompressed (disk_storage_filename_, *tmp_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
//open the existing file
pcl::PCDReader reader;
int res = reader.read (disk_storage_filename_, *tmp_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
pcl::PCDWriter writer;
PCL_DEBUG ("[pcl::outofcore::OutofcoreOctreeDiskContainer::%s] Concatenating point cloud from %s to new cloud\n", __FUNCTION__, disk_storage_filename_.c_str ());
pcl::concatenate (*tmp_cloud, *input_cloud, *tmp_cloud);
std::size_t res_pts = tmp_cloud->width*tmp_cloud->height;
- (void)previous_num_pts;
- (void)res_pts;
+ pcl::utils::ignore(previous_num_pts);
+ pcl::utils::ignore(res_pts);
assert (previous_num_pts == res_pts);
{
pcl::PCDWriter writer;
int res = writer.writeBinaryCompressed (disk_storage_filename_, *input_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
// PCL_INFO ("[pcl::outofcore::OutofcoreOctreeDiskContainer::%s] Reading points from disk from %s.\n", __FUNCTION__ , disk_storage_filename_->c_str ());
int pcd_version;
int res = reader.read (disk_storage_filename_, *dst, origin, orientation, pcd_version);
- (void)res;
+ pcl::utils::ignore(res);
assert (res != -1);
}
else
{
// PCL_INFO ("[pcl::outofcore::OutofcoreOctreeDiskContainer::%s] Reading points from disk from %s.\n", __FUNCTION__ , disk_storage_filename_->c_str ());
int res = pcl::io::loadPCDFile (disk_storage_filename_, *temp_output_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res != -1);
if(res == -1)
return (-1);
pcl::PCDReader reader;
// Open it
int res = reader.read (disk_storage_filename_, *tmp_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
else //otherwise create the pcd file
{
- tmp_cloud->width = static_cast<std::uint32_t> (count) + static_cast<std::uint32_t> (writebuff_.size ());
+ tmp_cloud->width = count + writebuff_.size ();
tmp_cloud->height = 1;
}
// Add any points in the cache
for (std::size_t i = 0; i < writebuff_.size (); i++)
{
- tmp_cloud->points.push_back (writebuff_ [i]);
+ tmp_cloud->push_back (writebuff_ [i]);
}
//add the new points passed with this function
for (std::size_t i = 0; i < count; i++)
{
- tmp_cloud->points.push_back (*(start + i));
+ tmp_cloud->push_back (*(start + i));
}
- tmp_cloud->width = static_cast<std::uint32_t> (tmp_cloud->points.size ());
+ tmp_cloud->width = tmp_cloud->size ();
tmp_cloud->height = 1;
//save and close
PCDWriter writer;
int res = writer.writeBinaryCompressed (disk_storage_filename_, *tmp_cloud);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
}
////////////////////////////////////////////////////////////////////////////////
// Boost
#include <boost/uuid/random_generator.hpp>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/outofcore/boost.h>
#include <pcl/outofcore/octree_abstract_node_container.h>
#include <pcl/io/pcd_io.h>
#include <pcl/PCLPointCloud2.h>
//allows operation on POSIX
-#if !defined WIN32
+#if !defined _WIN32
#define _fseeki64 fseeko
#elif defined __MINGW32__
#define _fseeki64 fseeko64
for (std::uint64_t i = 0; i < num; i++)
{
int seekret = _fseeki64 (f, i * sizeof (PointT), SEEK_SET);
- (void)seekret;
+ pcl::utils::ignore(seekret);
assert (seekret == 0);
std::size_t readlen = fread (loc, sizeof (PointT), 1, f);
- (void)readlen;
+ pcl::utils::ignore(readlen);
assert (readlen == 1);
//of << p.x << "\t" << p.y << "\t" << p.z << "\n";
fwrite (ss.str ().c_str (), 1, ss.str ().size (), fxyz);
}
int res = fclose (f);
- (void)res;
+ pcl::utils::ignore(res);
assert (res == 0);
res = fclose (fxyz);
assert (res == 0);
#include <pcl/pcl_macros.h>
#include <pcl/exceptions.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/console/print.h>
#include <fstream>
void
OutofcoreOctreeBaseMetadata::writeMetadataString (std::vector<char>& buf)
{
- (void)buf;
+ pcl::utils::ignore(buf);
PCL_THROW_EXCEPTION (PCLException, "Not implemented\n");
}
std::ostream&
operator<<(std::ostream& os, const OutofcoreOctreeBaseMetadata& metadata_arg)
{
- (void) metadata_arg;
+ pcl::utils::ignore(metadata_arg);
return (os);
}
#define PCL_VERSION_COMPARE(OP, MAJ, MIN, PATCH) \
(PCL_VERSION*10+PCL_DEV_VERSION OP PCL_VERSION_CALC(MAJ, MIN, PATCH)*10)
+/* Index type and signed/unsigned property */
+#define PCL_INDEX_SIGNED ${PCL_INDEX_SIGNED_STR}
+
+#if (${PCL_INDEX_SIZE} > 0)
+ #define PCL_INDEX_SIZE ${PCL_INDEX_SIZE}
+#else
+ #if PCL_MINOR_VERSION <= 11
+ // sizeof returns bytes, while we measure size by bits in the template
+ #define PCL_INDEX_SIZE (sizeof(int) * 8)
+ #else
+ #define PCL_INDEX_SIZE 32
+ #endif //PCL_MINOR_VERSION
+#endif
+
#cmakedefine HAVE_TBB 1
#cmakedefine HAVE_OPENNI 1
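The new configuration block lets CMake pick the signedness (`PCL_INDEX_SIGNED`, expanded from `PCL_INDEX_SIGNED_STR`) and bit width of PCL's index type, falling back to 32 bits when no size is requested (or to `sizeof(int) * 8` for minor versions up to 11). A hedged sketch of how such macros could be mapped onto a concrete integer type; the names below are illustrative, and PCL's real `index_t` definition lives in its types header:
#include <cstddef>
#include <cstdint>
// Illustrative values standing in for what CMake would configure.
#define PCL_INDEX_SIGNED true
#define PCL_INDEX_SIZE 32
namespace sketch {
  namespace detail {
    template <std::size_t Bits, bool Signed> struct index_type;
    template <> struct index_type<32, true>  { using type = std::int32_t;  };
    template <> struct index_type<32, false> { using type = std::uint32_t; };
    template <> struct index_type<64, true>  { using type = std::int64_t;  };
    template <> struct index_type<64, false> { using type = std::uint64_t; };
  }
  using index_t = detail::index_type<PCL_INDEX_SIZE, PCL_INDEX_SIGNED>::type;
  static_assert (sizeof (index_t) * 8 == PCL_INDEX_SIZE, "index width mismatch");
}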
Eigen::VectorXf ground_coeffs;
ground_coeffs.resize(4);
std::vector<int> clicked_points_indices;
- for (std::size_t i = 0; i < clicked_points_3d->points.size(); i++)
+ for (std::size_t i = 0; i < clicked_points_3d->size(); i++)
clicked_points_indices.push_back(i);
pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
model_plane.computeModelCoefficients(clicked_points_indices,ground_coeffs);
// Project maxima on the ground plane:
for(int i = 0; i < maxima_number; i++) // for every maximum
{
- PointT* current_point = &cloud_->points[maxima_cloud_indices[i]]; // current maximum point cloud point
+ PointT* current_point = &(*cloud_)[maxima_cloud_indices[i]]; // current maximum point cloud point
Eigen::Vector3f p_current_eigen(current_point->x, current_point->y, current_point->z); // conversion to eigen
float t = p_current_eigen.dot(head_ground_coeffs) / normalize_factor; // height from the ground
maxima_projected.col(i).matrix () = p_current_eigen - head_ground_coeffs * t; // projection of the point on the groundplane
// Associate cluster points to one of the maximum:
for(std::vector<int>::const_iterator points_iterator = cluster.getIndices().indices.begin(); points_iterator != cluster.getIndices().indices.end(); ++points_iterator)
{
- PointT* current_point = &cloud_->points[*points_iterator]; // current point cloud point
+ PointT* current_point = &(*cloud_)[*points_iterator]; // current point cloud point
Eigen::Vector3f p_current_eigen(current_point->x, current_point->y, current_point->z); // conversion to eigen
float t = p_current_eigen.dot(head_ground_coeffs) / normalize_factor; // height from the ground
p_current_eigen -= head_ground_coeffs * t; // projection of the point on the groundplane
for(std::vector<int>::const_iterator pit = cluster.getIndices().indices.begin(); pit != cluster.getIndices().indices.end(); ++pit)
{
- PointT* p = &cloud_->points[*pit];
+ PointT* p = &(*cloud_)[*pit];
int index;
if (!vertical_) // camera horizontal
index = int((p->x - cluster.getMin()(0)) / bin_size_);
{
bool good_maximum = true;
- PointT* p_current = &cloud_->points[maxima_cloud_indices_[i]]; // pointcloud point referring to the current maximum
+ PointT* p_current = &(*cloud_)[maxima_cloud_indices_[i]]; // pointcloud point referring to the current maximum
Eigen::Vector3f p_current_eigen(p_current->x, p_current->y, p_current->z); // conversion to eigen
float t = p_current_eigen.dot(ground_coeffs_.head(3)) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
p_current_eigen -= ground_coeffs_.head(3) * t; // projection of the point on the groundplane
int j = i-1;
while ((j >= 0) && (good_maximum))
{
- PointT* p_previous = &cloud_->points[maxima_cloud_indices_[j]]; // pointcloud point referring to an already validated maximum
+ PointT* p_previous = &(*cloud_)[maxima_cloud_indices_[j]]; // pointcloud point referring to an already validated maximum
Eigen::Vector3f p_previous_eigen(p_previous->x, p_previous->y, p_previous->z); // conversion to eigen
float t = p_previous_eigen.dot(ground_coeffs_.head(3)) / std::pow(sqrt_ground_coeffs_, 2); // height from the ground
p_previous_eigen -= ground_coeffs_.head(3) * t; // projection of the point on the groundplane
for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
{
- PointT* p = &input_cloud->points[*pit];
+ PointT* p = &(*input_cloud)[*pit];
min_x_ = std::min(p->x, min_x_);
max_x_ = std::max(p->x, max_x_);
head_threshold_value = min_y_ + height_ / 8.0f; // head is supposed to be 1/8 of the human height
for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
{
- PointT* p = &input_cloud->points[*pit];
+ PointT* p = &(*input_cloud)[*pit];
if(p->y < head_threshold_value)
{
head_threshold_value = max_x_ - height_ / 8.0f; // head is supposed to be 1/8 of the human height
for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); pit++)
{
- PointT* p = &input_cloud->points[*pit];
+ PointT* p = &(*input_cloud)[*pit];
if(p->x > head_threshold_value)
{
float max_z = c_z_;
for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); ++pit)
{
- PointT* p = &input_cloud->points[*pit];
+ PointT* p = &(*input_cloud)[*pit];
min_x = std::min(p->x, min_x);
max_x = std::max(p->x, max_x);
float max_z = c_z_;
for (std::vector<int>::const_iterator pit = points_indices_.indices.begin(); pit != points_indices_.indices.end(); ++pit)
{
- PointT* p = &input_cloud->points[*pit];
+ PointT* p = &(*input_cloud)[*pit];
min_y = std::min(p->y, min_y);
max_y = std::max(p->y, max_y);
const int index_c = row_index*width+col_index+2;
const int index_r = (row_index+2)*width+col_index;
- const unsigned char r0 = input_->points[index0].r;
- const unsigned char g0 = input_->points[index0].g;
- const unsigned char b0 = input_->points[index0].b;
+ const unsigned char r0 = (*input_)[index0].r;
+ const unsigned char g0 = (*input_)[index0].g;
+ const unsigned char b0 = (*input_)[index0].b;
- const unsigned char r_c = input_->points[index_c].r;
- const unsigned char g_c = input_->points[index_c].g;
- const unsigned char b_c = input_->points[index_c].b;
+ const unsigned char r_c = (*input_)[index_c].r;
+ const unsigned char g_c = (*input_)[index_c].g;
+ const unsigned char b_c = (*input_)[index_c].b;
- const unsigned char r_r = input_->points[index_r].r;
- const unsigned char g_r = input_->points[index_r].g;
- const unsigned char b_r = input_->points[index_r].b;
+ const unsigned char r_r = (*input_)[index_r].r;
+ const unsigned char g_r = (*input_)[index_r].g;
+ const unsigned char b_r = (*input_)[index_r].b;
const float r_dx = static_cast<float> (r_c) - static_cast<float> (r0);
const float g_dx = static_cast<float> (g_c) - static_cast<float> (g0);
//const int index_d = (row_index+1)*width+col_index+1;
- const unsigned char r0 = cloud->points[index0].r;
- const unsigned char g0 = cloud->points[index0].g;
- const unsigned char b0 = cloud->points[index0].b;
+ const unsigned char r0 = (*cloud)[index0].r;
+ const unsigned char g0 = (*cloud)[index0].g;
+ const unsigned char b0 = (*cloud)[index0].b;
- const unsigned char r_c = cloud->points[index_c].r;
- const unsigned char g_c = cloud->points[index_c].g;
- const unsigned char b_c = cloud->points[index_c].b;
+ const unsigned char r_c = (*cloud)[index_c].r;
+ const unsigned char g_c = (*cloud)[index_c].g;
+ const unsigned char b_c = (*cloud)[index_c].b;
- const unsigned char r_r = cloud->points[index_r].r;
- const unsigned char g_r = cloud->points[index_r].g;
- const unsigned char b_r = cloud->points[index_r].b;
+ const unsigned char r_r = (*cloud)[index_r].r;
+ const unsigned char g_r = (*cloud)[index_r].g;
+ const unsigned char b_r = (*cloud)[index_r].b;
const float r_dx = static_cast<float> (r_c) - static_cast<float> (r0);
const float g_dx = static_cast<float> (g_c) - static_cast<float> (g0);
{
for (int col_index = 1; col_index < width-1; ++col_index)
{
- const int r7 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index-1)].r);
- const int g7 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index-1)].g);
- const int b7 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index-1)].b);
- const int r8 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index)].r);
- const int g8 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index)].g);
- const int b8 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index)].b);
- const int r9 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index+1)].r);
- const int g9 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index+1)].g);
- const int b9 = static_cast<int> (cloud->points[(row_index-1)*width + (col_index+1)].b);
- const int r4 = static_cast<int> (cloud->points[(row_index)*width + (col_index-1)].r);
- const int g4 = static_cast<int> (cloud->points[(row_index)*width + (col_index-1)].g);
- const int b4 = static_cast<int> (cloud->points[(row_index)*width + (col_index-1)].b);
- const int r6 = static_cast<int> (cloud->points[(row_index)*width + (col_index+1)].r);
- const int g6 = static_cast<int> (cloud->points[(row_index)*width + (col_index+1)].g);
- const int b6 = static_cast<int> (cloud->points[(row_index)*width + (col_index+1)].b);
- const int r1 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index-1)].r);
- const int g1 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index-1)].g);
- const int b1 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index-1)].b);
- const int r2 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index)].r);
- const int g2 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index)].g);
- const int b2 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index)].b);
- const int r3 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index+1)].r);
- const int g3 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index+1)].g);
- const int b3 = static_cast<int> (cloud->points[(row_index+1)*width + (col_index+1)].b);
+ const int r7 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index-1)].r);
+ const int g7 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index-1)].g);
+ const int b7 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index-1)].b);
+ const int r8 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index)].r);
+ const int g8 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index)].g);
+ const int b8 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index)].b);
+ const int r9 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index+1)].r);
+ const int g9 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index+1)].g);
+ const int b9 = static_cast<int> ((*cloud)[(row_index-1)*width + (col_index+1)].b);
+ const int r4 = static_cast<int> ((*cloud)[(row_index)*width + (col_index-1)].r);
+ const int g4 = static_cast<int> ((*cloud)[(row_index)*width + (col_index-1)].g);
+ const int b4 = static_cast<int> ((*cloud)[(row_index)*width + (col_index-1)].b);
+ const int r6 = static_cast<int> ((*cloud)[(row_index)*width + (col_index+1)].r);
+ const int g6 = static_cast<int> ((*cloud)[(row_index)*width + (col_index+1)].g);
+ const int b6 = static_cast<int> ((*cloud)[(row_index)*width + (col_index+1)].b);
+ const int r1 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index-1)].r);
+ const int g1 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index-1)].g);
+ const int b1 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index-1)].b);
+ const int r2 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index)].r);
+ const int g2 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index)].g);
+ const int b2 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index)].b);
+ const int r3 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index+1)].r);
+ const int g3 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index+1)].g);
+ const int b3 = static_cast<int> ((*cloud)[(row_index+1)*width + (col_index+1)].b);
//const int r_tmp1 = - r7 + r3;
//const int r_tmp2 = - r1 + r9;
output.resize (width, height);
// compute distance map
- //float *distance_map = new float[input_->points.size ()];
+ //float *distance_map = new float[input_->size ()];
const unsigned char * mask_map = input.getData ();
float * distance_map = output.getData ();
for (std::size_t index = 0; index < width*height; ++index)
pcl::PointCloud<pcl::Histogram<nbins_> > input_ftt_negate (input_ftt);
for (int i = 2; i < (nbins_); i += 2)
- input_ftt_negate.points[0].histogram[i] = -input_ftt_negate.points[0].histogram[i];
+ input_ftt_negate[0].histogram[i] = -input_ftt_negate[0].histogram[i];
int nr_bins_after_padding = 180;
int peak_distance = 5;
multAB[i].r = multAB[i].i = 0.f;
int k = 0;
- multAB[k].r = input_ftt_negate.points[0].histogram[0] * target_ftt.points[0].histogram[0];
+ multAB[k].r = input_ftt_negate[0].histogram[0] * target_ftt[0].histogram[0];
k++;
float a, b, c, d;
for (int i = 1; i < cutoff; i += 2, k++)
{
- a = input_ftt_negate.points[0].histogram[i];
- b = input_ftt_negate.points[0].histogram[i + 1];
- c = target_ftt.points[0].histogram[i];
- d = target_ftt.points[0].histogram[i + 1];
+ a = input_ftt_negate[0].histogram[i];
+ b = input_ftt_negate[0].histogram[i + 1];
+ c = target_ftt[0].histogram[i];
+ d = target_ftt[0].histogram[i + 1];
multAB[k].r = a * c - b * d;
multAB[k].i = b * c + a * d;
multAB[k].i /= tmp;
}
- multAB[nbins_ - 1].r = input_ftt_negate.points[0].histogram[nbins_ - 1] * target_ftt.points[0].histogram[nbins_ - 1];
+ multAB[nbins_ - 1].r = input_ftt_negate[0].histogram[nbins_ - 1] * target_ftt[0].histogram[nbins_ - 1];
kiss_fft_cfg mycfg = kiss_fft_alloc (nr_bins_after_padding, 1, nullptr, nullptr);
kiss_fft_cpx * invAB = new kiss_fft_cpx[nr_bins_after_padding];
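The loop above multiplies the spectra of the two roll histograms bin by bin: with the input spectrum packed as pairs (a, b) and the target as (c, d), each product follows the complex multiplication rule (a + bi)(c + di) = (ac - bd) + (bc + ad)i, which is exactly the `multAB[k].r` / `multAB[k].i` pair being filled; the inverse `kiss_fft` of that product then yields the circular correlation whose peak gives the best rotational shift between the histograms. A tiny stand-alone sketch of the per-bin step (the struct stands in for `kiss_fft_cpx`):
struct cpx { float r, i; };  // stand-in for kiss_fft_cpx
// Per-bin complex product, matching the multAB[k] assignments above:
// (a + bi)(c + di) = (ac - bd) + (bc + ad)i
inline cpx mul_bins (cpx A, cpx B)
{
  return { A.r * B.r - A.i * B.i, A.i * B.r + A.r * B.i };
}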
#include <vector>
#include <cstddef>
-#include <cstring>
#include <pcl/pcl_macros.h>
#include <pcl/recognition/dot_modality.h>
#include <pcl/recognition/dense_quantized_multi_mod_template.h>
#pragma once
#include <pcl/memory.h>
-#include <pcl/common/common.h>
#include <pcl/ml/dt/decision_tree_data_provider.h>
#include <pcl/recognition/face_detection/face_common.h>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem/operations.hpp>
-#include <iostream>
#include <fstream>
#include <string>
#include "pcl/recognition/face_detection/face_detector_data_provider.h"
#include "pcl/recognition/face_detection/rf_face_utils.h"
#include "pcl/ml/dt/decision_forest.h"
-#include <pcl/features/integral_image2D.h>
namespace pcl
{
void getVotes(pcl::PointCloud<pcl::PointXYZ>::Ptr & votes_cloud)
{
votes_cloud->points.resize (head_center_votes_.size ());
- votes_cloud->width = static_cast<int>(head_center_votes_.size ());
+ votes_cloud->width = head_center_votes_.size ();
votes_cloud->height = 1;
for (std::size_t i = 0; i < head_center_votes_.size (); i++)
{
- votes_cloud->points[i].getVector3fMap () = head_center_votes_[i];
+ (*votes_cloud)[i].getVector3fMap () = head_center_votes_[i];
}
}
void getVotes(pcl::PointCloud<pcl::PointXYZI>::Ptr & votes_cloud)
{
votes_cloud->points.resize (head_center_votes_.size ());
- votes_cloud->width = static_cast<int>(head_center_votes_.size ());
+ votes_cloud->width = head_center_votes_.size ();
votes_cloud->height = 1;
int p = 0;
{
for (std::size_t j = 0; j < head_center_votes_clustered_[i].size (); j++, p++)
{
- votes_cloud->points[p].getVector3fMap () = head_center_votes_clustered_[i][j];
- votes_cloud->points[p].intensity = 0.1f * static_cast<float> (i);
+ (*votes_cloud)[p].getVector3fMap () = head_center_votes_clustered_[i][j];
+ (*votes_cloud)[p].intensity = 0.1f * static_cast<float> (i);
}
}
void getVotes2(pcl::PointCloud<pcl::PointXYZI>::Ptr & votes_cloud)
{
votes_cloud->points.resize (head_center_votes_.size ());
- votes_cloud->width = static_cast<int>(head_center_votes_.size ());
+ votes_cloud->width = head_center_votes_.size ();
votes_cloud->height = 1;
int p = 0;
{
for (std::size_t j = 0; j < head_center_original_votes_clustered_[i].size (); j++, p++)
{
- votes_cloud->points[p].getVector3fMap () = head_center_original_votes_clustered_[i][j];
- votes_cloud->points[p].intensity = 0.1f * static_cast<float> (i);
+ (*votes_cloud)[p].getVector3fMap () = head_center_original_votes_clustered_[i][j];
+ (*votes_cloud)[p].intensity = 0.1f * static_cast<float> (i);
}
}
typename pcl::PointCloud<ModelT>::Ptr filtered (new pcl::PointCloud<ModelT> ());
std::vector<int> indices_to_keep;
- indices_to_keep.resize (to_be_filtered->points.size ());
+ indices_to_keep.resize (to_be_filtered->size ());
int keep = 0;
- for (std::size_t i = 0; i < to_be_filtered->points.size (); i++)
+ for (std::size_t i = 0; i < to_be_filtered->size (); i++)
{
- float x = to_be_filtered->points[i].x;
- float y = to_be_filtered->points[i].y;
- float z = to_be_filtered->points[i].z;
+ float x = (*to_be_filtered)[i].x;
+ float y = (*to_be_filtered)[i].y;
+ float z = (*to_be_filtered)[i].z;
int u = static_cast<int> (f * x / z + cx);
int v = static_cast<int> (f * y / z + cy);
typename pcl::PointCloud<ModelT>::Ptr filtered (new pcl::PointCloud<ModelT> ());
std::vector<int> indices_to_keep;
- indices_to_keep.resize (to_be_filtered->points.size ());
+ indices_to_keep.resize (to_be_filtered->size ());
int keep = 0;
- for (std::size_t i = 0; i < to_be_filtered->points.size (); i++)
+ for (std::size_t i = 0; i < to_be_filtered->size (); i++)
{
- float x = to_be_filtered->points[i].x;
- float y = to_be_filtered->points[i].y;
- float z = to_be_filtered->points[i].z;
+ float x = (*to_be_filtered)[i].x;
+ float y = (*to_be_filtered)[i].y;
+ float z = (*to_be_filtered)[i].z;
int u = static_cast<int> (f * x / z + cx);
int v = static_cast<int> (f * y / z + cy);
typename pcl::PointCloud<ModelT>::Ptr filtered (new pcl::PointCloud<ModelT> ());
std::vector<int> indices_to_keep;
- indices_to_keep.resize (to_be_filtered->points.size ());
+ indices_to_keep.resize (to_be_filtered->size ());
int keep = 0;
- for (std::size_t i = 0; i < to_be_filtered->points.size (); i++)
+ for (std::size_t i = 0; i < to_be_filtered->size (); i++)
{
- float x = to_be_filtered->points[i].x;
- float y = to_be_filtered->points[i].y;
- float z = to_be_filtered->points[i].z;
+ float x = (*to_be_filtered)[i].x;
+ float y = (*to_be_filtered)[i].y;
+ float z = (*to_be_filtered)[i].z;
int u = static_cast<int> (f * x / z + cx);
int v = static_cast<int> (f * y / z + cy);
mask_[i] = false;
// initialize explained_by_RM
- points_explained_by_rm_.resize (scene_cloud_downsampled_->points.size ());
+ points_explained_by_rm_.resize (scene_cloud_downsampled_->size ());
// initialize model
for (std::size_t m = 0; m < visible_models_.size (); m++)
std::vector<int> nn_indices;
std::vector<float> nn_distances;
- for (std::size_t i = 0; i < recog_model->cloud_->points.size (); i++)
+ for (std::size_t i = 0; i < recog_model->cloud_->size (); i++)
{
- if (!scene_downsampled_tree_->radiusSearch (recog_model->cloud_->points[i], inliers_threshold_, nn_indices, nn_distances,
+ if (!scene_downsampled_tree_->radiusSearch ((*recog_model->cloud_)[i], inliers_threshold_, nn_indices, nn_distances,
std::numeric_limits<int>::max ()))
{
outliers.push_back (static_cast<int> (i));
unsigned int min_pts_per_cluster, unsigned int max_pts_per_cluster = (std::numeric_limits<int>::max) ())
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset\n");
return;
}
- if (cloud.points.size () != normals.points.size ())
+ if (cloud.size () != normals.size ())
{
PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point cloud different than normals!\n");
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- int size = static_cast<int> (cloud.points.size ());
+ int size = static_cast<int> (cloud.size ());
for (int i = 0; i < size; ++i)
{
if (processed[i])
while (sq_idx < static_cast<int> (seed_queue.size ()))
{
- if (normals.points[seed_queue[sq_idx]].curvature > curvature_threshold)
+ if (normals[seed_queue[sq_idx]].curvature > curvature_threshold)
{
sq_idx++;
continue;
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
- if (normals.points[nn_indices[j]].curvature > curvature_threshold)
+ if (normals[nn_indices[j]].curvature > curvature_threshold)
{
continue;
}
//processed[nn_indices[j]] = true;
// [-1;1]
- double dot_p = normals.points[seed_queue[sq_idx]].normal[0] * normals.points[nn_indices[j]].normal[0]
- + normals.points[seed_queue[sq_idx]].normal[1] * normals.points[nn_indices[j]].normal[1]
- + normals.points[seed_queue[sq_idx]].normal[2] * normals.points[nn_indices[j]].normal[2];
+ double dot_p = normals[seed_queue[sq_idx]].normal[0] * normals[nn_indices[j]].normal[0]
+ + normals[seed_queue[sq_idx]].normal[1] * normals[nn_indices[j]].normal[1]
+ + normals[seed_queue[sq_idx]].normal[2] * normals[nn_indices[j]].normal[2];
if (std::abs (std::acos (dot_p)) < eps_angle)
{
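The growing test above measures the angle between two unit normals as `acos` of their dot product and accepts the neighbour when that angle is below `eps_angle`; since `std::acos` already returns values in [0, π], the surrounding `std::abs` is redundant, and for thresholds in that range the test is equivalent to `dot_p > cos(eps_angle)`. A small sketch of the same check, with the dot product clamped before `acos` to guard against rounding slightly outside [-1, 1]:
#include <algorithm>
#include <cmath>
// Angle test between two unit normals n1, n2 (x, y, z components), as in the
// seed-growing condition above.
inline bool within_angle (const float n1[3], const float n2[3], double eps_angle)
{
  double dot_p = static_cast<double> (n1[0]) * n2[0]
               + static_cast<double> (n1[1]) * n2[1]
               + static_cast<double> (n1[2]) * n2[2];
  dot_p = std::min (1.0, std::max (-1.0, dot_p));
  return std::acos (dot_p) < eps_angle;
}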
//check nans...
int j = 0;
- for (std::size_t i = 0; i < scene_normals_->points.size (); ++i)
+ for (std::size_t i = 0; i < scene_normals_->size (); ++i)
{
- if (!std::isfinite (scene_normals_->points[i].normal_x) || !std::isfinite (scene_normals_->points[i].normal_y)
- || !std::isfinite (scene_normals_->points[i].normal_z))
+ if (!std::isfinite ((*scene_normals_)[i].normal_x) || !std::isfinite ((*scene_normals_)[i].normal_y)
+ || !std::isfinite ((*scene_normals_)[i].normal_z))
continue;
- scene_normals_->points[j] = scene_normals_->points[i];
- scene_cloud_downsampled_->points[j] = scene_cloud_downsampled_->points[i];
+ (*scene_normals_)[j] = (*scene_normals_)[i];
+ (*scene_cloud_downsampled_)[j] = (*scene_cloud_downsampled_)[i];
j++;
}
scene_cloud_downsampled_->width = j;
scene_cloud_downsampled_->height = 1;
- explained_by_RM_.resize (scene_cloud_downsampled_->points.size (), 0);
- explained_by_RM_distance_weighted.resize (scene_cloud_downsampled_->points.size (), 0.f);
- unexplained_by_RM_neighboorhods.resize (scene_cloud_downsampled_->points.size (), 0.f);
+ explained_by_RM_.resize (scene_cloud_downsampled_->size (), 0);
+ explained_by_RM_distance_weighted.resize (scene_cloud_downsampled_->size (), 0.f);
+ unexplained_by_RM_neighboorhods.resize (scene_cloud_downsampled_->size (), 0.f);
//compute segmentation of the scene if detect_clutter_
if (detect_clutter_)
clusters, eps_angle_threshold, curvature_threshold, min_points);
clusters_cloud_.reset (new pcl::PointCloud<pcl::PointXYZI>);
- clusters_cloud_->points.resize (scene_cloud_downsampled_->points.size ());
+ clusters_cloud_->points.resize (scene_cloud_downsampled_->size ());
clusters_cloud_->width = scene_cloud_downsampled_->width;
clusters_cloud_->height = 1;
- for (std::size_t i = 0; i < scene_cloud_downsampled_->points.size (); i++)
+ for (std::size_t i = 0; i < scene_cloud_downsampled_->size (); i++)
{
pcl::PointXYZI p;
- p.getVector3fMap () = scene_cloud_downsampled_->points[i].getVector3fMap ();
+ p.getVector3fMap () = (*scene_cloud_downsampled_)[i].getVector3fMap ();
p.intensity = 0.f;
- clusters_cloud_->points[i] = p;
+ (*clusters_cloud_)[i] = p;
}
float intens_incr = 100.f / static_cast<float> (clusters.size ());
{
for (const auto &vertex : cluster.indices)
{
- clusters_cloud_->points[vertex].intensity = intens;
+ (*clusters_cloud_)[vertex].intensity = intens;
}
intens += intens_incr;
std::map<int, bool> banned;
std::map<int, bool>::iterator banned_it;
- for (std::size_t j = 0; j < complete_models_[indices_[i]]->points.size (); j++)
+ for (const auto& point: *complete_models_[indices_[i]])
{
- int pos_x, pos_y, pos_z;
- pos_x = static_cast<int> (std::floor ((complete_models_[indices_[i]]->points[j].x - min_pt_all.x) / res_occupancy_grid_));
- pos_y = static_cast<int> (std::floor ((complete_models_[indices_[i]]->points[j].y - min_pt_all.y) / res_occupancy_grid_));
- pos_z = static_cast<int> (std::floor ((complete_models_[indices_[i]]->points[j].z - min_pt_all.z) / res_occupancy_grid_));
+ const int pos_x = static_cast<int> (std::floor ((point.x - min_pt_all.x) / res_occupancy_grid_));
+ const int pos_y = static_cast<int> (std::floor ((point.y - min_pt_all.y) / res_occupancy_grid_));
+ const int pos_z = static_cast<int> (std::floor ((point.z - min_pt_all.z) / res_occupancy_grid_));
- int idx = pos_z * size_x * size_y + pos_y * size_x + pos_x;
+ const int idx = pos_z * size_x * size_y + pos_y * size_x + pos_x;
banned_it = banned.find (idx);
if (banned_it == banned.end ())
{
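Each model point is binned into a voxel of edge `res_occupancy_grid_`, and the 3-D cell (pos_x, pos_y, pos_z) is flattened to the single key `pos_z*size_x*size_y + pos_y*size_x + pos_x`, so the `banned` map counts every occupied voxel exactly once per model. A worked example of that key under assumed grid extents:
// Assumed extents for illustration only: size_x = 10, size_y = 8.
constexpr int voxel_key (int pos_x, int pos_y, int pos_z, int size_x, int size_y)
{
  return pos_z * size_x * size_y + pos_y * size_x + pos_x;
}
static_assert (voxel_key (3, 2, 1, 10, 8) == 103, "cell (3,2,1) -> 1*80 + 2*10 + 3");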
{
//check nans...
int j = 0;
- for (std::size_t i = 0; i < recog_model->cloud_->points.size (); ++i)
+ for (auto& point: *(recog_model->cloud_))
{
- if (!std::isfinite (recog_model->cloud_->points[i].x) || !std::isfinite (recog_model->cloud_->points[i].y)
- || !std::isfinite (recog_model->cloud_->points[i].z))
+ if (!isXYZFinite (point))
continue;
- recog_model->cloud_->points[j] = recog_model->cloud_->points[i];
+ (*recog_model->cloud_)[j] = point;
j++;
}
//check nans...
int j = 0;
- for (std::size_t i = 0; i < recog_model->normals_->points.size (); ++i)
+ for (std::size_t i = 0; i < recog_model->normals_->size (); ++i)
{
- if (!std::isfinite (recog_model->normals_->points[i].normal_x) || !std::isfinite (recog_model->normals_->points[i].normal_y)
- || !std::isfinite (recog_model->normals_->points[i].normal_z))
+ if (!isNormalFinite((*recog_model->normals_)[i]))
continue;
- recog_model->normals_->points[j] = recog_model->normals_->points[i];
- recog_model->cloud_->points[j] = recog_model->cloud_->points[i];
+ (*recog_model->normals_)[j] = (*recog_model->normals_)[i];
+ (*recog_model->cloud_)[j] = (*recog_model->cloud_)[i];
j++;
}
std::map<int, std::shared_ptr<std::vector<std::pair<int, float>>>> model_explains_scene_points; //which point i from the scene is explained by points j_k with dist d_k from the model
- outliers_weight.resize (recog_model->cloud_->points.size ());
- recog_model->outlier_indices_.resize (recog_model->cloud_->points.size ());
+ outliers_weight.resize (recog_model->cloud_->size ());
+ recog_model->outlier_indices_.resize (recog_model->cloud_->size ());
std::size_t o = 0;
- for (std::size_t i = 0; i < recog_model->cloud_->points.size (); i++)
+ for (std::size_t i = 0; i < recog_model->cloud_->size (); i++)
{
- if (!scene_downsampled_tree_->radiusSearch (recog_model->cloud_->points[i], inliers_threshold_, nn_indices, nn_distances, std::numeric_limits<int>::max ()))
+ if (!scene_downsampled_tree_->radiusSearch ((*recog_model->cloud_)[i], inliers_threshold_, nn_indices, nn_distances, std::numeric_limits<int>::max ()))
{
//outlier
outliers_weight[o] = regularizer_;
//it->first is index to scene point
//using normals to weight inliers
- Eigen::Vector3f scene_p_normal = scene_normals_->points[it->first].getNormalVector3fMap ();
- Eigen::Vector3f model_p_normal = recog_model->normals_->points[it->second->at (closest).first].getNormalVector3fMap ();
+ Eigen::Vector3f scene_p_normal = (*scene_normals_)[it->first].getNormalVector3fMap ();
+ Eigen::Vector3f model_p_normal =
+ (*recog_model->normals_)[it->second->at(closest).first].getNormalVector3fMap();
float dotp = scene_p_normal.dot (model_p_normal) * 1.f; //[-1,1] from antiparallel through perpendicular to parallel
if (dotp < 0.f)
std::vector < std::pair<int, int> > neighborhood_indices; //first is indices to scene point and second is indices to explained_ scene points
for (int i = 0; i < static_cast<int> (recog_model->explained_.size ()); i++)
{
- if (scene_downsampled_tree_->radiusSearch (scene_cloud_downsampled_->points[recog_model->explained_[i]], radius_neighborhood_GO_, nn_indices,
+ if (scene_downsampled_tree_->radiusSearch ((*scene_cloud_downsampled_)[recog_model->explained_[i]], radius_neighborhood_GO_, nn_indices,
nn_distances, std::numeric_limits<int>::max ()))
{
for (std::size_t k = 0; k < nn_distances.size (); k++)
//recog_model->unexplained_in_neighborhood.push_back(neighborhood_indices[i]);
recog_model->unexplained_in_neighborhood[p] = neighborhood_index.first;
- if (clusters_cloud_->points[recog_model->explained_[neighborhood_index.second]].intensity != 0.f
- && (clusters_cloud_->points[recog_model->explained_[neighborhood_index.second]].intensity
- == clusters_cloud_->points[neighborhood_index.first].intensity))
+ if ((*clusters_cloud_)[recog_model->explained_[neighborhood_index.second]].intensity != 0.f
+ && ((*clusters_cloud_)[recog_model->explained_[neighborhood_index.second]].intensity
+ == (*clusters_cloud_)[neighborhood_index.first].intensity))
{
recog_model->unexplained_in_neighborhood_weights[p] = clutter_regularizer_;
//neighborhood_indices[i].first gives the index to the scene point and second to the explained scene point by the model causing this...
//calculate weight of this clutter point based on the distance of the scene point and the model point causing it
float d = static_cast<float> (pow (
- (scene_cloud_downsampled_->points[recog_model->explained_[neighborhood_index.second]].getVector3fMap ()
- - scene_cloud_downsampled_->points[neighborhood_index.first].getVector3fMap ()).norm (), 2));
+ ((*scene_cloud_downsampled_)[recog_model->explained_[neighborhood_index.second]].getVector3fMap ()
+ - (*scene_cloud_downsampled_)[neighborhood_index.first].getVector3fMap ()).norm (), 2));
float d_weight = -(d / rn_sqr) + 1; //points that are close have a strong weight
//using normals to weight clutter points
- Eigen::Vector3f scene_p_normal = scene_normals_->points[neighborhood_index.first].getNormalVector3fMap ();
- Eigen::Vector3f model_p_normal = scene_normals_->points[recog_model->explained_[neighborhood_index.second]].getNormalVector3fMap ();
+ Eigen::Vector3f scene_p_normal = (*scene_normals_)[neighborhood_index.first].getNormalVector3fMap ();
+ Eigen::Vector3f model_p_normal = (*scene_normals_)[recog_model->explained_[neighborhood_index.second]].getNormalVector3fMap ();
float dotp = scene_p_normal.dot (model_p_normal); //[-1,1] from antiparallel through perpendicular to parallel
if (dotp < 0)
mask_[i] = true;
// initialize explained_by_RM
- explained_by_RM_.resize (scene_cloud_downsampled_->points.size ());
- points_explained_by_rm_.resize (scene_cloud_downsampled_->points.size ());
+ explained_by_RM_.resize (scene_cloud_downsampled_->size ());
+ points_explained_by_rm_.resize (scene_cloud_downsampled_->size ());
// initialize model
for (std::size_t m = 0; m < complete_models_.size (); m++)
std::vector<int> nn_indices;
std::vector<float> nn_distances;
- for (std::size_t i = 0; i < recog_model->cloud_->points.size (); i++)
+ for (std::size_t i = 0; i < recog_model->cloud_->size (); i++)
{
- if (!scene_downsampled_tree_->radiusSearch (recog_model->cloud_->points[i], inliers_threshold_, nn_indices, nn_distances,
+ if (!scene_downsampled_tree_->radiusSearch ((*recog_model->cloud_)[i], inliers_threshold_, nn_indices, nn_distances,
std::numeric_limits<int>::max ()))
{
outliers.push_back (static_cast<int> (i));
recog_model->bad_information_ = static_cast<int> (outliers.size ());
- if ((static_cast<float> (recog_model->bad_information_) / static_cast<float> (recog_model->complete_cloud_->points.size ()))
+ if ((static_cast<float> (recog_model->bad_information_) / static_cast<float> (recog_model->complete_cloud_->size ()))
<= penalty_threshold_ && (static_cast<float> (explained_indices.size ())
- / static_cast<float> (recog_model->complete_cloud_->points.size ())) >= support_threshold_)
+ / static_cast<float> (recog_model->complete_cloud_->size ())) >= support_threshold_)
{
recog_model->explained_ = explained_indices;
recognition_models_.push_back (recog_model);
// check if number of points is big enough to create a conflict
bool add_conflict = false;
- add_conflict = ((n_conflicts / static_cast<float> (recognition_models_[i]->complete_cloud_->points.size ())) > conflict_threshold_size_)
- || ((n_conflicts / static_cast<float> (recognition_models_[j]->complete_cloud_->points.size ())) > conflict_threshold_size_);
+ add_conflict = ((n_conflicts / static_cast<float> (recognition_models_[i]->complete_cloud_->size ())) > conflict_threshold_size_)
+ || ((n_conflicts / static_cast<float> (recognition_models_[j]->complete_cloud_->size ())) > conflict_threshold_size_);
if (add_conflict)
{
#include <pcl/recognition/hv/occlusion_reasoning.h>
+#include <algorithm>
+
///////////////////////////////////////////////////////////////////////////////////////////
template<typename ModelT, typename SceneT>
pcl::occlusion_reasoning::ZBuffering<ModelT, SceneT>::ZBuffering (int resx, int resy, float f) :
cx = static_cast<float> (cx_) / 2.f - 0.5f;
cy = static_cast<float> (cy_) / 2.f - 0.5f;
- indices_to_keep.resize (model->points.size ());
+ indices_to_keep.resize (model->size ());
int keep = 0;
- for (std::size_t i = 0; i < model->points.size (); i++)
+ for (std::size_t i = 0; i < model->size (); i++)
{
- float x = model->points[i].x;
- float y = model->points[i].y;
- float z = model->points[i].z;
+ float x = (*model)[i].x;
+ float y = (*model)[i].y;
+ float z = (*model)[i].z;
int u = static_cast<int> (f_ * x / z + cx);
int v = static_cast<int> (f_ * y / z + cy);
max_u = max_v = std::numeric_limits<float>::max () * -1;
min_u = min_v = std::numeric_limits<float>::max ();
- for (std::size_t i = 0; i < scene->points.size (); i++)
+ for (const auto& point: *scene)
{
- float b_x = scene->points[i].x / scene->points[i].z;
+ float b_x = point.x / point.z;
if (b_x > max_u)
max_u = b_x;
if (b_x < min_u)
min_u = b_x;
- float b_y = scene->points[i].y / scene->points[i].z;
+ float b_y = point.y / point.z;
if (b_y > max_v)
max_v = b_y;
if (b_y < min_v)
}
depth_ = new float[cx_ * cy_];
- for (int i = 0; i < (cx_ * cy_); i++)
- depth_[i] = std::numeric_limits<float>::quiet_NaN ();
+ std::fill_n(depth_, cx_ * cy_, std::numeric_limits<float>::quiet_NaN());
- for (std::size_t i = 0; i < scene->points.size (); i++)
+ for (const auto& point: *scene)
{
- float x = scene->points[i].x;
- float y = scene->points[i].y;
- float z = scene->points[i].z;
- int u = static_cast<int> (f_ * x / z + cx);
- int v = static_cast<int> (f_ * y / z + cy);
+ const float& x = point.x;
+ const float& y = point.y;
+ const float& z = point.z;
+ const int u = static_cast<int> (f_ * x / z + cx);
+ const int v = static_cast<int> (f_ * y / z + cy);
if (u >= cx_ || v >= cy_ || u < 0 || v < 0)
continue;
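The Z-buffer above is first filled with NaN and then every scene point is splatted through the pinhole model `u = f*x/z + cx`, `v = f*y/z + cy`, discarding projections that fall outside the `cx_` x `cy_` image. A hedged sketch of one such splat, assuming a row-major buffer and a keep-the-nearest-depth policy (the exact layout and policy inside ZBuffering may differ):
#include <cmath>
#include <vector>
// Project one point (x, y, z) with focal length f and principal point (cx, cy),
// then keep the smallest depth seen at that pixel.
void splat (std::vector<float>& depth, int width, int height,
            float f, float cx, float cy, float x, float y, float z)
{
  const int u = static_cast<int> (f * x / z + cx);
  const int v = static_cast<int> (f * y / z + cy);
  if (u < 0 || v < 0 || u >= width || v >= height)
    return;
  float& d = depth[v * width + u];
  if (std::isnan (d) || z < d)
    d = z;
}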
if (cloud != nullptr)
{
- colored_cloud->height += static_cast<std::uint32_t> (cloud->points.size ());
+ colored_cloud->height += cloud->size ();
point.r = 255;
point.g = 255;
point.b = 255;
- for (std::size_t i_point = 0; i_point < cloud->points.size (); i_point++)
+ for (const auto& i_point: *cloud)
{
- point.x = cloud->points[i_point].x;
- point.y = cloud->points[i_point].y;
- point.z = cloud->points[i_point].z;
+ point.x = i_point.x;
+ point.y = i_point.y;
+ point.z = i_point.z;
colored_cloud->points.push_back (point);
}
}
point.z = i_vote.z;
colored_cloud->points.push_back (point);
}
- colored_cloud->height += static_cast<std::uint32_t> (votes_->points.size ());
+ colored_cloud->height += votes_->size ();
return (colored_cloud);
}
for (int i = 0; i < NUM_INIT_PTS; i++)
{
Eigen::Vector3f old_center;
- Eigen::Vector3f curr_center;
- curr_center (0) = votes_->points[votes_->points.size () * i / NUM_INIT_PTS].x;
- curr_center (1) = votes_->points[votes_->points.size () * i / NUM_INIT_PTS].y;
- curr_center (2) = votes_->points[votes_->points.size () * i / NUM_INIT_PTS].z;
+ const auto idx = votes_->size() * i / NUM_INIT_PTS;
+ Eigen::Vector3f curr_center = (*votes_)[idx].getVector3fMap();
do
{
if (tree_ == nullptr)
tree_.reset (new pcl::KdTreeFLANN<pcl::InterestPoint>);
tree_->setInputCloud (votes_);
- k_ind_.resize ( votes_->points.size (), -1 );
- k_sqr_dist_.resize ( votes_->points.size (), 0.0f );
+ k_ind_.resize ( votes_->size (), -1 );
+ k_sqr_dist_.resize ( votes_->size (), 0.0f );
tree_is_valid_ = true;
}
}
for (std::size_t j = 0; j < n_pts; j++)
{
- double kernel = votes_->points[k_ind_[j]].strength * std::exp (-k_sqr_dist_[j] / (in_sigma_dist * in_sigma_dist));
- Eigen::Vector3f vote_vec (votes_->points[k_ind_[j]].x, votes_->points[k_ind_[j]].y, votes_->points[k_ind_[j]].z);
+ double kernel = (*votes_)[k_ind_[j]].strength * std::exp (-k_sqr_dist_[j] / (in_sigma_dist * in_sigma_dist));
+ Eigen::Vector3f vote_vec ((*votes_)[k_ind_[j]].x, (*votes_)[k_ind_[j]].y, (*votes_)[k_ind_[j]].z);
wgh_sum += vote_vec * static_cast<float> (kernel);
denom += kernel;
}
std::size_t num_of_pts = tree_->radiusSearch (pt, 3 * sigma_dist, k_ind_, k_sqr_dist_);
for (std::size_t j = 0; j < num_of_pts; j++)
- sum_vote += votes_->points[k_ind_[j]].strength * std::exp (-k_sqr_dist_[j] / (sigma_dist * sigma_dist));
+ sum_vote += (*votes_)[k_ind_[j]].strength * std::exp (-k_sqr_dist_[j] / (sigma_dist * sigma_dist));
return (sum_vote);
}
template <typename PointT> unsigned int
pcl::features::ISMVoteList<PointT>::getNumberOfVotes ()
{
- return (static_cast<unsigned int> (votes_->points.size ()));
+ return (static_cast<unsigned int> (votes_->size ()));
}
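`getDensityAtPoint` gathers the votes within `3 * sigma_dist` of the query (via the kd-tree radius search) and sums each vote's strength weighted by a Gaussian of its squared distance, `strength * exp(-d^2 / sigma_dist^2)`; the mean-shift iteration above uses the same kernel to pull candidate object centers toward dense vote regions. A brute-force sketch of that kernel sum over hypothetical vote data, without the kd-tree shortcut:
#include <cmath>
#include <vector>
struct Vote { float x, y, z, strength; };
// Gaussian-weighted vote density at a query point (qx, qy, qz).
double density_at (const std::vector<Vote>& votes,
                   float qx, float qy, float qz, double sigma_dist)
{
  double sum = 0.0;
  for (const auto& v : votes)
  {
    const double d2 = (v.x - qx) * (v.x - qx)
                    + (v.y - qy) * (v.y - qy)
                    + (v.z - qz) * (v.z - qz);
    sum += v.strength * std::exp (-d2 / (sigma_dist * sigma_dist));
  }
  return sum;
}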
//////////////////////////////////////////////////////////////////////////////////////////////
{
Eigen::VectorXf curr_descriptor (FeatureSize);
for (int i_dim = 0; i_dim < FeatureSize; i_dim++)
- curr_descriptor (i_dim) = feature_cloud->points[i_point].histogram[i_dim];
+ curr_descriptor (i_dim) = (*feature_cloud)[i_point].histogram[i_dim];
float descriptor_sum = curr_descriptor.sum ();
if (descriptor_sum < std::numeric_limits<float>::epsilon ())
const unsigned int n_words = static_cast<unsigned int> (model->clusters_[min_dist_idx].size ());
//compute coord system transform
- Eigen::Matrix3f transform = alignYCoordWithNormal (sampled_normal_cloud->points[i_point]);
+ Eigen::Matrix3f transform = alignYCoordWithNormal ((*sampled_normal_cloud)[i_point]);
for (unsigned int i_word = 0; i_word < n_words; i_word++)
{
unsigned int index = model->clusters_[min_dist_idx][i_word];
applyTransform (direction, transform.transpose ());
pcl::InterestPoint vote;
- Eigen::Vector3f vote_pos = sampled_point_cloud->points[i_point].getVector3fMap () + direction;
+ Eigen::Vector3f vote_pos = (*sampled_point_cloud)[i_point].getVector3fMap () + direction;
vote.x = vote_pos[0];
vote.y = vote_pos[1];
vote.z = vote_pos[2];
float power = statistical_weight * learned_weight;
vote.strength = power;
if (vote.strength > std::numeric_limits<float>::epsilon ())
- out_votes->addVote (vote, sampled_point_cloud->points[i_point], i_class);
+ out_votes->addVote (vote, (*sampled_point_cloud)[i_point], i_class);
}
}//next point
{
//compute the center of the training object
Eigen::Vector3f models_center (0.0f, 0.0f, 0.0f);
- const std::size_t num_of_points = training_clouds_[i_cloud]->points.size ();
+ const auto num_of_points = training_clouds_[i_cloud]->size ();
for (auto point_j = training_clouds_[i_cloud]->begin (); point_j != training_clouds_[i_cloud]->end (); point_j++)
models_center += point_j->getVector3fMap ();
models_center /= static_cast<float> (num_of_points);
int point_index = 0;
for (auto point_i = sampled_point_cloud->points.cbegin (); point_i != sampled_point_cloud->points.cend (); point_i++, point_index++)
{
- float descriptor_sum = Eigen::VectorXf::Map (feature_cloud->points[point_index].histogram, FeatureSize).sum ();
+ float descriptor_sum = Eigen::VectorXf::Map ((*feature_cloud)[point_index].histogram, FeatureSize).sum ();
if (descriptor_sum < std::numeric_limits<float>::epsilon ())
continue;
histograms.insert ( histograms.end (), feature_cloud->begin () + point_index, feature_cloud->begin () + point_index + 1 );
int dist = static_cast<int> (std::distance (sampled_point_cloud->points.cbegin (), point_i));
- Eigen::Matrix3f new_basis = alignYCoordWithNormal (sampled_normal_cloud->points[dist]);
+ Eigen::Matrix3f new_basis = alignYCoordWithNormal ((*sampled_normal_cloud)[dist]);
Eigen::Vector3f zero;
zero (0) = 0.0;
zero (1) = 0.0;
applyTransform (new_dir, new_basis);
PointT point (new_dir[0], new_dir[1], new_dir[2]);
- LocationInfo info (static_cast<unsigned int> (i_cloud), point, *point_i, sampled_normal_cloud->points[dist]);
+ LocationInfo info (static_cast<unsigned int> (i_cloud), point, *point_i, (*sampled_normal_cloud)[dist]);
locations.insert(locations.end (), info);
}
}//next training cloud
for (unsigned int i_object = 0; i_object < number_of_objects; i_object++)
{
float max_distance = 0.0f;
- unsigned int number_of_points = static_cast<unsigned int> (training_clouds_[i_object]->points.size ());
+ const auto number_of_points = training_clouds_[i_object]->size ();
for (unsigned int i_point = 0; i_point < number_of_points - 1; i_point++)
for (unsigned int j_point = i_point + 1; j_point < number_of_points; j_point++)
{
- float curr_distance = 0.0f;
- curr_distance += training_clouds_[i_object]->points[i_point].x * training_clouds_[i_object]->points[j_point].x;
- curr_distance += training_clouds_[i_object]->points[i_point].y * training_clouds_[i_object]->points[j_point].y;
- curr_distance += training_clouds_[i_object]->points[i_point].z * training_clouds_[i_object]->points[j_point].z;
- if (curr_distance > max_distance)
- max_distance = curr_distance;
+ float curr_distance = 0.0f;
+ curr_distance += (*training_clouds_[i_object])[i_point].x * (*training_clouds_[i_object])[j_point].x;
+ curr_distance += (*training_clouds_[i_object])[i_point].y * (*training_clouds_[i_object])[j_point].y;
+ curr_distance += (*training_clouds_[i_object])[i_point].z * (*training_clouds_[i_object])[j_point].z;
+ if (curr_distance > max_distance)
+ max_distance = curr_distance;
}
max_distance = static_cast<float> (sqrt (max_distance));
unsigned int i_class = training_classes_[i_object];
//extract indices of points from source cloud which are closest to grid points
const float max_value = std::numeric_limits<float>::max ();
- const std::size_t num_source_points = in_point_cloud->points.size ();
- const std::size_t num_sample_points = temp_cloud.points.size ();
+ const auto num_source_points = in_point_cloud->size ();
+ const auto num_sample_points = temp_cloud.size ();
std::vector<float> dist_to_grid_center (num_sample_points, max_value);
std::vector<int> sampling_indices (num_sample_points, -1);
for (std::size_t i_point = 0; i_point < num_source_points; i_point++)
{
- int index = grid.getCentroidIndex (in_point_cloud->points[i_point]);
+ int index = grid.getCentroidIndex ((*in_point_cloud)[i_point]);
if (index == -1)
continue;
- PointT pt_1 = in_point_cloud->points[i_point];
- PointT pt_2 = temp_cloud.points[index];
+ PointT pt_1 = (*in_point_cloud)[i_point];
+ PointT pt_2 = temp_cloud[index];
float distance = (pt_1.x - pt_2.x) * (pt_1.x - pt_2.x) + (pt_1.y - pt_2.y) * (pt_1.y - pt_2.y) + (pt_1.z - pt_2.z) * (pt_1.z - pt_2.z);
if (distance < dist_to_grid_center[index])
std::size_t counter = 0;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- const PointXYZRGBA & p = template_point_cloud.points[j];
+ const PointXYZRGBA & p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- PointXYZRGBA p = template_point_cloud.points[j];
+ PointXYZRGBA p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
p.y -= center_y;
p.z -= center_z;
- template_point_cloud.points[j] = p;
+ template_point_cloud[j] = p;
}
}
std::size_t counter = 0;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- const PointXYZRGBA & p = template_point_cloud.points[j];
+ const PointXYZRGBA & p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- PointXYZRGBA p = template_point_cloud.points[j];
+ PointXYZRGBA p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
p.y -= center_y;
p.z -= center_z;
- template_point_cloud.points[j] = p;
+ template_point_cloud[j] = p;
}
}
std::size_t counter = 0;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- const PointXYZRGBA & p = template_point_cloud.points[j];
+ const PointXYZRGBA & p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
for (std::size_t j = 0; j < template_point_cloud.size (); ++j)
{
- PointXYZRGBA p = template_point_cloud.points[j];
+ PointXYZRGBA p = template_point_cloud[j];
if (!std::isfinite (p.x) || !std::isfinite (p.y) || !std::isfinite (p.z))
continue;
p.y -= center_y;
p.z -= center_z;
- template_point_cloud.points[j] = p;
+ template_point_cloud[j] = p;
}
}
cloud.height = template_point_cloud.height;
for (std::size_t point_index = 0; point_index < nr_points; ++point_index)
{
- pcl::PointXYZRGBA point = template_point_cloud.points[point_index];
+ pcl::PointXYZRGBA point = template_point_cloud[point_index];
point.x += translation_x;
point.y += translation_y;
point.z += translation_z;
- cloud.points[point_index] = point;
+ cloud[point_index] = point;
}
}
#include <pcl/pcl_macros.h>
-#include <cstring>
#include <vector>
namespace pcl {
#include <ctime>
#include <string>
#include <list>
-#include <set>
#include <map>
namespace pcl
{
namespace recognition
{
- class ORROctree;
-
class PCL_EXPORTS ORROctreeZProjection
{
public:
for ( int i = 0 ; i < num_source_points ; ++i )
{
// Transform the i-th source point based on the current transform matrix
- aux::transform (guess_and_result, source_points.points[i], transformed_source_point);
+ aux::transform (guess_and_result, source_points[i], transformed_source_point);
// Perform the closest point search
kdtree_.nearestKSearch (transformed_source_point, 1, target_index, sqr_dist_to_target);
{
const int index = y * width + x;
- const float px = input_->points[index].x;
- const float py = input_->points[index].y;
- const float pz = input_->points[index].z;
+ const float px = (*input_)[index].x;
+ const float py = (*input_)[index].y;
+ const float pz = (*input_)[index].z;
if (std::isnan(px) || pz > 2.0f)
{
- surface_normals_.points[index].normal_x = bad_point;
- surface_normals_.points[index].normal_y = bad_point;
- surface_normals_.points[index].normal_z = bad_point;
- surface_normals_.points[index].curvature = bad_point;
+ surface_normals_[index].normal_x = bad_point;
+ surface_normals_[index].normal_y = bad_point;
+ surface_normals_[index].normal_z = bad_point;
+ surface_normals_[index].curvature = bad_point;
quantized_surface_normals_ (x, y) = 0;
const std::size_t index2 = v * width + u;
- const float qx = input_->points[index2].x;
- const float qy = input_->points[index2].y;
- const float qz = input_->points[index2].z;
+ const float qx = (*input_)[index2].x;
+ const float qy = (*input_)[index2].y;
+ const float qz = (*input_)[index2].z;
if (std::isnan(qx)) continue;
if (length <= 0.0f)
{
- surface_normals_.points[index].normal_x = bad_point;
- surface_normals_.points[index].normal_y = bad_point;
- surface_normals_.points[index].normal_z = bad_point;
- surface_normals_.points[index].curvature = bad_point;
+ surface_normals_[index].normal_x = bad_point;
+ surface_normals_[index].normal_y = bad_point;
+ surface_normals_[index].normal_z = bad_point;
+ surface_normals_[index].curvature = bad_point;
quantized_surface_normals_ (x, y) = 0;
}
const float normal_y = ny * normInv;
const float normal_z = nz * normInv;
- surface_normals_.points[index].normal_x = normal_x;
- surface_normals_.points[index].normal_y = normal_y;
- surface_normals_.points[index].normal_z = normal_z;
- surface_normals_.points[index].curvature = bad_point;
+ surface_normals_[index].normal_x = normal_x;
+ surface_normals_[index].normal_y = normal_y;
+ surface_normals_[index].normal_z = normal_z;
+ surface_normals_[index].curvature = bad_point;
float angle = 11.25f + std::atan2 (normal_y, normal_x)*180.0f/3.14f;
{
for (int col_index = 0; col_index < width; ++col_index)
{
- const float value = input_->points[row_index*width + col_index].z;
+ const float value = (*input_)[row_index*width + col_index].z;
if (std::isfinite (value))
{
lp_depth[row_index*width + col_index] = static_cast<unsigned short> (value * 1000.0f);
for (int l_x = l_r; l_x < l_W - l_r - 1; ++l_x)
{
long l_d = lp_line[0];
- //float l_d = input_->points[(l_y * l_W + l_r) + l_x].z;
- //float px = input_->points[(l_y * l_W + l_r) + l_x].x;
- //float py = input_->points[(l_y * l_W + l_r) + l_x].y;
+ //float l_d = (*input_)[(l_y * l_W + l_r) + l_x].z;
+ //float px = (*input_)[(l_y * l_W + l_r) + l_x].x;
+ //float py = (*input_)[(l_y * l_W + l_r) + l_x].y;
if (l_d < distance_threshold)
{
// //b[1] += fj * delta;
- // const double delta = 1000.0f * (input_->points[(l_y * l_W + l_r) + l_x + offsets[index]].z - l_d);
+ // const double delta = 1000.0f * ((*input_)[(l_y * l_W + l_r) + l_x + offsets[index]].z - l_d);
// const double i = offsets_i[index];
// const double j = offsets_j[index];
- // //const float i = input_->points[(l_y * l_W + l_r) + l_x + offsets[index]].x - px;//offsets_i[index];
- // //const float j = input_->points[(l_y * l_W + l_r) + l_x + offsets[index]].y - py;//offsets_j[index];
+ // //const float i = (*input_)[(l_y * l_W + l_r) + l_x + offsets[index]].x - px;//offsets_i[index];
+ // //const float j = (*input_)[(l_y * l_W + l_r) + l_x + offsets[index]].y - py;//offsets_j[index];
// double * A = l_A;
// double * b = l_b;
// const double threshold = difference_threshold;
output.resize (width, height);
// compute distance map
- //float *distance_map = new float[input_->points.size ()];
+ //float *distance_map = new float[input_->size ()];
const unsigned char * mask_map = input.getData ();
float * distance_map = output.getData ();
for (std::size_t index = 0; index < width*height; ++index)
#include "pcl/point_types.h"
#include "pcl/impl/instantiate.hpp"
-#include "pcl/recognition/cg/geometric_consistency.h"
#include "pcl/recognition/impl/cg/geometric_consistency.hpp"
#ifdef PCL_ONLY_CORE_POINT_TYPES
int element_stride = sizeof(pcl::PointXYZ) / sizeof(float);
int row_stride = element_stride * cloud->width;
- const float *data = reinterpret_cast<const float*> (&cloud->points[0]);
+ const float *data = reinterpret_cast<const float*> (&(*cloud)[0]);
integral_image_depth->setInput (data + 2, cloud->width, cloud->height, element_stride, row_stride);
//Compute normals and normal integral images
if (USE_NORMALS_)
{
integral_image_normal_x.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_nx = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_nx = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_x->setInput (data_nx, normals->width, normals->height, element_stride_normal, row_stride_normal);
integral_image_normal_y.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_ny = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_ny = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_y->setInput (data_ny + 1, normals->width, normals->height, element_stride_normal, row_stride_normal);
integral_image_normal_z.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_nz = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_nz = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_z->setInput (data_nz + 2, normals->width, normals->height, element_stride_normal, row_stride_normal);
}
pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity(new pcl::PointCloud<pcl::PointXYZI>);
cloud_intensity->width = cloud->width;
cloud_intensity->height = cloud->height;
- cloud_intensity->points.resize(cloud->points.size());
+ cloud_intensity->points.resize(cloud->size());
cloud_intensity->is_dense = cloud->is_dense;
- for (int jjj = 0; jjj < static_cast<int>(cloud->points.size()); jjj++)
+ for (int jjj = 0; jjj < static_cast<int>(cloud->size()); jjj++)
{
- cloud_intensity->points[jjj].getVector4fMap() = cloud->points[jjj].getVector4fMap();
- cloud_intensity->points[jjj].intensity = 0.f;
+ (*cloud_intensity)[jjj].getVector4fMap() = (*cloud)[jjj].getVector4fMap();
+ (*cloud_intensity)[jjj].intensity = 0.f;
int row, col;
col = jjj % cloud->width;
row = jjj / cloud->width;
//std::cout << row << " " << col << std::endl;
if (check_inside(col, row, min_col, max_col, min_row, max_row))
{
- cloud_intensity->points[jjj].intensity = 1.f;
+ (*cloud_intensity)[jjj].intensity = 1.f;
}
}
int element_stride = sizeof(pcl::PointXYZ) / sizeof(float);
int row_stride = element_stride * cloud->width;
- const float *data = reinterpret_cast<const float*> (&cloud->points[0]);
+ const float *data = reinterpret_cast<const float*> (&(*cloud)[0]);
integral_image_depth->setInput (data + 2, cloud->width, cloud->height, element_stride, row_stride);
//Compute normals and normal integral images
if (use_normals_)
{
integral_image_normal_x.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_nx = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_nx = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_x->setInput (data_nx, normals->width, normals->height, element_stride_normal, row_stride_normal);
integral_image_normal_y.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_ny = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_ny = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_y->setInput (data_ny + 1, normals->width, normals->height, element_stride_normal, row_stride_normal);
integral_image_normal_z.reset (new pcl::IntegralImage2D<float, 1> (false));
- const float *data_nz = reinterpret_cast<const float*> (&normals->points[0]);
+ const float *data_nz = reinterpret_cast<const float*> (&(*normals)[0]);
integral_image_normal_z->setInput (data_nz + 2, normals->width, normals->height, element_stride_normal, row_stride_normal);
}
pcl::TernaryTreeMissingDataBranchEstimator * btt = new pcl::TernaryTreeMissingDataBranchEstimator ();
face_detection::PoseClassRegressionVarianceStatsEstimator<float, NodeType, std::vector<face_detection::TrainingExample>, int> rse (btt);
- std::vector<float> weights;
- weights.resize (cloud->points.size ());
- for (std::size_t i = 0; i < cloud->points.size (); i++)
- weights[i] = 0;
+ std::vector<float> weights(cloud->size(), 0.f);
int w_size_2 = static_cast<int> (w_size_ / 2);
if (face_heat_map_)
{
face_heat_map_.reset (new pcl::PointCloud<pcl::PointXYZI>);
- face_heat_map_->resize (cloud->points.size ());
+ face_heat_map_->resize (cloud->size ());
face_heat_map_->height = 1;
- face_heat_map_->width = static_cast<unsigned int>(cloud->points.size ());
+ face_heat_map_->width = cloud->size ();
face_heat_map_->is_dense = false;
- for (std::size_t i = 0; i < cloud->points.size (); i++)
+ for (std::size_t i = 0; i < cloud->size (); i++)
{
- face_heat_map_->points[i].getVector4fMap () = cloud->points[i].getVector4fMap ();
- face_heat_map_->points[i].intensity = weights[i];
+ (*face_heat_map_)[i].getVector4fMap () = (*cloud)[i].getVector4fMap ();
+ (*face_heat_map_)[i].intensity = weights[i];
}
}
}
#include <pcl/recognition/quantizable_modality.h>
#include <cstddef>
-#include <cstring>
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::QuantizableModality::QuantizableModality ()
#include <pcl/console/print.h>
#include <cmath>
-using namespace std;
using namespace pcl;
using namespace console;
using namespace pcl::recognition;
#endif
// Try to insert a new model entry
- pair<map<string,Model*>::iterator, bool> result = models_.insert (pair<string,Model*> (object_name, static_cast<Model*> (nullptr)));
+ std::pair<std::map<std::string,Model*>::iterator, bool> result = models_.insert (std::pair<std::string,Model*> (object_name, static_cast<Model*> (nullptr)));
// Check if 'object_name' is unique
if (!result.second)
const ORROctree& octree = new_model->getOctree ();
const std::vector<ORROctree::Node*> &full_leaves = octree.getFullLeaves ();
- list<ORROctree::Node*> inter_leaves;
+ std::list<ORROctree::Node*> inter_leaves;
int num_of_pairs = 0;
// Run through all full leaves
#include <pcl/common/random.h>
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
-using namespace std;
using namespace pcl::common;
pcl::recognition::ObjRecRANSAC::ObjRecRANSAC (float pair_width, float voxel_size)
//===============================================================================================================================================
void
-pcl::recognition::ObjRecRANSAC::recognize (const PointCloudIn& scene, const PointCloudN& normals, list<ObjRecRANSAC::Output>& recognized_objects, double success_probability)
+pcl::recognition::ObjRecRANSAC::recognize (const PointCloudIn& scene, const PointCloudN& normals, std::list<ObjRecRANSAC::Output>& recognized_objects, double success_probability)
{
// Clear some stuff
this->clearTestData ();
return;
// Generate hypotheses from the sampled opps
- list<HypothesisBase> pre_hypotheses;
+ std::list<HypothesisBase> pre_hypotheses;
int num_hypotheses = this->generateHypotheses (sampled_oriented_point_pairs_, pre_hypotheses);
// Cluster the hypotheses
void
pcl::recognition::ObjRecRANSAC::sampleOrientedPointPairs (int num_iterations, const std::vector<ORROctree::Node*>& full_scene_leaves,
- list<OrientedPointPair>& output) const
+ std::list<OrientedPointPair>& output) const
{
#ifdef OBJ_REC_RANSAC_VERBOSE
printf ("ObjRecRANSAC::%s(): sampling oriented point pairs (opps) ... ", __func__); fflush (stdout);
//===============================================================================================================================================
int
-pcl::recognition::ObjRecRANSAC::generateHypotheses (const list<OrientedPointPair>& pairs, list<HypothesisBase>& out) const
+pcl::recognition::ObjRecRANSAC::generateHypotheses (const std::list<OrientedPointPair>& pairs, std::list<HypothesisBase>& out) const
{
#ifdef OBJ_REC_RANSAC_VERBOSE
printf("ObjRecRANSAC::%s(): generating hypotheses ... ", __func__); fflush (stdout);
//===============================================================================================================================================
int
-pcl::recognition::ObjRecRANSAC::groupHypotheses(list<HypothesisBase>& hypotheses, int num_hypotheses,
+pcl::recognition::ObjRecRANSAC::groupHypotheses(std::list<HypothesisBase>& hypotheses, int num_hypotheses,
RigidTransformSpace& transform_space, HypothesisOctree& grouped_hypotheses) const
{
#ifdef OBJ_REC_RANSAC_VERBOSE
transform_space.addRigidTransform (hypothesis.obj_model_, transformed_point, hypothesis.rigid_transform_);
}
- list<RotationSpace*>& rotation_spaces = transform_space.getRotationSpaces ();
+ std::list<RotationSpace*>& rotation_spaces = transform_space.getRotationSpaces ();
int num_accepted = 0;
#ifdef OBJ_REC_RANSAC_VERBOSE
// Now take the best hypothesis from each rotation space
for (const auto &rotation_space : rotation_spaces)
{
- const map<string, ModelLibrary::Model*>& models = model_library_.getModels ();
+ const std::map<std::string, ModelLibrary::Model*>& models = model_library_.getModels ();
Hypothesis best_hypothesis;
best_hypothesis.match_confidence_ = 0.0f;
graph.getNodes ()[i]->setData ((*hypo)->getData ());
// Get the neighbors of the current rotation space
- const set<HypothesisOctree::Node*>& neighbors = (*hypo)->getNeighbors ();
+ const std::set<HypothesisOctree::Node*>& neighbors = (*hypo)->getNeighbors ();
for (const auto &neighbor : neighbors)
graph.insertDirectedEdge ((*hypo)->getData ().getLinearId (), neighbor->getData ().getLinearId ());
printf ("ObjRecRANSAC::%s(): building the graph ... ", __func__); fflush (stdout);
#endif
- list<ORRGraph<Hypothesis>::Node*> on_nodes, off_nodes;
+ std::list<ORRGraph<Hypothesis>::Node*> on_nodes, off_nodes;
graph.computeMaximalOnOffPartition (on_nodes, off_nodes);
// Copy the data from the on_nodes to the list 'out'
graph.getNodes ()[lin_id]->setData ((*obj)->getData ());
}
- using ordered_int_pair = pair<int,int>;
+ using ordered_int_pair = std::pair<int,int>;
// This one is to make sure that we do not compute the same set intersection twice
- set<ordered_int_pair, bool(*)(const ordered_int_pair&, const ordered_int_pair&)> ordered_hypotheses_ids (aux::compareOrderedPairs<int>);
+ std::set<ordered_int_pair, bool(*)(const ordered_int_pair&, const ordered_int_pair&)> ordered_hypotheses_ids (aux::compareOrderedPairs<int>);
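// Editor's sketch (not part of the diff) of the de-duplication pattern above: a std::set
// with a function-pointer comparator stores each id pair once, so a pair of hypotheses is
// never tested twice. compare_pairs and seen_before are illustrative stand-ins.
#include <algorithm>
#include <set>
#include <utility>

using ordered_int_pair = std::pair<int, int>;

inline bool
compare_pairs (const ordered_int_pair &a, const ordered_int_pair &b)
{
  return a < b; // lexicographic order, standing in for aux::compareOrderedPairs<int>
}

inline bool
seen_before (std::set<ordered_int_pair, bool (*)(const ordered_int_pair&, const ordered_int_pair&)> &seen,
             int id1, int id2)
{
  // store the smaller id first so (3, 7) and (7, 3) collapse to the same key
  const ordered_int_pair key (std::min (id1, id2), std::max (id1, id2));
  return !seen.insert (key).second; // insert ().second is false when the key already existed
}
// The set must be constructed with the comparator, e.g.
// std::set<ordered_int_pair, bool (*)(const ordered_int_pair&, const ordered_int_pair&)> seen (compare_pairs);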
// Project the hypotheses onto the "range image" and store in each pixel the corresponding hypothesis id
for (const auto &bounded_object : *bounded_objects)
hypo1->computeBounds (bounds);
// Check if these bounds intersect other hypotheses' bounds
- list<BVHH::BoundedObject*> intersected_objects;
+ std::list<BVHH::BoundedObject*> intersected_objects;
bvh.intersect (bounds, intersected_objects);
for (const auto &intersected_object : intersected_objects)
Hypothesis *hypo2 = intersected_object->getData ();
// Build an ordered int pair out of the hypotheses ids
- pair<int,int> id_pair;
+ std::pair<int,int> id_pair;
if ( hypo1->getLinearId () < hypo2->getLinearId () )
{
id_pair.first = hypo1->getLinearId ();
}
// Make sure that we do not compute the same set intersection twice
- pair<set<ordered_int_pair, bool(*)(const ordered_int_pair&, const ordered_int_pair&)>::iterator, bool> res = ordered_hypotheses_ids.insert (id_pair);
+ std::pair<std::set<ordered_int_pair, bool(*)(const ordered_int_pair&, const ordered_int_pair&)>::iterator, bool> res = ordered_hypotheses_ids.insert (id_pair);
if ( !res.second )
- continue; // We've already computed that set intersection -> check the next pair
+ continue; // We've already computed that set intersection -> check the next pair
// Do the more involved intersection test based on a set intersection of the range image pixels which are explained by the hypotheses
- set<int> id_intersection;
+ std::set<int> id_intersection;
// Compute the ids intersection of 'obj' and 'it'
std::set_intersection (hypo1->explained_pixels_.begin (),
//===============================================================================================================================================
void
-pcl::recognition::ObjRecRANSAC::filterGraphOfConflictingHypotheses (ORRGraph<Hypothesis*>& graph, list<ObjRecRANSAC::Output>& recognized_objects) const
+pcl::recognition::ObjRecRANSAC::filterGraphOfConflictingHypotheses (ORRGraph<Hypothesis*>& graph, std::list<ObjRecRANSAC::Output>& recognized_objects) const
{
#ifdef OBJ_REC_RANSAC_VERBOSE
printf ("ObjRecRANSAC::%s(): filtering the conflict graph ... ", __func__); fflush (stdout);
}
// Leave the fittest leaves on, such that there are no neighboring ON nodes
- list<ORRGraph<Hypothesis*>::Node*> on_nodes, off_nodes;
+ std::list<ORRGraph<Hypothesis*>::Node*> on_nodes, off_nodes;
graph.computeMaximalOnOffPartition (on_nodes, off_nodes);
// The ON nodes correspond to accepted solutions
hypothesis->explained_pixels_.insert (pixel->getId ());
// Compute the match based on the normal agreement
- const set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>* nodes = scene_octree_proj_.getOctreeNodes (transformed_point);
+ const std::set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>* nodes = scene_octree_proj_.getOctreeNodes (transformed_point);
- set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>::const_iterator n = nodes->begin ();
+ std::set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>::const_iterator n = nodes->begin ();
ORROctree::Node *closest_node = *n;
float min_sqr_dist = aux::sqrDistance3 (closest_node->getData ()->getPoint (), transformed_point);
#include <pcl/recognition/ransac_based/orr_octree.h>
#include <pcl/common/common.h>
#include <pcl/common/random.h>
-#include <cstdlib>
#include <cmath>
#include <algorithm>
#include <list>
-using namespace std;
using namespace pcl::recognition;
pcl::recognition::ORROctree::ORROctree ()
void
pcl::recognition::ORROctree::getFullLeavesIntersectedBySphere (const float* p, float radius, std::list<ORROctree::Node*>& out) const
{
- list<ORROctree::Node*> nodes;
+ std::list<ORROctree::Node*> nodes;
nodes.push_back (root_);
ORROctree::Node *node, *child;
pcl::common::UniformGenerator<int> randgen (0, 1, static_cast<std::uint32_t> (time (nullptr)));
- list<ORROctree::Node*> nodes;
+ std::list<ORROctree::Node*> nodes;
nodes.push_back (root_);
while ( !nodes.empty () )
#include <array>
#include <vector>
-using namespace std;
//=========================================================================================================================================
for (const auto &full_set : full_sets_)
{
// Get the first node in the set
- set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>::iterator node = full_set->get_nodes ().begin ();
+ std::set<ORROctree::Node*, bool(*)(ORROctree::Node*,ORROctree::Node*)>::iterator node = full_set->get_nodes ().begin ();
// Initialize
float best_min = (*node)->getBounds ()[4];
float best_max = (*node)->getBounds ()[5];
}
std::vector<int> indices (1);
std::vector<float> distances (1);
- if (tree_->nearestKSearch (input_->points[index], 1, indices, distances))
+ if (tree_->nearestKSearch ((*input_)[index], 1, indices, distances))
return (distances[0]);
return (std::numeric_limits<double>::max ());
}
getCorrespondenceScore (const pcl::Correspondence &corr) override
{
// Get the source and the target feature from the list
- const PointT &src = input_->points[corr.index_query];
- const PointT &tgt = target_->points[corr.index_match];
+ const PointT &src = (*input_)[corr.index_query];
+ const PointT &tgt = (*target_)[corr.index_match];
return ((src.getVector4fMap () - tgt.getVector4fMap ()).squaredNorm ());
}
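// Editor's note (illustrative, not part of the diff): both points typically store
// data[3] == 1, so the fourth component cancels in the difference and the 4-vector
// squaredNorm above equals the squared 3-D Euclidean distance.
#include <pcl/point_types.h>

inline float
squared_distance (const pcl::PointXYZ &src, const pcl::PointXYZ &tgt)
{
  return (src.getVector4fMap () - tgt.getVector4fMap ()).squaredNorm ();
  // same value as (src.getVector3fMap () - tgt.getVector3fMap ()).squaredNorm ()
}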
inline double
getCorrespondenceScoreFromNormals (const pcl::Correspondence &corr) override
{
- //assert ( (input_normals_->points.size () != 0) && (target_normals_->points.size () != 0) && "Normals are not set for the input and target point clouds");
+ //assert ( (input_normals_->size () != 0) && (target_normals_->size () != 0) && "Normals are not set for the input and target point clouds");
assert (input_normals_ && target_normals_ && "Normals are not set for the input and target point clouds");
- const NormalT &src = input_normals_->points[corr.index_query];
- const NormalT &tgt = target_normals_->points[corr.index_match];
+ const NormalT &src = (*input_normals_)[corr.index_query];
+ const NormalT &tgt = (*target_normals_)[corr.index_match];
return (double ((src.normal[0] * tgt.normal[0]) + (src.normal[1] * tgt.normal[1]) + (src.normal[2] * tgt.normal[2])));
}
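// Editor's note (illustrative, not part of the diff): for unit-length normals the sum of
// the three products above is simply the dot product, i.e. the cosine of the angle
// between the normals, so this score is close to 1 for well-aligned correspondences.
#include <pcl/point_types.h>

inline double
normal_agreement (const pcl::Normal &src, const pcl::Normal &tgt)
{
  return static_cast<double> (src.getNormalVector3fMap ().dot (tgt.getNormalVector3fMap ()));
}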
#include <pcl/registration/correspondence_rejection.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
-#include <pcl/registration/boost.h>
#include <unordered_map>
{
if (!source_features_ || !target_features_)
return (false);
- return (source_features_->points.size () > 0 &&
- target_features_->points.size () > 0);
+ return (source_features_->size () > 0 &&
+ target_features_->size () > 0);
}
/** \brief Provide a boost shared pointer to a PointRepresentation to be used when comparing features
feature_representation_.reset (new DefaultFeatureRepresentation<FeatureT>);
// Get the source and the target feature from the list
- const FeatureT &feat_src = source_features_->points[index];
- const FeatureT &feat_tgt = target_features_->points[index];
+ const FeatureT &feat_src = (*source_features_)[index];
+ const FeatureT &feat_tgt = (*target_features_)[index];
// Check if the representations are valid
if (!feature_representation_->isValid (feat_src) || !feature_representation_->isValid (feat_tgt))
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/registration/correspondence_rejection.h>
-#include <pcl/sample_consensus/ransac.h>
-#include <pcl/sample_consensus/sac_model_registration.h>
#include <pcl/common/transforms.h>
namespace pcl
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
namespace pcl
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/registration/registration.h>
#include <pcl/registration/transformation_estimation_svd.h>
// PCL includes
#include <pcl/memory.h> // for dynamic_pointer_cast, pcl::make_shared, shared_ptr
-#include <pcl/sample_consensus/ransac.h>
-#include <pcl/sample_consensus/sac_model_registration.h>
#include <pcl/registration/registration.h>
#include <pcl/registration/transformation_estimation_svd.h>
#include <pcl/registration/transformation_estimation_point_to_plane_lls.h>
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
- tree_->nearestKSearch (input_->points[*idx], 1, index, distance);
+ tree_->nearestKSearch ((*input_)[*idx], 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx], pt);
+ copyPoint ((*input_)[*idx], pt);
tree_->nearestKSearch (pt, 1, index, distance);
if (distance[0] > max_dist_sqr)
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
- tree_->nearestKSearch (input_->points[*idx], 1, index, distance);
+ tree_->nearestKSearch ((*input_)[*idx], 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
target_idx = index[0];
- tree_reciprocal_->nearestKSearch (target_->points[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
if (distance_reciprocal[0] > max_dist_sqr || *idx != index_reciprocal[0])
continue;
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx], pt_src);
+ copyPoint ((*input_)[*idx], pt_src);
tree_->nearestKSearch (pt_src, 1, index, distance);
if (distance[0] > max_dist_sqr)
target_idx = index[0];
// Copy the target data to a target PointSource format so we can search in the tree_reciprocal
- copyPoint (target_->points[target_idx], pt_tgt);
+ copyPoint ((*target_)[target_idx], pt_tgt);
tree_reciprocal_->nearestKSearch (pt_tgt, 1, index_reciprocal, distance_reciprocal);
if (distance_reciprocal[0] > max_dist_sqr || *idx != index_reciprocal[0])
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
float min_dist = std::numeric_limits<float>::max ();
// Find the best correspondence
for (std::size_t j = 0; j < nn_indices.size (); j++)
{
- float cos_angle = source_normals_->points[*idx_i].normal_x * target_normals_->points[nn_indices[j]].normal_x +
- source_normals_->points[*idx_i].normal_y * target_normals_->points[nn_indices[j]].normal_y +
- source_normals_->points[*idx_i].normal_z * target_normals_->points[nn_indices[j]].normal_z ;
+ float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
if (dist < min_dist)
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
float min_dist = std::numeric_limits<float>::max ();
{
PointSource pt_src;
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx_i], pt_src);
+ copyPoint ((*input_)[*idx_i], pt_src);
- float cos_angle = source_normals_->points[*idx_i].normal_x * target_normals_->points[nn_indices[j]].normal_x +
- source_normals_->points[*idx_i].normal_y * target_normals_->points[nn_indices[j]].normal_y +
- source_normals_->points[*idx_i].normal_z * target_normals_->points[nn_indices[j]].normal_z ;
+ float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
if (dist < min_dist)
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
float min_dist = std::numeric_limits<float>::max ();
// Find the best correspondence
for (std::size_t j = 0; j < nn_indices.size (); j++)
{
- float cos_angle = source_normals_->points[*idx_i].normal_x * target_normals_->points[nn_indices[j]].normal_x +
- source_normals_->points[*idx_i].normal_y * target_normals_->points[nn_indices[j]].normal_y +
- source_normals_->points[*idx_i].normal_z * target_normals_->points[nn_indices[j]].normal_z ;
+ float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
if (dist < min_dist)
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch (target_->points[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
if (*idx_i != index_reciprocal[0])
continue;
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
float min_dist = std::numeric_limits<float>::max ();
{
PointSource pt_src;
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx_i], pt_src);
+ copyPoint ((*input_)[*idx_i], pt_src);
- float cos_angle = source_normals_->points[*idx_i].normal_x * target_normals_->points[nn_indices[j]].normal_x +
- source_normals_->points[*idx_i].normal_y * target_normals_->points[nn_indices[j]].normal_y +
- source_normals_->points[*idx_i].normal_z * target_normals_->points[nn_indices[j]].normal_z ;
+ float cos_angle = (*source_normals_)[*idx_i].normal_x * (*target_normals_)[nn_indices[j]].normal_x +
+ (*source_normals_)[*idx_i].normal_y * (*target_normals_)[nn_indices[j]].normal_y +
+ (*source_normals_)[*idx_i].normal_z * (*target_normals_)[nn_indices[j]].normal_z ;
float dist = nn_dists[j] * (2.0f - cos_angle * cos_angle);
if (dist < min_dist)
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch (target_->points[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
if (*idx_i != index_reciprocal[0])
continue;
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
double min_dist = std::numeric_limits<double>::max ();
{
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = target_->points[nn_indices[j]].x - input_->points[*idx_i].x;
- pt.y = target_->points[nn_indices[j]].y - input_->points[*idx_i].y;
- pt.z = target_->points[nn_indices[j]].z - input_->points[*idx_i].z;
+ pt.x = (*target_)[nn_indices[j]].x - (*input_)[*idx_i].x;
+ pt.y = (*target_)[nn_indices[j]].y - (*input_)[*idx_i].y;
+ pt.z = (*target_)[nn_indices[j]].z - (*input_)[*idx_i].z;
- const NormalT &normal = source_normals_->points[*idx_i];
+ const NormalT &normal = (*source_normals_)[*idx_i];
Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
Eigen::Vector3d V (pt.x, pt.y, pt.z);
Eigen::Vector3d C = N.cross (V);
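// Editor's sketch (not part of the diff) of the formula referenced above: the distance of
// a point P from the line through Q with direction N is |N x (P - Q)| / |N|.
// point_to_line_distance is a hypothetical helper.
#include <Eigen/Core>
#include <Eigen/Geometry>

inline double
point_to_line_distance (const Eigen::Vector3d &p,  // target point
                        const Eigen::Vector3d &q,  // point on the line (source point)
                        const Eigen::Vector3d &n)  // line direction (source normal)
{
  const Eigen::Vector3d v = p - q;
  return n.cross (v).norm () / n.norm ();
}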
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
double min_dist = std::numeric_limits<double>::max ();
{
PointSource pt_src;
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx_i], pt_src);
+ copyPoint ((*input_)[*idx_i], pt_src);
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = target_->points[nn_indices[j]].x - pt_src.x;
- pt.y = target_->points[nn_indices[j]].y - pt_src.y;
- pt.z = target_->points[nn_indices[j]].z - pt_src.z;
+ pt.x = (*target_)[nn_indices[j]].x - pt_src.x;
+ pt.y = (*target_)[nn_indices[j]].y - pt_src.y;
+ pt.z = (*target_)[nn_indices[j]].z - pt_src.z;
- const NormalT &normal = source_normals_->points[*idx_i];
+ const NormalT &normal = (*source_normals_)[*idx_i];
Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
Eigen::Vector3d V (pt.x, pt.y, pt.z);
Eigen::Vector3d C = N.cross (V);
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
double min_dist = std::numeric_limits<double>::max ();
{
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = target_->points[nn_indices[j]].x - input_->points[*idx_i].x;
- pt.y = target_->points[nn_indices[j]].y - input_->points[*idx_i].y;
- pt.z = target_->points[nn_indices[j]].z - input_->points[*idx_i].z;
+ pt.x = (*target_)[nn_indices[j]].x - (*input_)[*idx_i].x;
+ pt.y = (*target_)[nn_indices[j]].y - (*input_)[*idx_i].y;
+ pt.z = (*target_)[nn_indices[j]].z - (*input_)[*idx_i].z;
- const NormalT &normal = source_normals_->points[*idx_i];
+ const NormalT &normal = (*source_normals_)[*idx_i];
Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
Eigen::Vector3d V (pt.x, pt.y, pt.z);
Eigen::Vector3d C = N.cross (V);
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch (target_->points[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
if (*idx_i != index_reciprocal[0])
continue;
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx_i = indices_->begin (); idx_i != indices_->end (); ++idx_i)
{
- tree_->nearestKSearch (input_->points[*idx_i], k_, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*input_)[*idx_i], k_, nn_indices, nn_dists);
// Among the K nearest neighbours find the one with minimum perpendicular distance to the normal
double min_dist = std::numeric_limits<double>::max ();
{
PointSource pt_src;
// Copy the source data to a target PointTarget format so we can search in the tree
- copyPoint (input_->points[*idx_i], pt_src);
+ copyPoint ((*input_)[*idx_i], pt_src);
// computing the distance between a point and a line in 3d.
// Reference - http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
- pt.x = target_->points[nn_indices[j]].x - pt_src.x;
- pt.y = target_->points[nn_indices[j]].y - pt_src.y;
- pt.z = target_->points[nn_indices[j]].z - pt_src.z;
+ pt.x = (*target_)[nn_indices[j]].x - pt_src.x;
+ pt.y = (*target_)[nn_indices[j]].y - pt_src.y;
+ pt.z = (*target_)[nn_indices[j]].z - pt_src.z;
- const NormalT &normal = source_normals_->points[*idx_i];
+ const NormalT &normal = (*source_normals_)[*idx_i];
Eigen::Vector3d N (normal.normal_x, normal.normal_y, normal.normal_z);
Eigen::Vector3d V (pt.x, pt.y, pt.z);
Eigen::Vector3d C = N.cross (V);
// Check if the correspondence is reciprocal
target_idx = nn_indices[min_index];
- tree_reciprocal_->nearestKSearch (target_->points[target_idx], 1, index_reciprocal, distance_reciprocal);
+ tree_reciprocal_->nearestKSearch ((*target_)[target_idx], 1, index_reciprocal, distance_reciprocal);
if (*idx_i != index_reciprocal[0])
continue;
for (std::vector<int>::const_iterator src_it = indices_->begin (); src_it != indices_->end (); ++src_it)
{
- if (isFinite (input_->points[*src_it]))
+ if (isFinite ((*input_)[*src_it]))
{
- Eigen::Vector4f p_src (src_to_tgt_transformation_ * input_->points[*src_it].getVector4fMap ());
+ Eigen::Vector4f p_src (src_to_tgt_transformation_ * (*input_)[*src_it].getVector4fMap ());
Eigen::Vector3f p_src3 (p_src[0], p_src[1], p_src[2]);
Eigen::Vector3f uv (projection_matrix_ * p_src3);
#ifndef PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_FEATURES_HPP_
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_FEATURES_HPP_
+#include <boost/pointer_cast.hpp> // for static_pointer_cast
namespace pcl
{
#define PCL_REGISTRATION_IMPL_CORRESPONDENCE_REJECTION_SAMPLE_CONSENSUS_HPP_
#include <unordered_map>
+#include <pcl/sample_consensus/ransac.h>
+#include <pcl/sample_consensus/sac_model_registration.h>
namespace pcl
for (int i = 0; i < m; ++i)
{
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = gicp_->tmp_src_->points[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
+ Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
// Estimate the distance (cost function)
// The last coordinate is still guaranteed to be set to 1.0
for (int i = 0; i < m; ++i)
{
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = gicp_->tmp_src_->points[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
+ Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
// The last coordinate is still guaranteed to be set to 1.0
for (int i = 0; i < m; ++i)
{
// The last coordinate, p_src[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_src = gicp_->tmp_src_->points[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
+ Vector4fMapConst p_src = (*gicp_->tmp_src_)[(*gicp_->tmp_idx_src_)[i]].getVector4fMap ();
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
- Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
+ Vector4fMapConst p_tgt = (*gicp_->tmp_tgt_)[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
// The last coordinate is still guaranteed to be set to 1.0
Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
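// Editor's sketch (not part of the diff) of the per-correspondence cost implied above.
// In generalized ICP the residual is typically weighted by a per-pair matrix M derived
// from the local covariances; here M is passed in explicitly and all names are illustrative.
#include <Eigen/Core>

inline double
gicp_pair_cost (const Eigen::Matrix4f &transform,
                const Eigen::Vector4f &p_src, // homogeneous, p_src[3] == 1
                const Eigen::Vector4f &p_tgt, // homogeneous, p_tgt[3] == 1
                const Eigen::Matrix3d &M)     // per-correspondence weighting matrix
{
  const Eigen::Vector4f pp = transform * p_src;
  const Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
  return res.dot (M * res); // M = identity reduces this to the plain ICP squared error
}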
GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
- using namespace std;
// Difference between consecutive transforms
double delta = 0;
// Get the size of the target
#pragma omp parallel for \
default(none) \
shared(tree, cloud) \
- private(ids, dists_sqr) \
+ firstprivate(ids, dists_sqr) \
reduction(+:mean_dist, num) \
firstprivate(s, max_dist_sqr) \
num_threads(nr_threads)
for (int i = 0; i < 1000; i++)
{
- tree.nearestKSearch (cloud->points[rand () % s], 2, ids, dists_sqr);
+ tree.nearestKSearch ((*cloud)[rand () % s], 2, ids, dists_sqr);
if (dists_sqr[1] < max_dist_sqr)
{
mean_dist += std::sqrt (dists_sqr[1]);
std::vector <float> dists_sqr (2);
pcl::utils::ignore(nr_threads);
+#if OPENMP_LEGACY_CONST_DATA_SHARING_RULE
#pragma omp parallel for \
default(none) \
shared(tree, cloud, indices) \
- private(ids, dists_sqr) \
+ firstprivate(ids, dists_sqr) \
+ reduction(+:mean_dist, num) \
+ num_threads(nr_threads)
+#else
+#pragma omp parallel for \
+ default(none) \
+ shared(tree, cloud, indices, s, max_dist_sqr) \
+ firstprivate(ids, dists_sqr) \
reduction(+:mean_dist, num) \
- firstprivate(s, max_dist_sqr) \
num_threads(nr_threads)
+#endif
for (int i = 0; i < 1000; i++)
{
- tree.nearestKSearch (cloud->points[indices[rand () % s]], 2, ids, dists_sqr);
+ tree.nearestKSearch ((*cloud)[indices[rand () % s]], 2, ids, dists_sqr);
if (dists_sqr[1] < max_dist_sqr)
{
mean_dist += std::sqrt (dists_sqr[1]);
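// Editor's sketch (not part of the diff) of the private -> firstprivate change above:
// 'private' gives every thread a default-constructed (empty) vector, while 'firstprivate'
// copy-initializes each thread's vector from the pre-sized one declared outside the
// parallel region, presumably preserving the state the serial code set up. Illustrative
// standalone example; compile with -fopenmp.
#include <omp.h>
#include <cstdio>
#include <vector>

int main ()
{
  std::vector<int> ids (2); // pre-sized outside the parallel region

  #pragma omp parallel firstprivate(ids) num_threads(2)
  {
    // every thread starts with its own copy of size 2; with private(ids) the copy
    // would start empty and would have to be resized before use
    std::printf ("thread %d sees size %zu\n", omp_get_thread_num (), ids.size ());
  }
  return 0;
}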
pcl::compute3DCentroid (*target_, base_triple, centre_pt);
// loop over all points in source cloud to find most suitable fourth point
- const PointTarget *pt1 = &(target_->points[base_indices[0]]);
- const PointTarget *pt2 = &(target_->points[base_indices[1]]);
- const PointTarget *pt3 = &(target_->points[base_indices[2]]);
+ const PointTarget *pt1 = &((*target_)[base_indices[0]]);
+ const PointTarget *pt2 = &((*target_)[base_indices[1]]);
+ const PointTarget *pt3 = &((*target_)[base_indices[2]]);
for (const int &target_index : *target_indices_)
{
- const PointTarget *pt4 = &(target_->points[target_index]);
+ const PointTarget *pt4 = &((*target_)[target_index]);
float d1 = pcl::squaredEuclideanDistance (*pt4, *pt1);
float d2 = pcl::squaredEuclideanDistance (*pt4, *pt2);
int *index2 = &(*target_indices_)[rand () % nr_points];
int *index3 = &(*target_indices_)[rand () % nr_points];
- Eigen::Vector3f u = target_->points[*index2].getVector3fMap () - target_->points[*index1].getVector3fMap ();
- Eigen::Vector3f v = target_->points[*index3].getVector3fMap () - target_->points[*index1].getVector3fMap ();
+ Eigen::Vector3f u = (*target_)[*index2].getVector3fMap () - (*target_)[*index1].getVector3fMap ();
+ Eigen::Vector3f v = (*target_)[*index3].getVector3fMap () - (*target_)[*index1].getVector3fMap ();
float t = u.cross (v).squaredNorm (); // triangle area (0.5 * sqrt(t)) should be maximal
// check for most suitable point triple
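// Editor's sketch (not part of the diff) of the area criterion above: for points p1, p2, p3
// the triangle area is 0.5 * |(p2 - p1) x (p3 - p1)|, so maximizing the squared norm of the
// cross product maximizes the area without taking a square root per candidate.
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <cmath>

inline float
triangle_area (const Eigen::Vector3f &p1, const Eigen::Vector3f &p2, const Eigen::Vector3f &p3)
{
  const Eigen::Vector3f u = p2 - p1;
  const Eigen::Vector3f v = p3 - p1;
  return 0.5f * std::sqrt (u.cross (v).squaredNorm ());
}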
float (&ratio)[2])
{
// get point vectors
- Eigen::Vector3f u = target_->points[base_indices[1]].getVector3fMap () - target_->points[base_indices[0]].getVector3fMap ();
- Eigen::Vector3f v = target_->points[base_indices[3]].getVector3fMap () - target_->points[base_indices[2]].getVector3fMap ();
- Eigen::Vector3f w = target_->points[base_indices[0]].getVector3fMap () - target_->points[base_indices[2]].getVector3fMap ();
+ Eigen::Vector3f u = (*target_)[base_indices[1]].getVector3fMap () - (*target_)[base_indices[0]].getVector3fMap ();
+ Eigen::Vector3f v = (*target_)[base_indices[3]].getVector3fMap () - (*target_)[base_indices[2]].getVector3fMap ();
+ Eigen::Vector3f w = (*target_)[base_indices[0]].getVector3fMap () - (*target_)[base_indices[2]].getVector3fMap ();
// calculate segment distances
float a = u.dot (u);
const float max_norm_diff = 0.5f * max_norm_diff_ * M_PI / 180.f;
// calculate reference segment distance and normal angle
- float ref_dist = pcl::euclideanDistance (target_->points[idx1], target_->points[idx2]);
- float ref_norm_angle = (use_normals_ ? (target_normals_->points[idx1].getNormalVector3fMap () -
- target_normals_->points[idx2].getNormalVector3fMap ()).norm () : 0.f);
+ float ref_dist = pcl::euclideanDistance ((*target_)[idx1], (*target_)[idx2]);
+ float ref_norm_angle = (use_normals_ ? ((*target_normals_)[idx1].getNormalVector3fMap () -
+ (*target_normals_)[idx2].getNormalVector3fMap ()).norm () : 0.f);
// loop over all pairs of points in source point cloud
auto it_out = source_indices_->begin (), it_out_e = source_indices_->end () - 1;
// add here normal evaluation if normals are given
if (use_normals_)
{
- const NormalT *pt1_n = &(source_normals_->points[*it_out]);
- const NormalT *pt2_n = &(source_normals_->points[*it_in]);
+ const NormalT *pt1_n = &((*source_normals_)[*it_out]);
+ const NormalT *pt2_n = &((*source_normals_)[*it_in]);
float norm_angle_1 = (pt1_n->getNormalVector3fMap () - pt2_n->getNormalVector3fMap ()).norm ();
float norm_angle_2 = (pt1_n->getNormalVector3fMap () + pt2_n->getNormalVector3fMap ()).norm ();
{
// calculate edge lengths of base
float dist_base[4];
- dist_base[0] = pcl::euclideanDistance (target_->points[base_indices[0]], target_->points[base_indices[2]]);
- dist_base[1] = pcl::euclideanDistance (target_->points[base_indices[0]], target_->points[base_indices[3]]);
- dist_base[2] = pcl::euclideanDistance (target_->points[base_indices[1]], target_->points[base_indices[2]]);
- dist_base[3] = pcl::euclideanDistance (target_->points[base_indices[1]], target_->points[base_indices[3]]);
+ dist_base[0] = pcl::euclideanDistance ((*target_)[base_indices[0]], (*target_)[base_indices[2]]);
+ dist_base[1] = pcl::euclideanDistance ((*target_)[base_indices[0]], (*target_)[base_indices[3]]);
+ dist_base[2] = pcl::euclideanDistance ((*target_)[base_indices[1]], (*target_)[base_indices[2]]);
+ dist_base[3] = pcl::euclideanDistance ((*target_)[base_indices[1]], (*target_)[base_indices[3]]);
// loop over first point pair correspondences and store intermediate points 'e' in new point cloud
PointCloudSourcePtr cloud_e (new PointCloudSource);
PointCloudSourceIterator it_pt = cloud_e->begin ();
for (const auto &pair : pairs_a)
{
- const PointSource *pt1 = &(input_->points[pair.index_match]);
- const PointSource *pt2 = &(input_->points[pair.index_query]);
+ const PointSource *pt1 = &((*input_)[pair.index_match]);
+ const PointSource *pt2 = &((*input_)[pair.index_query]);
// calculate intermediate points using both ratios from base (r1,r2)
for (int i = 0; i < 2; i++, it_pt++)
// loop over second point pair correspondences
for (const auto &pair : pairs_b)
{
- const PointTarget *pt1 = &(input_->points[pair.index_match]);
- const PointTarget *pt2 = &(input_->points[pair.index_query]);
+ const PointTarget *pt1 = &((*input_)[pair.index_match]);
+ const PointTarget *pt2 = &((*input_)[pair.index_query]);
// calculate intermediate points using both ratios from base (r1,r2)
for (const float &r : ratio)
const std::vector <int> &match_indices,
const float (&dist_ref)[4])
{
- float d0 = pcl::euclideanDistance (input_->points[match_indices[0]], input_->points[match_indices[2]]);
- float d1 = pcl::euclideanDistance (input_->points[match_indices[0]], input_->points[match_indices[3]]);
- float d2 = pcl::euclideanDistance (input_->points[match_indices[1]], input_->points[match_indices[2]]);
- float d3 = pcl::euclideanDistance (input_->points[match_indices[1]], input_->points[match_indices[3]]);
+ float d0 = pcl::euclideanDistance ((*input_)[match_indices[0]], (*input_)[match_indices[2]]);
+ float d1 = pcl::euclideanDistance ((*input_)[match_indices[0]], (*input_)[match_indices[3]]);
+ float d2 = pcl::euclideanDistance ((*input_)[match_indices[1]], (*input_)[match_indices[2]]);
+ float d3 = pcl::euclideanDistance ((*input_)[match_indices[1]], (*input_)[match_indices[3]]);
// check edge distances of match w.r.t the base
return (std::abs (d0 - dist_ref[0]) < max_edge_diff_ && std::abs (d1 - dist_ref[1]) < max_edge_diff_ &&
auto it_match_orig = match_indices.begin ();
for (auto it_base = base_indices.cbegin (), it_base_e = base_indices.cend (); it_base != it_base_e; it_base++, it_match_orig++)
{
- float dist_sqr_1 = pcl::squaredEuclideanDistance (target_->points[*it_base], centre_pt_base);
+ float dist_sqr_1 = pcl::squaredEuclideanDistance ((*target_)[*it_base], centre_pt_base);
float best_diff_sqr = FLT_MAX;
int best_index = -1;
for (const int &match_index : copy)
{
// calculate difference of distances to centre point
- float dist_sqr_2 = pcl::squaredEuclideanDistance (input_->points[match_index], centre_pt_match);
+ float dist_sqr_2 = pcl::squaredEuclideanDistance ((*input_)[match_index], centre_pt_match);
float diff_sqr = std::abs(dist_sqr_1 - dist_sqr_2);
if (diff_sqr < best_diff_sqr)
const PointCloudSource &cloud, int nr_samples, float min_sample_distance,
std::vector<int> &sample_indices)
{
- if (nr_samples > static_cast<int> (cloud.points.size ()))
+ if (nr_samples > static_cast<int> (cloud.size ()))
{
PCL_ERROR ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
- PCL_ERROR ("The number of samples (%d) must not be greater than the number of points (%lu)!\n",
- nr_samples, cloud.points.size ());
+ PCL_ERROR("The number of samples (%d) must not be greater than the number of "
+ "points (%zu)!\n",
+ nr_samples,
+ static_cast<std::size_t>(cloud.size()));
return;
}
// Iteratively draw random samples until nr_samples is reached
- int iterations_without_a_sample = 0;
- int max_iterations_without_a_sample = static_cast<int> (3 * cloud.points.size ());
+ index_t iterations_without_a_sample = 0;
+ const auto max_iterations_without_a_sample = 3 * cloud.size ();
sample_indices.clear ();
while (static_cast<int> (sample_indices.size ()) < nr_samples)
{
// Choose a sample at random
- int sample_index = getRandomIndex (static_cast<int> (cloud.points.size ()));
+ int sample_index = getRandomIndex (static_cast<int> (cloud.size ()));
// Check to see if the sample is 1) unique and 2) far away from the other samples
bool valid_sample = true;
for (const int &sample_idx : sample_indices)
{
- float distance_between_samples = euclideanDistance (cloud.points[sample_index], cloud.points[sample_idx]);
+ float distance_between_samples = euclideanDistance (cloud[sample_index], cloud[sample_idx]);
if (sample_index == sample_idx || distance_between_samples < min_sample_distance)
{
++iterations_without_a_sample;
// If no valid samples can be found, relax the inter-sample distance requirements
- if (iterations_without_a_sample >= max_iterations_without_a_sample)
+ if (static_cast<std::size_t>(iterations_without_a_sample) >= max_iterations_without_a_sample)
{
PCL_WARN ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
- PCL_WARN ("No valid sample found after %d iterations. Relaxing min_sample_distance_ to %f\n",
- iterations_without_a_sample, 0.5*min_sample_distance);
+ PCL_WARN ("No valid sample found after %zu iterations. Relaxing min_sample_distance_ to %f\n",
+ static_cast<std::size_t>(iterations_without_a_sample), 0.5*min_sample_distance);
min_sample_distance_ *= 0.5f;
min_sample_distance = min_sample_distance_;
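// Editor's sketch (not part of the diff) of the sampling loop above, with illustrative
// names: draw random indices, accept one only if it is new and at least min_dist away from
// every sample accepted so far, and halve min_dist after too many consecutive rejections.
#include <pcl/common/distances.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <cstdlib>
#include <vector>

inline void
select_distinct_samples (const pcl::PointCloud<pcl::PointXYZ> &cloud,
                         int nr_samples, float min_dist,
                         std::vector<int> &sample_indices)
{
  sample_indices.clear ();
  std::size_t rejections = 0;
  const std::size_t max_rejections = 3 * cloud.size ();
  while (static_cast<int> (sample_indices.size ()) < nr_samples)
  {
    const int candidate = std::rand () % static_cast<int> (cloud.size ());
    bool valid = true;
    for (const int idx : sample_indices)
      if (candidate == idx || pcl::euclideanDistance (cloud[candidate], cloud[idx]) < min_dist)
      {
        valid = false;
        break;
      }
    if (valid)
      sample_indices.push_back (candidate);
    else if (++rejections >= max_rejections)
    {
      min_dist *= 0.5f; // relax the spacing requirement, mirroring the code above
      rejections = 0;
    }
  }
}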
corresponding_indices.resize (sample_indices.size ());
for (std::size_t i = 0; i < sample_indices.size (); ++i)
{
- // Find the k features nearest to input_features.points[sample_indices[i]]
+ // Find the k features nearest to input_features[sample_indices[i]]
feature_tree_->nearestKSearch (input_features, sample_indices[i], k_correspondences_, nn_indices, nn_distances);
// Select one at random and add it to corresponding_indices
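// Editor's sketch (not part of the diff) of the correspondence sampling described above:
// search the k nearest neighbours of a feature in the target feature tree and keep one of
// them at random, which tolerates noisy descriptors better than always taking the single
// best match. feature_tree is assumed to be an already-built kd-tree over the target features.
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_types.h>
#include <cstdlib>
#include <vector>

inline int
pick_similar_feature (const pcl::KdTreeFLANN<pcl::FPFHSignature33> &feature_tree,
                      const pcl::FPFHSignature33 &query, int k)
{
  std::vector<int> nn_indices (k);
  std::vector<float> nn_distances (k);
  feature_tree.nearestKSearch (query, k, nn_indices, nn_distances);
  return nn_indices[std::rand () % k]; // one of the k best matches, chosen at random
}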
const ErrorFunctor & compute_error = *error_functor_;
float error = 0;
- for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
{
- // Find the distance between cloud.points[i] and its nearest neighbor in the target point cloud
+ // Find the distance between cloud[i] and its nearest neighbor in the target point cloud
tree_->nearestKSearch (cloud, i, 1, nn_index, nn_distance);
// Compute the error
for (int ici = 0; ici != static_cast<int> (corrs->size ()); ++ici) // ici = input correspondence iterator
{
// Compound the point pair onto the current pose
- Eigen::Vector3f source_compounded = pcl::getTransformation (source_pose (0), source_pose (1), source_pose (2), source_pose (3), source_pose (4), source_pose (5)) * source_cloud->points[(*corrs)[ici].index_query].getVector3fMap ();
- Eigen::Vector3f target_compounded = pcl::getTransformation (target_pose (0), target_pose (1), target_pose (2), target_pose (3), target_pose (4), target_pose (5)) * target_cloud->points[(*corrs)[ici].index_match].getVector3fMap ();
+ Eigen::Vector3f source_compounded = pcl::getTransformation (source_pose (0), source_pose (1), source_pose (2), source_pose (3), source_pose (4), source_pose (5)) * (*source_cloud)[(*corrs)[ici].index_query].getVector3fMap ();
+ Eigen::Vector3f target_compounded = pcl::getTransformation (target_pose (0), target_pose (1), target_pose (2), target_pose (3), target_pose (4), target_pose (5)) * (*target_cloud)[(*corrs)[ici].index_match].getVector3fMap ();
// NaN points can not be passed to the remaining computational pipeline
if (!std::isfinite (source_compounded (0)) || !std::isfinite (source_compounded (1)) || !std::isfinite (source_compounded (2)) || !std::isfinite (target_compounded (0)) || !std::isfinite (target_compounded (1)) || !std::isfinite (target_compounded (2)))
if (delta_p_norm == 0 || std::isnan(delta_p_norm))
{
- trans_probability_ = score / static_cast<double> (input_->points.size ());
+ trans_probability_ = score / static_cast<double> (input_->size ());
converged_ = delta_p_norm == delta_p_norm;
return;
}
// Store transformation probability. The relative differences within each scan registration are accurate
// but the normalization constants need to be modified for it to be globally accurate
- trans_probability_ = score / static_cast<double> (input_->points.size ());
+ trans_probability_ = score / static_cast<double> (input_->size ());
}
computeAngleDerivatives (p);
// Update gradient and hessian for each point, line 17 in Algorithm 2 [Magnusson 2009]
- for (std::size_t idx = 0; idx < input_->points.size (); idx++)
+ for (std::size_t idx = 0; idx < input_->size (); idx++)
{
- x_trans_pt = trans_cloud.points[idx];
+ x_trans_pt = trans_cloud[idx];
// Find neighbors (radius search has been experimentally faster than direct neighbor checking).
std::vector<TargetGridLeafConstPtr> neighborhood;
for (typename std::vector<TargetGridLeafConstPtr>::iterator neighborhood_it = neighborhood.begin (); neighborhood_it != neighborhood.end (); ++neighborhood_it)
{
cell = *neighborhood_it;
- x_pt = input_->points[idx];
+ x_pt = (*input_)[idx];
x = Eigen::Vector3d (x_pt.x, x_pt.y, x_pt.z);
x_trans = Eigen::Vector3d (x_trans_pt.x, x_trans_pt.y, x_trans_pt.z);
// Precomputing angular derivatives is unnecessary here because they are only used after the regular derivative calculation
// Update hessian for each point, line 17 in Algorithm 2 [Magnusson 2009]
- for (std::size_t idx = 0; idx < input_->points.size (); idx++)
+ for (std::size_t idx = 0; idx < input_->size (); idx++)
{
- x_trans_pt = trans_cloud.points[idx];
+ x_trans_pt = trans_cloud[idx];
// Find neighbors (radius search has been experimentally faster than direct neighbor checking).
std::vector<TargetGridLeafConstPtr> neighborhood;
cell = *neighborhood_it;
{
- x_pt = input_->points[idx];
+ x_pt = (*input_)[idx];
x = Eigen::Vector3d (x_pt.x, x_pt.y, x_pt.z);
x_trans = Eigen::Vector3d (x_trans_pt.x, x_trans_pt.y, x_trans_pt.z);
double g_u = auxilaryFunction_dPsiMT (d_phi_0, d_phi_0, mu);
// Check used to allow More-Thuente step length calculation to be skipped by making step_min == step_max
- bool interval_converged = (step_max - step_min) > 0, open_interval = true;
+ bool interval_converged = (step_max - step_min) < 0, open_interval = true;
double a_t = step_init;
a_t = std::min (a_t, step_max);
PCL_ERROR("[pcl::PPFRegistration::computeTransformation] setting initial transform (guess) not implemented!\n");
}
- PoseWithVotesList voted_poses;
- std::vector <std::vector <unsigned int> > accumulator_array;
- accumulator_array.resize (input_->points.size ());
+ const auto aux_size = static_cast<std::size_t>(
+ std::floor(2 * M_PI / search_method_->getAngleDiscretizationStep()));
+
+ const std::vector<unsigned int> tmp_vec(aux_size, 0);
+ std::vector<std::vector<unsigned int>> accumulator_array(input_->size(), tmp_vec);
- std::size_t aux_size = static_cast<std::size_t> (std::floor (2 * M_PI / search_method_->getAngleDiscretizationStep ()));
- for (std::size_t i = 0; i < input_->points.size (); ++i)
- {
- std::vector<unsigned int> aux (aux_size);
- accumulator_array[i] = aux;
- }
PCL_INFO ("Accumulator array size: %u x %u.\n", accumulator_array.size (), accumulator_array.back ().size ());
+ PoseWithVotesList voted_poses;
// Consider every <scene_reference_point_sampling_rate>-th point as the reference point => fix s_r
float f1, f2, f3, f4;
- for (std::size_t scene_reference_index = 0; scene_reference_index < target_->points.size (); scene_reference_index += scene_reference_point_sampling_rate_)
+ for (std::size_t scene_reference_index = 0; scene_reference_index < target_->size (); scene_reference_index += scene_reference_point_sampling_rate_)
{
- Eigen::Vector3f scene_reference_point = target_->points[scene_reference_index].getVector3fMap (),
- scene_reference_normal = target_->points[scene_reference_index].getNormalVector3fMap ();
+ Eigen::Vector3f scene_reference_point = (*target_)[scene_reference_index].getVector3fMap (),
+ scene_reference_normal = (*target_)[scene_reference_index].getNormalVector3fMap ();
float rotation_angle_sg = std::acos (scene_reference_normal.dot (Eigen::Vector3f::UnitX ()));
bool parallel_to_x_sg = (scene_reference_normal.y() == 0.0f && scene_reference_normal.z() == 0.0f);
// For every other point in the scene => now have pair (s_r, s_i) fixed
std::vector<int> indices;
std::vector<float> distances;
- scene_search_tree_->radiusSearch (target_->points[scene_reference_index],
+ scene_search_tree_->radiusSearch ((*target_)[scene_reference_index],
search_method_->getModelDiameter () /2,
indices,
distances);
for(const std::size_t &scene_point_index : indices)
-// for(std::size_t i = 0; i < target_->points.size (); ++i)
+// for(std::size_t i = 0; i < target_->size (); ++i)
{
//size_t scene_point_index = i;
if (scene_reference_index != scene_point_index)
{
- if (/*pcl::computePPFPairFeature*/pcl::computePairFeatures (target_->points[scene_reference_index].getVector4fMap (),
- target_->points[scene_reference_index].getNormalVector4fMap (),
- target_->points[scene_point_index].getVector4fMap (),
- target_->points[scene_point_index].getNormalVector4fMap (),
+ if (/*pcl::computePPFPairFeature*/pcl::computePairFeatures ((*target_)[scene_reference_index].getVector4fMap (),
+ (*target_)[scene_reference_index].getNormalVector4fMap (),
+ (*target_)[scene_point_index].getVector4fMap (),
+ (*target_)[scene_point_index].getNormalVector4fMap (),
f1, f2, f3, f4))
{
std::vector<std::pair<std::size_t, std::size_t> > nearest_indices;
search_method_->nearestNeighborSearch (f1, f2, f3, f4, nearest_indices);
// Compute alpha_s angle
- Eigen::Vector3f scene_point = target_->points[scene_point_index].getVector3fMap ();
+ Eigen::Vector3f scene_point = (*target_)[scene_point_index].getVector3fMap ();
Eigen::Vector3f scene_point_transformed = transform_sg * scene_point;
float alpha_s = std::atan2 ( -scene_point_transformed(2), scene_point_transformed(1));
accumulator_array[i][j] = 0;
}
- Eigen::Vector3f model_reference_point = input_->points[max_votes_i].getVector3fMap (),
- model_reference_normal = input_->points[max_votes_i].getNormalVector3fMap ();
+ Eigen::Vector3f model_reference_point = (*input_)[max_votes_i].getVector3fMap (),
+ model_reference_normal = (*input_)[max_votes_i].getNormalVector3fMap ();
float rotation_angle_mg = std::acos (model_reference_normal.dot (Eigen::Vector3f::UnitX ()));
bool parallel_to_x_mg = (model_reference_normal.y() == 0.0f && model_reference_normal.z() == 0.0f);
Eigen::Vector3f rotation_axis_mg = (parallel_to_x_mg)?(Eigen::Vector3f::UnitY ()):(model_reference_normal.cross (Eigen::Vector3f::UnitX ()). normalized());
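// Editor's sketch (not part of the diff) of the alignment step above: build the rotation
// that maps a unit normal onto the X axis, falling back to the Y axis as rotation axis when
// the normal is already (anti)parallel to X and the cross product would vanish.
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <cmath>

inline Eigen::Affine3f
align_normal_with_x (const Eigen::Vector3f &normal)
{
  const float angle = std::acos (normal.dot (Eigen::Vector3f::UnitX ()));
  const bool parallel_to_x = (normal.y () == 0.0f && normal.z () == 0.0f);
  const Eigen::Vector3f axis = parallel_to_x
      ? Eigen::Vector3f::UnitY ()
      : normal.cross (Eigen::Vector3f::UnitX ()).normalized ();
  return Eigen::Affine3f (Eigen::AngleAxisf (angle, axis));
}
// For a unit normal n, align_normal_with_x (n) * n is (approximately) the +X unit vector.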
nr_dimensions = dimension_range_target_.size ();
- nr_features = input_->points.size ();
+ nr_features = input_->size ();
float D = 0.0f;
for (std::vector<std::pair<float, float> >::iterator range_it = dimension_range_target_.begin (); range_it != dimension_range_target_.end (); ++range_it)
{
if (!initializeHistogram ())
return;
- for (std::size_t feature_i = 0; feature_i < input_->points.size (); ++feature_i)
+ for (const auto& point: *input_)
{
std::vector<float> feature_vector;
- convertFeatureToVector (input_->points[feature_i], feature_vector);
+ convertFeatureToVector (point, feature_vector);
addFeature (feature_vector);
}
// For each point in the source dataset
int nr = 0;
- for (std::size_t i = 0; i < input_transformed.points.size (); ++i)
+ for (const auto& point: input_transformed)
{
// Find its nearest neighbor in the target
- tree_->nearestKSearch (input_transformed.points[i], 1, nn_indices, nn_dists);
+ tree_->nearestKSearch (point, 1, nn_indices, nn_dists);
// Deal with occlusions (incomplete targets)
if (nn_dists[0] <= max_range)
return;
// Resize the output dataset
- if (output.points.size () != indices_->size ())
- output.points.resize (indices_->size ());
+ output.resize (indices_->size ());
// Copy the header
output.header = input_->header;
// Check if the output will be computed for all points or only a subset
- if (indices_->size () != input_->points.size ())
+ if (indices_->size () != input_->size ())
{
- output.width = static_cast<std::uint32_t> (indices_->size ());
+ output.width = indices_->size ();
output.height = 1;
}
else
// Copy the point data to output
for (std::size_t i = 0; i < indices_->size (); ++i)
- output.points[i] = input_->points[(*indices_)[i]];
+ output[i] = (*input_)[(*indices_)[i]];
// Set the internal point representation of choice unless otherwise noted
if (point_representation_ && !force_no_recompute_)
// Right before we estimate the transformation, we set all the point.data[3] values to 1 to aid the rigid
// transformation
for (std::size_t i = 0; i < indices_->size (); ++i)
- output.points[i].data[3] = 1.0;
+ output[i].data[3] = 1.0;
computeTransformation (output, guess);
SampleConsensusPrerejective<PointSource, PointTarget, FeatureT>::selectSamples (
const PointCloudSource &cloud, int nr_samples, std::vector<int> &sample_indices)
{
- if (nr_samples > static_cast<int> (cloud.points.size ()))
+ if (nr_samples > static_cast<int> (cloud.size ()))
{
PCL_ERROR ("[pcl::%s::selectSamples] ", getClassName ().c_str ());
- PCL_ERROR ("The number of samples (%d) must not be greater than the number of points (%lu)!\n",
- nr_samples, cloud.points.size ());
+ PCL_ERROR("The number of samples (%d) must not be greater than the number of "
+ "points (%zu)!\n",
+ nr_samples,
+ static_cast<std::size_t>(cloud.size()));
return;
}
for (int i = 0; i < nr_samples; i++)
{
// Select a random number
- sample_indices[i] = getRandomIndex (static_cast<int> (cloud.points.size ()) - i);
+ sample_indices[i] = getRandomIndex (static_cast<int> (cloud.size ()) - i);
// Run through the list of numbers, starting at the lowest, to avoid duplicates
for (int j = 0; j < i; j++)
transformPointCloud (*input_, input_transformed, final_transformation_);
// For each point in the source dataset
- for (std::size_t i = 0; i < input_transformed.points.size (); ++i)
+ for (std::size_t i = 0; i < input_transformed.size (); ++i)
{
// Find its nearest neighbor in the target
std::vector<int> nn_indices (1);
std::vector<float> nn_dists (1);
- tree_->nearestKSearch (input_transformed.points[i], 1, nn_indices, nn_dists);
+ tree_->nearestKSearch (input_transformed[i], 1, nn_indices, nn_dists);
// Check if point is an inlier
if (nn_dists[0] < max_range)
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimation2D::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimation2D::estimateRigidTransformation] Number "
+ "or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::Transformation2D::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::Transformation2D::estimateRigidTransformation] Number or points "
+ "in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (cloud_src.points.size () != 3 || cloud_tgt.points.size () != 3)
+ if (cloud_src.size () != 3 || cloud_tgt.size () != 3)
{
- PCL_ERROR ("[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of points in source (%lu) and target (%lu) must be 3!\n",
- cloud_src.points.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimation3Point::estimateRigidTransformation] "
+ "Number of points in source (%zu) and target (%zu) must be 3!\n",
+ static_cast<std::size_t>(cloud_src.size()),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != 3 || cloud_tgt.points.size () != 3)
+ if (indices_src.size () != 3 || cloud_tgt.size () != 3)
{
- PCL_ERROR ("[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of indices in source (%lu) and points in target (%lu) must be 3!\n",
- indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR(
+ "[pcl::TransformationEstimation3Point::estimateRigidTransformation] Number of "
+ "indices in source (%zu) and points in target (%zu) must be 3!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
+ "or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::TransformationDQ::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationDQ::estimateRigidTransformation] Number or points "
+ "in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
{
if (indices_src.size () != indices_tgt.size ())
{
- PCL_ERROR ("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ PCL_ERROR("[pcl::TransformationEstimationDQ::estimateRigidTransformation] Number "
+ "or points in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationDualQuaternion::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR(
+ "[pcl::TransformationEstimationDualQuaternion::estimateRigidTransformation] "
+ "Number or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::TransformationDQ::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationDQ::estimateRigidTransformation] Number or points "
+ "in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
{
// <cloud_src,cloud_src> is the source dataset
- if (cloud_src.points.size () != cloud_tgt.points.size ())
+ if (cloud_src.size () != cloud_tgt.size ())
{
PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
- PCL_ERROR ("Number or points in source (%lu) differs than target (%lu)!\n",
- cloud_src.points.size (), cloud_tgt.points.size ());
+ PCL_ERROR("Number or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(cloud_src.size()),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
- if (cloud_src.points.size () < 4) // need at least 4 samples
+ if (cloud_src.size () < 4) // need at least 4 samples
{
PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
- PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!\n",
- cloud_src.points.size ());
+ PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
+ "%zu points!\n",
+ static_cast<std::size_t>(cloud_src.size()));
return;
}
tmp_src_ = &cloud_src;
tmp_tgt_ = &cloud_tgt;
- OptimizationFunctor functor (static_cast<int> (cloud_src.points.size ()), this);
+ OptimizationFunctor functor (static_cast<int> (cloud_src.size ()), this);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
//Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm (num_diff);
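// --- Editor's note: an illustrative, self-contained sketch (not PCL's OptimizationFunctor)
// --- of the Eigen pattern used above: a functor that fills a residual vector, wrapped in
// --- Eigen::NumericalDiff so the Jacobian is estimated numerically, then minimized with
// --- Eigen::LevenbergMarquardt. Here it fits y = a*x + b to five samples of y = 2x + 1.
#include <cstdio>
#include <Eigen/Dense>
#include <unsupported/Eigen/NonLinearOptimization>
#include <unsupported/Eigen/NumericalDiff>

struct LineFitFunctor
{
  using Scalar = double;
  enum { InputsAtCompileTime = Eigen::Dynamic, ValuesAtCompileTime = Eigen::Dynamic };
  using InputType    = Eigen::VectorXd;
  using ValueType    = Eigen::VectorXd;
  using JacobianType = Eigen::MatrixXd;

  int inputs () const { return 2; }   // parameters: a, b
  int values () const { return 5; }   // residuals: one per sample

  int operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec) const
  {
    for (int i = 0; i < values (); ++i)
      fvec[i] = x[0] * i + x[1] - (2.0 * i + 1.0);  // model minus observation
    return 0;
  }
};

int main ()
{
  LineFitFunctor functor;
  Eigen::NumericalDiff<LineFitFunctor> num_diff (functor);
  Eigen::LevenbergMarquardt<Eigen::NumericalDiff<LineFitFunctor>, double> lm (num_diff);
  Eigen::VectorXd x (2);
  x << 0.0, 0.0;                      // initial guess
  const int info = lm.minimize (x);
  std::printf ("status %d, a = %f, b = %f\n", info, x[0], x[1]);  // a ~ 2, b ~ 1
  return 0;
}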
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR(
+ "[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] "
+ "Number or points in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
// <cloud_src,cloud_src> is the source dataset
transformation_matrix.setIdentity ();
- const int nr_correspondences = static_cast<const int> (cloud_tgt.points.size ());
+ const auto nr_correspondences = cloud_tgt.size ();
std::vector<int> indices_tgt;
indices_tgt.resize(nr_correspondences);
- for (int i = 0; i < nr_correspondences; ++i)
+ for (std::size_t i = 0; i < nr_correspondences; ++i)
indices_tgt[i] = i;
estimateRigidTransformation(cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
const pcl::Correspondences &correspondences,
Matrix4 &transformation_matrix) const
{
- const int nr_correspondences = static_cast<int> (correspondences.size ());
+ const auto nr_correspondences = correspondences.size ();
std::vector<int> indices_src (nr_correspondences);
std::vector<int> indices_tgt (nr_correspondences);
- for (int i = 0; i < nr_correspondences; ++i)
+ for (std::size_t i = 0; i < nr_correspondences; ++i)
{
indices_src[i] = correspondences[i].index_query;
indices_tgt[i] = correspondences[i].index_match;
// Transform each source point and compute its distance to the corresponding target point
for (int i = 0; i < values (); ++i)
{
- const PointSource & p_src = src_points.points[i];
- const PointTarget & p_tgt = tgt_points.points[i];
+ const PointSource & p_src = src_points[i];
+ const PointTarget & p_tgt = tgt_points[i];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
// Transform each source point and compute its distance to the corresponding target point
for (int i = 0; i < values (); ++i)
{
- const PointSource & p_src = src_points.points[src_indices[i]];
- const PointTarget & p_tgt = tgt_points.points[tgt_indices[i]];
+ const PointSource & p_src = src_points[src_indices[i]];
+ const PointTarget & p_tgt = tgt_points[tgt_indices[i]];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR(
+ "[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
+ "Number or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = indices_src.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = indices_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR(
+ "[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
+ "Number or points in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const std::vector<int> &indices_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = indices_src.size ();
+ const auto nr_points = indices_src.size ();
if (indices_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ PCL_ERROR(
+ "[pcl::TransformationEstimationPointToPlaneLLS::estimateRigidTransformation] "
+ "Number or points in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = indices_src.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const std::size_t nr_points = indices_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationPointToPlaneLLSWeighted::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const std::vector<int> &indices_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = indices_src.size ();
+ const std::size_t nr_points = indices_src.size ();
if (indices_tgt.size () != nr_points)
{
PCL_ERROR ("[pcl::TransformationEstimationPointToPlaneLLSWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
{
// <cloud_src,cloud_src> is the source dataset
- if (cloud_src.points.size () != cloud_tgt.points.size ())
+ if (cloud_src.size () != cloud_tgt.size ())
{
PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
- PCL_ERROR ("Number or points in source (%lu) differs than target (%lu)!\n",
- cloud_src.points.size (), cloud_tgt.points.size ());
+ PCL_ERROR("Number or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(cloud_src.size()),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
- if (cloud_src.points.size () < 4) // need at least 4 samples
+ if (cloud_src.size () < 4) // need at least 4 samples
{
PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
- PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!\n",
- cloud_src.points.size ());
+ PCL_ERROR("Need at least 4 points to estimate a transform! Source and target have "
+ "%zu points!\n",
+ static_cast<std::size_t>(cloud_src.size()));
return;
}
if (correspondence_weights_.size () != cloud_src.size ())
{
PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
- PCL_ERROR ("Number of weights (%lu) differs than number of points (%lu)!\n",
- correspondence_weights_.size (), cloud_src.points.size ());
+ PCL_ERROR("Number of weights (%zu) differs than number of points (%zu)!\n",
+ correspondence_weights_.size(),
+ static_cast<std::size_t>(cloud_src.size()));
return;
}
tmp_src_ = &cloud_src;
tmp_tgt_ = &cloud_tgt;
- OptimizationFunctor functor (static_cast<int> (cloud_src.points.size ()), this);
+ OptimizationFunctor functor (static_cast<int> (cloud_src.size ()), this);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm (num_diff);
int info = lm.minimize (x);
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::registration::TransformationEstimationPointToPlaneWeighted::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
if (correspondence_weights_.size () != indices_src.size ())
{
PCL_ERROR ("[pcl::registration::TransformationEstimationPointToPlaneWeighted::estimateRigidTransformation] ");
- PCL_ERROR ("Number of weights (%lu) differs than number of points (%lu)!\n",
- correspondence_weights_.size (), indices_src.size ());
+ PCL_ERROR("Number of weights (%zu) differs than number of points (%zu)!\n",
+ correspondence_weights_.size(),
+ indices_src.size());
return;
}
// <cloud_src,cloud_src> is the source dataset
transformation_matrix.setIdentity ();
- const int nr_correspondences = static_cast<const int> (cloud_tgt.points.size ());
+ const auto nr_correspondences = cloud_tgt.size ();
std::vector<int> indices_tgt;
indices_tgt.resize(nr_correspondences);
- for (int i = 0; i < nr_correspondences; ++i)
+ for (std::size_t i = 0; i < nr_correspondences; ++i)
indices_tgt[i] = i;
estimateRigidTransformation(cloud_src, indices_src, cloud_tgt, indices_tgt, transformation_matrix);
// Transform each source point and compute its distance to the corresponding target point
for (int i = 0; i < values (); ++i)
{
- const PointSource & p_src = src_points.points[i];
- const PointTarget & p_tgt = tgt_points.points[i];
+ const PointSource & p_src = src_points[i];
+ const PointTarget & p_tgt = tgt_points[i];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
// Transform each source point and compute its distance to the corresponding target point
for (int i = 0; i < values (); ++i)
{
- const PointSource & p_src = src_points.points[src_indices[i]];
- const PointTarget & p_tgt = tgt_points.points[tgt_indices[i]];
+ const PointSource & p_src = src_points[src_indices[i]];
+ const PointTarget & p_tgt = tgt_points[tgt_indices[i]];
// Transform the source point based on the current warp parameters
Vector4 p_src_warped;
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- std::size_t nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number "
+ "or points in source (%zu) differs than target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- if (indices_src.size () != cloud_tgt.points.size ())
+ if (indices_src.size () != cloud_tgt.size ())
{
- PCL_ERROR ("[pcl::TransformationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationSVD::estimateRigidTransformation] Number or points "
+ "in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
{
if (indices_src.size () != indices_tgt.size ())
{
- PCL_ERROR ("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ PCL_ERROR("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number "
+ "or points in source (%zu) differs than target (%zu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
- const auto nr_points = cloud_src.points.size ();
- if (cloud_tgt.points.size () != nr_points)
+ const auto nr_points = cloud_src.size ();
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs from target (%lu)!\n", nr_points, cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "from target (%zu)!\n",
+ static_cast<std::size_t>(nr_points),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
Matrix4 &transformation_matrix) const
{
const auto nr_points = indices_src.size ();
- if (cloud_tgt.points.size () != nr_points)
+ if (cloud_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
+ PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "than target (%zu)!\n",
+ indices_src.size(),
+ static_cast<std::size_t>(cloud_tgt.size()));
return;
}
const auto nr_points = indices_src.size ();
if (indices_tgt.size () != nr_points)
{
- PCL_ERROR ("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
+ PCL_ERROR("[pcl::TransformationEstimationSymmetricPointToPlaneLLS::"
+ "estimateRigidTransformation] Number or points in source (%zu) differs "
+ "than target (%zu)!\n",
+ indices_src.size(),
+ indices_tgt.size());
return;
}
input_transformed.resize (cloud_src->size ());
for (std::size_t i = 0; i < cloud_src->size (); ++i)
{
- const PointSource &src = cloud_src->points[i];
- PointTarget &tgt = input_transformed.points[i];
+ const PointSource &src = (*cloud_src)[i];
+ PointTarget &tgt = input_transformed[i];
tgt.x = static_cast<float> (transformation_matrix (0, 0) * src.x + transformation_matrix (0, 1) * src.y + transformation_matrix (0, 2) * src.z + transformation_matrix (0, 3));
tgt.y = static_cast<float> (transformation_matrix (1, 0) * src.x + transformation_matrix (1, 1) * src.y + transformation_matrix (1, 2) * src.z + transformation_matrix (1, 3));
tgt.z = static_cast<float> (transformation_matrix (2, 0) * src.x + transformation_matrix (2, 1) * src.y + transformation_matrix (2, 2) * src.z + transformation_matrix (2, 3));
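// --- Editor's note: a small sketch (not part of the diff) of the hand-rolled rigid
// --- transform applied above: the top-left 3x3 block of the 4x4 matrix rotates the
// --- point and the last column translates it.
#include <Eigen/Dense>
#include <cstdio>

int main ()
{
  Eigen::Matrix4f transformation_matrix = Eigen::Matrix4f::Identity ();
  transformation_matrix (0, 3) = 1.0f;                 // translate by +1 along x
  const Eigen::Vector4f src (1.0f, 2.0f, 3.0f, 1.0f);  // homogeneous source point
  const Eigen::Vector4f tgt = transformation_matrix * src;
  std::printf ("(%f, %f, %f)\n", tgt.x (), tgt.y (), tgt.z ());  // (2, 2, 3)
  return 0;
}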
// For each point in the source dataset
int nr = 0;
- for (std::size_t i = 0; i < input_transformed.points.size (); ++i)
+ for (const auto& point: input_transformed)
{
// Find its nearest neighbor in the target
- tree_->nearestKSearch (input_transformed.points[i], 1, nn_indices, nn_dists);
+ tree_->nearestKSearch (point, 1, nn_indices, nn_dists);
// Deal with occlusions (incomplete targets)
if (nn_dists[0] > max_range_)
#pragma once
-#include <pcl/registration/boost.h>
#include <pcl/registration/registration.h>
#include <pcl/features/ppf.h>
#pragma once
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/registration/transformation_estimation.h>
#include <pcl/registration/warp_point_rigid.h>
-#include <pcl/registration/distances.h>
namespace pcl
{
GeneralizedIterativeClosestPoint6D::computeTransformation (PointCloudSource& output, const Eigen::Matrix4f& guess)
{
using namespace pcl;
- using namespace std;
IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
{
// Discretize the feature cloud and insert it in the hash map
feature_hash_map_->clear ();
- unsigned int n = static_cast<unsigned int> (std::sqrt (static_cast<float> (feature_cloud->points.size ())));
+ unsigned int n = static_cast<unsigned int> (std::sqrt (static_cast<float> (feature_cloud->size ())));
int d1, d2, d3, d4;
max_dist_ = -1.0;
alpha_m_.resize (n);
std::vector <float> alpha_m_row (n);
for (std::size_t j = 0; j < n; ++j)
{
- d1 = static_cast<int> (std::floor (feature_cloud->points[i*n+j].f1 / angle_discretization_step_));
- d2 = static_cast<int> (std::floor (feature_cloud->points[i*n+j].f2 / angle_discretization_step_));
- d3 = static_cast<int> (std::floor (feature_cloud->points[i*n+j].f3 / angle_discretization_step_));
- d4 = static_cast<int> (std::floor (feature_cloud->points[i*n+j].f4 / distance_discretization_step_));
+ d1 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f1 / angle_discretization_step_));
+ d2 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f2 / angle_discretization_step_));
+ d3 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f3 / angle_discretization_step_));
+ d4 = static_cast<int> (std::floor ((*feature_cloud)[i*n+j].f4 / distance_discretization_step_));
feature_hash_map_->insert (std::pair<HashKeyStruct, std::pair<std::size_t, std::size_t> > (HashKeyStruct (d1, d2, d3, d4), std::pair<std::size_t, std::size_t> (i, j)));
- alpha_m_row [j] = feature_cloud->points[i*n + j].alpha_m;
+ alpha_m_row [j] = (*feature_cloud)[i*n + j].alpha_m;
- if (max_dist_ < feature_cloud->points[i*n + j].f4)
- max_dist_ = feature_cloud->points[i*n + j].f4;
+ if (max_dist_ < (*feature_cloud)[i*n + j].f4)
+ max_dist_ = (*feature_cloud)[i*n + j].f4;
}
alpha_m_[i] = alpha_m_row;
}
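// --- Editor's note: a condensed, hypothetical sketch (not the PPFRegistration code) of
// --- the idea in the hunk above: each four-dimensional point-pair feature is quantized
// --- into integer bins, and the bin tuple is used as a map key so that similar features
// --- end up in the same bucket. The step sizes and key type below are invented for
// --- illustration only.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <map>
#include <tuple>

int main ()
{
  const float angle_step = 12.0f * 3.14159265f / 180.0f;  // angular bin width (radians)
  const float dist_step  = 0.01f;                         // distance bin width (metres)
  using Key = std::tuple<int, int, int, int>;
  std::multimap<Key, std::size_t> feature_map;

  const float features[2][4] = {{0.30f, 0.40f, 0.60f, 0.055f},
                                {0.31f, 0.41f, 0.61f, 0.056f}};  // two similar features
  for (std::size_t i = 0; i < 2; ++i)
  {
    const float* f = features[i];
    const Key key (static_cast<int> (std::floor (f[0] / angle_step)),
                   static_cast<int> (std::floor (f[1] / angle_step)),
                   static_cast<int> (std::floor (f[2] / angle_step)),
                   static_cast<int> (std::floor (f[3] / dist_step)));
    feature_map.insert ({key, i});
  }
  std::printf ("%zu features share the first key\n",
               feature_map.count (feature_map.begin ()->first));  // 2
  return 0;
}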
for (std::size_t i = 0; i < indices->size (); ++i)
{
- pcl::Vector4fMapConst pt = cloud->points[(*indices)[i]].getVector4fMap ();
+ pcl::Vector4fMapConst pt = (*cloud)[(*indices)[i]].getVector4fMap ();
Eigen::Vector4f ptdiff = pt - median;
ptdiff[3] = 0;
distances[i] = ptdiff.dot (ptdiff);
for (std::size_t i = 0; i < indices->size (); ++i)
{
- if (cloud->points[(*indices)[i]].x < min_p[0]) min_p[0] = cloud->points[(*indices)[i]].x;
- if (cloud->points[(*indices)[i]].y < min_p[1]) min_p[1] = cloud->points[(*indices)[i]].y;
- if (cloud->points[(*indices)[i]].z < min_p[2]) min_p[2] = cloud->points[(*indices)[i]].z;
+ if ((*cloud)[(*indices)[i]].x < min_p[0]) min_p[0] = (*cloud)[(*indices)[i]].x;
+ if ((*cloud)[(*indices)[i]].y < min_p[1]) min_p[1] = (*cloud)[(*indices)[i]].y;
+ if ((*cloud)[(*indices)[i]].z < min_p[2]) min_p[2] = (*cloud)[(*indices)[i]].z;
- if (cloud->points[(*indices)[i]].x > max_p[0]) max_p[0] = cloud->points[(*indices)[i]].x;
- if (cloud->points[(*indices)[i]].y > max_p[1]) max_p[1] = cloud->points[(*indices)[i]].y;
- if (cloud->points[(*indices)[i]].z > max_p[2]) max_p[2] = cloud->points[(*indices)[i]].z;
+ if ((*cloud)[(*indices)[i]].x > max_p[0]) max_p[0] = (*cloud)[(*indices)[i]].x;
+ if ((*cloud)[(*indices)[i]].y > max_p[1]) max_p[1] = (*cloud)[(*indices)[i]].y;
+ if ((*cloud)[(*indices)[i]].z > max_p[2]) max_p[2] = (*cloud)[(*indices)[i]].z;
}
}
std::vector<float> z (indices->size ());
for (std::size_t i = 0; i < indices->size (); ++i)
{
- x[i] = cloud->points[(*indices)[i]].x;
- y[i] = cloud->points[(*indices)[i]].y;
- z[i] = cloud->points[(*indices)[i]].z;
+ x[i] = (*cloud)[(*indices)[i]].x;
+ y[i] = (*cloud)[(*indices)[i]].y;
+ z[i] = (*cloud)[(*indices)[i]].z;
}
std::sort (x.begin (), x.end ());
std::sort (y.begin (), y.end ());
const double log_probability = std::log (1.0 - probability_);
const double one_over_indices = 1.0 / static_cast<double> (sac_model_->getIndices ()->size ());
- std::size_t n_inliers_count;
unsigned skipped_count = 0;
// suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
}
#if OPENMP_AVAILABLE_RANSAC
-#pragma omp parallel if(threads > 0) num_threads(threads) shared(k, skipped_count, n_best_inliers_count) private(selection, model_coefficients, n_inliers_count) // would be nice to have a default(none)-clause here, but then some compilers complain about the shared const variables
+#pragma omp parallel if(threads > 0) num_threads(threads) shared(k, skipped_count, n_best_inliers_count) firstprivate(selection, model_coefficients) // would be nice to have a default(none)-clause here, but then some compilers complain about the shared const variables
#endif
{
#if OPENMP_AVAILABLE_RANSAC
//if (inliers.empty () && k > 1.0)
// continue;
- n_inliers_count = sac_model_->countWithinDistance (model_coefficients, threshold_); // This functions has to be thread-safe. Most work is done here
+ std::size_t n_inliers_count = sac_model_->countWithinDistance (model_coefficients, threshold_); // This function has to be thread-safe. Most of the work is done here
std::size_t n_best_inliers_count_tmp;
#if OPENMP_AVAILABLE_RANSAC
return (false);
}
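// --- Editor's note: an illustrative sketch (not part of the diff) of the OpenMP change
// --- a few lines above: private() gives every thread an *uninitialized* copy, while
// --- firstprivate() initializes each copy from the value outside the region, and a
// --- variable declared inside the region (as n_inliers_count now is) is thread-local
// --- by construction.
#include <algorithm>
#include <cstdio>
#include <vector>

int main ()
{
  std::vector<int> selection (3, 42);   // state prepared before the parallel region
  int best = 0;
#pragma omp parallel firstprivate(selection) shared(best)
  {
    // Each thread works on its own initialized copy of `selection`.
    const int n_inliers_count = static_cast<int> (selection.size ());
#pragma omp critical
    best = std::max (best, n_inliers_count);
  }
  std::printf ("best = %d\n", best);
  return 0;
}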
// Get the values at the two points
- Eigen::Array2d p0 (input_->points[samples[0]].x, input_->points[samples[0]].y);
- Eigen::Array2d p1 (input_->points[samples[1]].x, input_->points[samples[1]].y);
- Eigen::Array2d p2 (input_->points[samples[2]].x, input_->points[samples[2]].y);
+ Eigen::Array2d p0 ((*input_)[samples[0]].x, (*input_)[samples[0]].y);
+ Eigen::Array2d p1 ((*input_)[samples[1]].x, (*input_)[samples[1]].y);
+ Eigen::Array2d p2 ((*input_)[samples[2]].x, (*input_)[samples[2]].y);
// Compute the segment values (in 2d) between p1 and p0
p1 -= p0;
model_coefficients.resize (model_size_);
- Eigen::Vector2d p0 (input_->points[samples[0]].x, input_->points[samples[0]].y);
- Eigen::Vector2d p1 (input_->points[samples[1]].x, input_->points[samples[1]].y);
- Eigen::Vector2d p2 (input_->points[samples[2]].x, input_->points[samples[2]].y);
+ Eigen::Vector2d p0 ((*input_)[samples[0]].x, (*input_)[samples[0]].y);
+ Eigen::Vector2d p1 ((*input_)[samples[1]].x, (*input_)[samples[1]].y);
+ Eigen::Vector2d p2 ((*input_)[samples[2]].x, (*input_)[samples[2]].y);
Eigen::Vector2d u = (p0 + p1) / 2.0;
Eigen::Vector2d v = (p1 + p2) / 2.0;
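// --- Editor's note: an illustrative sketch (not the library code) of one way to recover
// --- a 2D circle from three samples, as the model-coefficient hunk above does: the centre
// --- is equidistant from the points, so it solves the two perpendicular-bisector equations
// --- (p1 - p0) . c = (|p1|^2 - |p0|^2) / 2 and (p2 - p1) . c = (|p2|^2 - |p1|^2) / 2.
#include <Eigen/Dense>
#include <cstdio>

int main ()
{
  const Eigen::Vector2d p0 (1.0, 0.0), p1 (0.0, 1.0), p2 (-1.0, 0.0);  // on the unit circle
  Eigen::Matrix2d A;
  A.row (0) = (p1 - p0).transpose ();
  A.row (1) = (p2 - p1).transpose ();
  const Eigen::Vector2d b (0.5 * (p1.squaredNorm () - p0.squaredNorm ()),
                           0.5 * (p2.squaredNorm () - p1.squaredNorm ()));
  const Eigen::Vector2d center = A.colPivHouseholderQr ().solve (b);
  const double radius = (p0 - center).norm ();
  std::printf ("center = (%f, %f), radius = %f\n", center.x (), center.y (), radius);  // (0, 0), 1
  return 0;
}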
// Calculate the distance from the point to the circle as the difference between
// dist(point,circle_origin) and circle_radius
distances[i] = std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] )
) - model_coefficients[2]);
}
// Calculate the distance from the point to the circle as the difference between
// dist(point,circle_origin) and circle_radius
float distance = std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] )
) - model_coefficients[2]);
if (distance < threshold)
{
// Calculate the distance from the point to the circle as the difference between
// dist(point,circle_origin) and circle_radius
float distance = std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] )
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] )
) - model_coefficients[2]);
if (distance < threshold)
nr_p++;
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the points and project them to the circle
for (const auto &inlier : inliers)
{
- float dx = input_->points[inlier].x - model_coefficients[0];
- float dy = input_->points[inlier].y - model_coefficients[1];
+ float dx = (*input_)[inlier].x - model_coefficients[0];
+ float dy = (*input_)[inlier].y - model_coefficients[1];
float a = std::sqrt ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
- projected_points.points[inlier].x = a * dx + model_coefficients[0];
- projected_points.points[inlier].y = a * dy + model_coefficients[1];
+ projected_points[inlier].x = a * dx + model_coefficients[0];
+ projected_points[inlier].y = a * dy + model_coefficients[1];
}
}
else
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
for (std::size_t i = 0; i < inliers.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
// Iterate through the points and project them to the circle
for (std::size_t i = 0; i < inliers.size (); ++i)
{
- float dx = input_->points[inliers[i]].x - model_coefficients[0];
- float dy = input_->points[inliers[i]].y - model_coefficients[1];
+ float dx = (*input_)[inliers[i]].x - model_coefficients[0];
+ float dy = (*input_)[inliers[i]].y - model_coefficients[1];
float a = std::sqrt ( (model_coefficients[2] * model_coefficients[2]) / (dx * dx + dy * dy) );
- projected_points.points[i].x = a * dx + model_coefficients[0];
- projected_points.points[i].y = a * dy + model_coefficients[1];
+ projected_points[i].x = a * dx + model_coefficients[0];
+ projected_points[i].y = a * dy + model_coefficients[1];
}
}
}
// Calculate the distance from the point to the circle as the difference between
//dist(point,circle_origin) and circle_radius
if (std::abs (std::sqrt (
- ( input_->points[index].x - model_coefficients[0] ) *
- ( input_->points[index].x - model_coefficients[0] ) +
- ( input_->points[index].y - model_coefficients[1] ) *
- ( input_->points[index].y - model_coefficients[1] )
+ ( (*input_)[index].x - model_coefficients[0] ) *
+ ( (*input_)[index].x - model_coefficients[0] ) +
+ ( (*input_)[index].y - model_coefficients[1] ) *
+ ( (*input_)[index].y - model_coefficients[1] )
) - model_coefficients[2]) > threshold)
return (false);
return (false);
}
// Get the values at the three points
- Eigen::Vector3d p0 (input_->points[samples[0]].x, input_->points[samples[0]].y, input_->points[samples[0]].z);
- Eigen::Vector3d p1 (input_->points[samples[1]].x, input_->points[samples[1]].y, input_->points[samples[1]].z);
- Eigen::Vector3d p2 (input_->points[samples[2]].x, input_->points[samples[2]].y, input_->points[samples[2]].z);
+ Eigen::Vector3d p0 ((*input_)[samples[0]].x, (*input_)[samples[0]].y, (*input_)[samples[0]].z);
+ Eigen::Vector3d p1 ((*input_)[samples[1]].x, (*input_)[samples[1]].y, (*input_)[samples[1]].z);
+ Eigen::Vector3d p2 ((*input_)[samples[2]].x, (*input_)[samples[2]].y, (*input_)[samples[2]].z);
// calculate vectors between points
p1 -= p0;
model_coefficients.resize (model_size_); //needing 7 coefficients: centerX, centerY, centerZ, radius, normalX, normalY, normalZ
- Eigen::Vector3d p0 (input_->points[samples[0]].x, input_->points[samples[0]].y, input_->points[samples[0]].z);
- Eigen::Vector3d p1 (input_->points[samples[1]].x, input_->points[samples[1]].y, input_->points[samples[1]].z);
- Eigen::Vector3d p2 (input_->points[samples[2]].x, input_->points[samples[2]].y, input_->points[samples[2]].z);
+ Eigen::Vector3d p0 ((*input_)[samples[0]].x, (*input_)[samples[0]].y, (*input_)[samples[0]].z);
+ Eigen::Vector3d p1 ((*input_)[samples[1]].x, (*input_)[samples[1]].y, (*input_)[samples[1]].z);
+ Eigen::Vector3d p2 ((*input_)[samples[2]].x, (*input_)[samples[2]].y, (*input_)[samples[2]].z);
Eigen::Vector3d helper_vec01 = p0 - p1;
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z);
+ Eigen::Vector3d P ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z);
+ Eigen::Vector3d P ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z);
+ Eigen::Vector3d P ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < inliers.size (); ++i)
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z);
+ Eigen::Vector3d P ((*input_)[inliers[i]].x, (*input_)[inliers[i]].y, (*input_)[inliers[i]].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
// K : Point on Circle
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
- projected_points.points[i].x = static_cast<float> (K[0]);
- projected_points.points[i].y = static_cast<float> (K[1]);
- projected_points.points[i].z = static_cast<float> (K[2]);
+ projected_points[i].x = static_cast<float> (K[0]);
+ projected_points[i].y = static_cast<float> (K[1]);
+ projected_points[i].z = static_cast<float> (K[2]);
}
}
else
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = std::uint32_t (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
for (std::size_t i = 0; i < inliers.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < inliers.size (); ++i)
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z);
+ Eigen::Vector3d P ((*input_)[inliers[i]].x, (*input_)[inliers[i]].y, (*input_)[inliers[i]].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
// K : Point on Circle
Eigen::Vector3d K = C + r * helper_vectorP_projC.normalized ();
- projected_points.points[i].x = static_cast<float> (K[0]);
- projected_points.points[i].y = static_cast<float> (K[1]);
- projected_points.points[i].z = static_cast<float> (K[2]);
+ projected_points[i].x = static_cast<float> (K[0]);
+ projected_points[i].y = static_cast<float> (K[1]);
+ projected_points[i].z = static_cast<float> (K[2]);
}
}
}
// what i have:
// P : Sample Point
- Eigen::Vector3d P (input_->points[index].x, input_->points[index].y, input_->points[index].z);
+ Eigen::Vector3d P ((*input_)[index].x, (*input_)[index].y, (*input_)[index].z);
// C : Circle Center
Eigen::Vector3d C (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// N : Circle (Plane) Normal
return (false);
}
- Eigen::Vector4f p1 (input_->points[samples[0]].x, input_->points[samples[0]].y, input_->points[samples[0]].z, 0.0f);
- Eigen::Vector4f p2 (input_->points[samples[1]].x, input_->points[samples[1]].y, input_->points[samples[1]].z, 0.0f);
- Eigen::Vector4f p3 (input_->points[samples[2]].x, input_->points[samples[2]].y, input_->points[samples[2]].z, 0.0f);
+ Eigen::Vector4f p1 ((*input_)[samples[0]].x, (*input_)[samples[0]].y, (*input_)[samples[0]].z, 0.0f);
+ Eigen::Vector4f p2 ((*input_)[samples[1]].x, (*input_)[samples[1]].y, (*input_)[samples[1]].z, 0.0f);
+ Eigen::Vector4f p3 ((*input_)[samples[2]].x, (*input_)[samples[2]].y, (*input_)[samples[2]].z, 0.0f);
- Eigen::Vector4f n1 (normals_->points[samples[0]].normal[0], normals_->points[samples[0]].normal[1], normals_->points[samples[0]].normal[2], 0.0f);
- Eigen::Vector4f n2 (normals_->points[samples[1]].normal[0], normals_->points[samples[1]].normal[1], normals_->points[samples[1]].normal[2], 0.0f);
- Eigen::Vector4f n3 (normals_->points[samples[2]].normal[0], normals_->points[samples[2]].normal[1], normals_->points[samples[2]].normal[2], 0.0f);
+ Eigen::Vector4f n1 ((*normals_)[samples[0]].normal[0], (*normals_)[samples[0]].normal[1], (*normals_)[samples[0]].normal[2], 0.0f);
+ Eigen::Vector4f n2 ((*normals_)[samples[1]].normal[0], (*normals_)[samples[1]].normal[1], (*normals_)[samples[1]].normal[2], 0.0f);
+ Eigen::Vector4f n3 ((*normals_)[samples[2]].normal[0], (*normals_)[samples[2]].normal[1], (*normals_)[samples[2]].normal[2], 0.0f);
// Calculate the apex (intersection of the three planes defined by the sample points and their normals)
Eigen::Vector4f ortho12 = n1.cross3(n2);
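// --- Editor's note: an illustrative sketch (not the library code) of the apex computation
// --- started above: the cone apex is the intersection of the three planes through the
// --- sample points with their normals, i.e. the solution of n_i . x = n_i . p_i.
#include <Eigen/Dense>
#include <cstdio>

int main ()
{
  // Plane i passes through point p_i with normal n_i; the apex solves N * x = d.
  Eigen::Matrix3d N;                  // one plane normal per row
  N << 1, 0, 0,
       0, 1, 0,
       0, 0, 1;
  const Eigen::Vector3d d (1.0, 2.0, 3.0);   // d_i = n_i . p_i, here the planes x=1, y=2, z=3
  const Eigen::Vector3d apex = N.colPivHouseholderQr ().solve (d);
  std::printf ("apex = (%f, %f, %f)\n", apex.x (), apex.y (), apex.z ());  // (1, 2, 3)
  return 0;
}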
// Iterate through the 3d points and calculate the distances from them to the cone
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
// Iterate through the 3d points and calculate the distances from them to the cone
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
// Iterate through the 3d points and calculate the distances from them to the cone
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the cone
for (const auto &inlier : inliers)
{
- Eigen::Vector4f pt (input_->points[inlier].x,
- input_->points[inlier].y,
- input_->points[inlier].z,
+ Eigen::Vector4f pt ((*input_)[inlier].x,
+ (*input_)[inlier].y,
+ (*input_)[inlier].z,
1);
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
- pcl::Vector4fMap pp = projected_points.points[inlier].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[inlier].getVector4fMap ();
pp.matrix () = apex + k * axis_dir;
Eigen::Vector4f dir = pt - pp;
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
for (std::size_t i = 0; i < inliers.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the cone
for (std::size_t i = 0; i < inliers.size (); ++i)
{
- pcl::Vector4fMap pp = projected_points.points[i].getVector4fMap ();
- pcl::Vector4fMapConst pt = input_->points[inliers[i]].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[i].getVector4fMap ();
+ pcl::Vector4fMapConst pt = (*input_)[inliers[i]].getVector4fMap ();
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
// Calculate the projection of the point on the line
// Iterate through the 3d points and calculate the distances from them to the cone
for (const auto &index : indices)
{
- Eigen::Vector4f pt (input_->points[index].x, input_->points[index].y, input_->points[index].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[index].x, (*input_)[index].y, (*input_)[index].z, 0.0f);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
return (false);
}
- if (std::abs (input_->points[samples[0]].x - input_->points[samples[1]].x) <= std::numeric_limits<float>::epsilon () &&
- std::abs (input_->points[samples[0]].y - input_->points[samples[1]].y) <= std::numeric_limits<float>::epsilon () &&
- std::abs (input_->points[samples[0]].z - input_->points[samples[1]].z) <= std::numeric_limits<float>::epsilon ())
+ if (std::abs ((*input_)[samples[0]].x - (*input_)[samples[1]].x) <= std::numeric_limits<float>::epsilon () &&
+ std::abs ((*input_)[samples[0]].y - (*input_)[samples[1]].y) <= std::numeric_limits<float>::epsilon () &&
+ std::abs ((*input_)[samples[0]].z - (*input_)[samples[1]].z) <= std::numeric_limits<float>::epsilon ())
{
return (false);
}
- Eigen::Vector4f p1 (input_->points[samples[0]].x, input_->points[samples[0]].y, input_->points[samples[0]].z, 0.0f);
- Eigen::Vector4f p2 (input_->points[samples[1]].x, input_->points[samples[1]].y, input_->points[samples[1]].z, 0.0f);
+ Eigen::Vector4f p1 ((*input_)[samples[0]].x, (*input_)[samples[0]].y, (*input_)[samples[0]].z, 0.0f);
+ Eigen::Vector4f p2 ((*input_)[samples[1]].x, (*input_)[samples[1]].y, (*input_)[samples[1]].z, 0.0f);
- Eigen::Vector4f n1 (normals_->points[samples[0]].normal[0], normals_->points[samples[0]].normal[1], normals_->points[samples[0]].normal[2], 0.0f);
- Eigen::Vector4f n2 (normals_->points[samples[1]].normal[0], normals_->points[samples[1]].normal[1], normals_->points[samples[1]].normal[2], 0.0f);
+ Eigen::Vector4f n1 ((*normals_)[samples[0]].normal[0], (*normals_)[samples[0]].normal[1], (*normals_)[samples[0]].normal[2], 0.0f);
+ Eigen::Vector4f n2 ((*normals_)[samples[1]].normal[0], (*normals_)[samples[1]].normal[1], (*normals_)[samples[1]].normal[2], 0.0f);
Eigen::Vector4f w = n1 + p1 - p2;
float a = n1.dot (n1);
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
// @note need to revise this.
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
{
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
// Calculate the point's projection on the cylinder axis
{
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0.0f);
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x, (*input_)[(*indices_)[i]].y, (*input_)[(*indices_)[i]].z, 0.0f);
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0], (*normals_)[(*indices_)[i]].normal[1], (*normals_)[(*indices_)[i]].normal[2], 0.0f);
double d_euclid = std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]);
// Calculate the point's projection on the cylinder axis
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the cylinder
for (const auto &inlier : inliers)
{
- Eigen::Vector4f p (input_->points[inlier].x,
- input_->points[inlier].y,
- input_->points[inlier].z,
+ Eigen::Vector4f p ((*input_)[inlier].x,
+ (*input_)[inlier].y,
+ (*input_)[inlier].z,
1);
float k = (p.dot (line_dir) - ptdotdir) * dirdotdir;
- pcl::Vector4fMap pp = projected_points.points[inlier].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[inlier].getVector4fMap ();
pp.matrix () = line_pt + k * line_dir;
Eigen::Vector4f dir = p - pp;
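// --- Editor's note: an illustrative sketch (not the library code) of the projection the
// --- hunk above performs: the scalar k is the position of the point's foot along the
// --- cylinder axis, and the distance from the point to that foot is its radial distance.
#include <Eigen/Dense>
#include <cstdio>

int main ()
{
  const Eigen::Vector3d line_pt (0.0, 0.0, 0.0), line_dir (0.0, 0.0, 1.0);  // z-axis
  const Eigen::Vector3d p (3.0, 4.0, 2.0);

  const double k = (p - line_pt).dot (line_dir) / line_dir.squaredNorm ();
  const Eigen::Vector3d foot = line_pt + k * line_dir;      // projection onto the axis
  std::printf ("k = %f, radial distance = %f\n", k, (p - foot).norm ());  // 2, 5
  return 0;
}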
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
for (std::size_t i = 0; i < inliers.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the cylinder
for (std::size_t i = 0; i < inliers.size (); ++i)
{
- pcl::Vector4fMap pp = projected_points.points[i].getVector4fMap ();
- pcl::Vector4fMapConst p = input_->points[inliers[i]].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[i].getVector4fMap ();
+ pcl::Vector4fMapConst p = (*input_)[inliers[i]].getVector4fMap ();
float k = (p.dot (line_dir) - ptdotdir) * dirdotdir;
// Calculate the projection of the point on the line
// Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
// @note need to revise this.
- Eigen::Vector4f pt (input_->points[index].x, input_->points[index].y, input_->points[index].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[index].x, (*input_)[index].y, (*input_)[index].z, 0.0f);
if (std::abs (pointToLineDistance (pt, model_coefficients) - model_coefficients[6]) > threshold)
return (false);
}
}
// Make sure that the two sample points are not identical
if (
- (input_->points[samples[0]].x != input_->points[samples[1]].x)
+ ((*input_)[samples[0]].x != (*input_)[samples[1]].x)
||
- (input_->points[samples[0]].y != input_->points[samples[1]].y)
+ ((*input_)[samples[0]].y != (*input_)[samples[1]].y)
||
- (input_->points[samples[0]].z != input_->points[samples[1]].z))
+ ((*input_)[samples[0]].z != (*input_)[samples[1]].z))
{
return (true);
}
return (false);
}
- if (std::abs (input_->points[samples[0]].x - input_->points[samples[1]].x) <= std::numeric_limits<float>::epsilon () &&
- std::abs (input_->points[samples[0]].y - input_->points[samples[1]].y) <= std::numeric_limits<float>::epsilon () &&
- std::abs (input_->points[samples[0]].z - input_->points[samples[1]].z) <= std::numeric_limits<float>::epsilon ())
+ if (std::abs ((*input_)[samples[0]].x - (*input_)[samples[1]].x) <= std::numeric_limits<float>::epsilon () &&
+ std::abs ((*input_)[samples[0]].y - (*input_)[samples[1]].y) <= std::numeric_limits<float>::epsilon () &&
+ std::abs ((*input_)[samples[0]].z - (*input_)[samples[1]].z) <= std::numeric_limits<float>::epsilon ())
{
return (false);
}
model_coefficients.resize (model_size_);
- model_coefficients[0] = input_->points[samples[0]].x;
- model_coefficients[1] = input_->points[samples[0]].y;
- model_coefficients[2] = input_->points[samples[0]].z;
+ model_coefficients[0] = (*input_)[samples[0]].x;
+ model_coefficients[1] = (*input_)[samples[0]].y;
+ model_coefficients[2] = (*input_)[samples[0]].z;
- model_coefficients[3] = input_->points[samples[1]].x - model_coefficients[0];
- model_coefficients[4] = input_->points[samples[1]].y - model_coefficients[1];
- model_coefficients[5] = input_->points[samples[1]].z - model_coefficients[2];
+ model_coefficients[3] = (*input_)[samples[1]].x - model_coefficients[0];
+ model_coefficients[4] = (*input_)[samples[1]].y - model_coefficients[1];
+ model_coefficients[5] = (*input_)[samples[1]].z - model_coefficients[2];
model_coefficients.template tail<3> ().normalize ();
return (true);
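// Note (reading of the hunk above, not from the change set): the line model
// packs a point on the line into coefficients [0..2] and the normalized
// direction (second sample minus first) into [3..5]; the point-to-line
// distances below are computed against exactly this representation.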
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
// Need to estimate sqrt here to keep MSAC and friends general
- distances[i] = sqrt ((line_pt - input_->points[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ());
+ distances[i] = sqrt ((line_pt - (*input_)[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ());
}
}
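// Illustrative sketch (not part of the change set, made-up values): the
// formula used above, D = ||(P2-P1) x (P1-P0)|| / ||P2-P1||, written out with
// plain 3-vectors. The cross3 () calls in the hunks do the same computation on
// homogeneous 4-vectors and zero the fourth component of the result.
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <iostream>

int main ()
{
  const Eigen::Vector3f line_pt (0.0f, 0.0f, 0.0f);  // P1
  const Eigen::Vector3f line_dir (1.0f, 0.0f, 0.0f); // P2 - P1 (unit length here)
  const Eigen::Vector3f p (2.0f, 3.0f, 0.0f);        // P0
  const float d = (line_pt - p).cross (line_dir).norm () / line_dir.norm ();
  std::cout << d << std::endl; // 3: the point lies 3 units away from the x-axis
  return 0;
}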
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- double sqr_distance = (line_pt - input_->points[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
+ double sqr_distance = (line_pt - (*input_)[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
if (sqr_distance < sqr_threshold)
{
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- double sqr_distance = (line_pt - input_->points[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
+ double sqr_distance = (line_pt - (*input_)[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
if (sqr_distance < sqr_threshold)
nr_p++;
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the line
for (const auto &inlier : inliers)
{
- Eigen::Vector4f pt (input_->points[inlier].x, input_->points[inlier].y, input_->points[inlier].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[inlier].x, (*input_)[inlier].y, (*input_)[inlier].z, 0.0f);
// double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B;
float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir);
Eigen::Vector4f pp = line_pt + k * line_dir;
// Calculate the projection of the point on the line (pointProj = A + k * B)
- projected_points.points[inlier].x = pp[0];
- projected_points.points[inlier].y = pp[1];
- projected_points.points[inlier].z = pp[2];
+ projected_points[inlier].x = pp[0];
+ projected_points[inlier].y = pp[1];
+ projected_points[inlier].z = pp[2];
}
}
else
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
for (std::size_t i = 0; i < inliers.size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the line
for (std::size_t i = 0; i < inliers.size (); ++i)
{
- Eigen::Vector4f pt (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[inliers[i]].x, (*input_)[inliers[i]].y, (*input_)[inliers[i]].z, 0.0f);
// double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B;
float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir);
Eigen::Vector4f pp = line_pt + k * line_dir;
// Calculate the projection of the point on the line (pointProj = A + k * B)
- projected_points.points[i].x = pp[0];
- projected_points.points[i].y = pp[1];
- projected_points.points[i].z = pp[2];
+ projected_points[i].x = pp[0];
+ projected_points[i].y = pp[1];
+ projected_points[i].z = pp[2];
}
}
}
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- if ((line_pt - input_->points[index].getVector4fMap ()).cross3 (line_dir).squaredNorm () > sqr_threshold)
+ if ((line_pt - (*input_)[index].getVector4fMap ()).cross3 (line_dir).squaredNorm () > sqr_threshold)
return (false);
}
#define PCL_SAMPLE_CONSENSUS_IMPL_SAC_MODEL_NORMAL_PLANE_H_
#include <pcl/sample_consensus/sac_model_normal_plane.h>
+#include <pcl/common/common.h> // for getAngle3D
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- const PointT &pt = input_->points[(*indices_)[i]];
- const PointNT &nt = normals_->points[(*indices_)[i]];
+ const PointT &pt = (*input_)[(*indices_)[i]];
+ const PointNT &nt = (*normals_)[(*indices_)[i]];
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
Eigen::Vector4f p (pt.x, pt.y, pt.z, 0.0f);
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- const PointT &pt = input_->points[(*indices_)[i]];
- const PointNT &nt = normals_->points[(*indices_)[i]];
+ const PointT &pt = (*input_)[(*indices_)[i]];
+ const PointNT &nt = (*normals_)[(*indices_)[i]];
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
Eigen::Vector4f p (pt.x, pt.y, pt.z, 0.0f);
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- const PointT &pt = input_->points[(*indices_)[i]];
- const PointNT &nt = normals_->points[(*indices_)[i]];
+ const PointT &pt = (*input_)[(*indices_)[i]];
+ const PointNT &nt = (*normals_)[(*indices_)[i]];
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
Eigen::Vector4f p (pt.x, pt.y, pt.z, 0.0f);
{
// Calculate the distance from the point to the sphere center as the difference between
// dist(point,sphere_origin) and sphere_radius
- Eigen::Vector4f p (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f p ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0],
- normals_->points[(*indices_)[i]].normal[1],
- normals_->points[(*indices_)[i]].normal[2],
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
+ (*normals_)[(*indices_)[i]].normal[1],
+ (*normals_)[(*indices_)[i]].normal[2],
0.0f);
Eigen::Vector4f n_dir = p - center;
{
// Calculate the distance from the point to the sphere centroid as the difference between
// dist(point,sphere_origin) and sphere_radius
- Eigen::Vector4f p (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f p ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0],
- normals_->points[(*indices_)[i]].normal[1],
- normals_->points[(*indices_)[i]].normal[2],
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
+ (*normals_)[(*indices_)[i]].normal[1],
+ (*normals_)[(*indices_)[i]].normal[2],
0.0f);
Eigen::Vector4f n_dir = (p-center);
{
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
- Eigen::Vector4f p (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f p ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
0.0f);
- Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0],
- normals_->points[(*indices_)[i]].normal[1],
- normals_->points[(*indices_)[i]].normal[2],
+ Eigen::Vector4f n ((*normals_)[(*indices_)[i]].normal[0],
+ (*normals_)[(*indices_)[i]].normal[1],
+ (*normals_)[(*indices_)[i]].normal[2],
0.0f);
Eigen::Vector4f n_dir = (p-center);
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
#include <pcl/common/concatenate.h>
-#include <pcl/point_types.h>
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
return (false);
}
// Get the values at the two points
- pcl::Array4fMapConst p0 = input_->points[samples[0]].getArray4fMap ();
- pcl::Array4fMapConst p1 = input_->points[samples[1]].getArray4fMap ();
- pcl::Array4fMapConst p2 = input_->points[samples[2]].getArray4fMap ();
+ pcl::Array4fMapConst p0 = (*input_)[samples[0]].getArray4fMap ();
+ pcl::Array4fMapConst p1 = (*input_)[samples[1]].getArray4fMap ();
+ pcl::Array4fMapConst p2 = (*input_)[samples[2]].getArray4fMap ();
Eigen::Array4f dy1dy2 = (p1-p0) / (p2-p0);
return (false);
}
- pcl::Array4fMapConst p0 = input_->points[samples[0]].getArray4fMap ();
- pcl::Array4fMapConst p1 = input_->points[samples[1]].getArray4fMap ();
- pcl::Array4fMapConst p2 = input_->points[samples[2]].getArray4fMap ();
+ pcl::Array4fMapConst p0 = (*input_)[samples[0]].getArray4fMap ();
+ pcl::Array4fMapConst p1 = (*input_)[samples[1]].getArray4fMap ();
+ pcl::Array4fMapConst p2 = (*input_)[samples[2]].getArray4fMap ();
// Compute the segment values (in 3d) between p1 and p0
Eigen::Array4f p1p0 = p1 - p0;
{
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
- /*distances[i] = std::abs (model_coefficients[0] * input_->points[(*indices_)[i]].x +
- model_coefficients[1] * input_->points[(*indices_)[i]].y +
- model_coefficients[2] * input_->points[(*indices_)[i]].z +
+ /*distances[i] = std::abs (model_coefficients[0] * (*input_)[(*indices_)[i]].x +
+ model_coefficients[1] * (*input_)[(*indices_)[i]].y +
+ model_coefficients[2] * (*input_)[(*indices_)[i]].z +
model_coefficients[3]);*/
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
1.0f);
distances[i] = std::abs (model_coefficients.dot (pt));
}
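// Worked example (made-up numbers, not from the change set): model_coefficients
// is a normalized plane (a, b, c, d) and pt is homogeneous with w = 1, so the
// dot product expands to a*x + b*y + c*z + d. For the plane z = 2, written as
// (0, 0, 1, -2), and the point (1, 1, 5): |0*1 + 0*1 + 1*5 - 2| = 3, i.e. the
// point lies 3 units from the plane.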
{
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
1.0f);
float distance = std::abs (model_coefficients.dot (pt));
{
// Calculate the distance from the point to the plane normal as the dot product
// D = (P-A).N/|N|
- Eigen::Vector4f pt (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z,
+ Eigen::Vector4f pt ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z,
1.0f);
if (std::abs (model_coefficients.dot (pt)) < threshold)
{
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < input_->points.size (); ++i)
+ for (std::size_t i = 0; i < input_->size (); ++i)
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
// Iterate through the 3d points and calculate the distances from them to the plane
for (const auto &inlier : inliers)
{
// Calculate the distance from the point to the plane
- Eigen::Vector4f p (input_->points[inlier].x,
- input_->points[inlier].y,
- input_->points[inlier].z,
+ Eigen::Vector4f p ((*input_)[inlier].x,
+ (*input_)[inlier].y,
+ (*input_)[inlier].z,
1);
// use normalized coefficients to calculate the scalar projection
float distance_to_plane = tmp_mc.dot (p);
- pcl::Vector4fMap pp = projected_points.points[inlier].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[inlier].getVector4fMap ();
pp.matrix () = p - mc * distance_to_plane; // mc[3] = 0, therefore the 3rd coordinate is safe
}
}
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
for (std::size_t i = 0; i < inliers.size (); ++i)
{
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
}
// Iterate through the 3d points and calculate the distances from them to the plane
for (std::size_t i = 0; i < inliers.size (); ++i)
{
// Calculate the distance from the point to the plane
- Eigen::Vector4f p (input_->points[inliers[i]].x,
- input_->points[inliers[i]].y,
- input_->points[inliers[i]].z,
+ Eigen::Vector4f p ((*input_)[inliers[i]].x,
+ (*input_)[inliers[i]].y,
+ (*input_)[inliers[i]].z,
1.0f);
// use normalized coefficients to calculate the scalar projection
float distance_to_plane = tmp_mc.dot (p);
- pcl::Vector4fMap pp = projected_points.points[i].getVector4fMap ();
+ pcl::Vector4fMap pp = projected_points[i].getVector4fMap ();
pp.matrix () = p - mc * distance_to_plane; // mc[3] = 0, therefore the 3rd coordinate is safe
}
}
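// Worked example (made-up numbers, continuing the plane z = 2 from the note
// above): with mc = (0, 0, 1, 0) and tmp_mc = (0, 0, 1, -2), the point
// p = (1, 1, 5, 1) gives distance_to_plane = tmp_mc.dot (p) = 3, so the
// projection is p - mc * 3 = (1, 1, 2, 1): the point dropped straight onto the
// plane, with the homogeneous coordinate untouched because mc[3] = 0.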
for (const auto &index : indices)
{
- Eigen::Vector4f pt (input_->points[index].x,
- input_->points[index].y,
- input_->points[index].z,
+ Eigen::Vector4f pt ((*input_)[index].x,
+ (*input_)[index].y,
+ (*input_)[index].z,
1.0f);
if (std::abs (model_coefficients.dot (pt)) > threshold)
{
#include <pcl/sample_consensus/sac_model_registration.h>
#include <pcl/common/eigen.h>
-#include <pcl/point_types.h>
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
using namespace pcl::common;
using namespace pcl::traits;
- PointT p10 = input_->points[samples[1]] - input_->points[samples[0]];
- PointT p20 = input_->points[samples[2]] - input_->points[samples[0]];
- PointT p21 = input_->points[samples[2]] - input_->points[samples[1]];
+ PointT p10 = (*input_)[samples[1]] - (*input_)[samples[0]];
+ PointT p20 = (*input_)[samples[2]] - (*input_)[samples[0]];
+ PointT p21 = (*input_)[samples[2]] - (*input_)[samples[1]];
return ((p10.x * p10.x + p10.y * p10.y + p10.z * p10.z) > sample_dist_thresh_ &&
(p20.x * p20.x + p20.y * p20.y + p20.z * p20.z) > sample_dist_thresh_ &&
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1.0f);
- Eigen::Vector4f pt_tgt (target_->points[(*indices_tgt_)[i]].x,
- target_->points[(*indices_tgt_)[i]].y,
- target_->points[(*indices_tgt_)[i]].z, 1.0f);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1.0f);
+ Eigen::Vector4f pt_tgt ((*target_)[(*indices_tgt_)[i]].x,
+ (*target_)[(*indices_tgt_)[i]].y,
+ (*target_)[(*indices_tgt_)[i]].z, 1.0f);
Eigen::Vector4f p_tr (transform * pt_src);
// Calculate the distance from the transformed point to its correspondence
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1);
- Eigen::Vector4f pt_tgt (target_->points[(*indices_tgt_)[i]].x,
- target_->points[(*indices_tgt_)[i]].y,
- target_->points[(*indices_tgt_)[i]].z, 1);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1);
+ Eigen::Vector4f pt_tgt ((*target_)[(*indices_tgt_)[i]].x,
+ (*target_)[(*indices_tgt_)[i]].y,
+ (*target_)[(*indices_tgt_)[i]].z, 1);
Eigen::Vector4f p_tr (transform * pt_src);
std::size_t nr_p = 0;
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1);
- Eigen::Vector4f pt_tgt (target_->points[(*indices_tgt_)[i]].x,
- target_->points[(*indices_tgt_)[i]].y,
- target_->points[(*indices_tgt_)[i]].z, 1);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1);
+ Eigen::Vector4f pt_tgt ((*target_)[(*indices_tgt_)[i]].x,
+ (*target_)[(*indices_tgt_)[i]].y,
+ (*target_)[(*indices_tgt_)[i]].z, 1);
Eigen::Vector4f p_tr (transform * pt_src);
// Calculate the distance from the transformed point to its correspondence
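// Illustrative sketch (made-up values, not from the change set): the
// registration models above score a candidate rigid transform by applying it
// to each source point in homogeneous form and measuring the distance to the
// corresponding target point. For example:
//   transform  = translation by (+1, 0, 0)
//   pt_src     = (0, 0, 0, 1)  ->  p_tr = transform * pt_src = (1, 0, 0, 1)
//   pt_tgt     = (1, 0, 0, 1)  ->  (p_tr - pt_tgt).squaredNorm () = 0
// so this correspondence counts as a perfect inlier for any positive threshold.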
//using namespace pcl::common;
//using namespace pcl::traits;
- //PointT p10 = input_->points[samples[1]] - input_->points[samples[0]];
- //PointT p20 = input_->points[samples[2]] - input_->points[samples[0]];
- //PointT p21 = input_->points[samples[2]] - input_->points[samples[1]];
+ //PointT p10 = (*input_)[samples[1]] - (*input_)[samples[0]];
+ //PointT p20 = (*input_)[samples[2]] - (*input_)[samples[0]];
+ //PointT p21 = (*input_)[samples[2]] - (*input_)[samples[1]];
//return ((p10.x * p10.x + p10.y * p10.y + p10.z * p10.z) > sample_dist_thresh_ &&
// (p20.x * p20.x + p20.y * p20.y + p20.z * p20.z) > sample_dist_thresh_ &&
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1.0f);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1.0f);
Eigen::Vector4f p_tr (transform * pt_src);
// Calculate the distance from the transformed point to its correspondence
// need to compute the real norm here to keep MSAC and friends general
- distances[i] = std::sqrt ((uv[0] - target_->points[(*indices_tgt_)[i]].u) *
- (uv[0] - target_->points[(*indices_tgt_)[i]].u) +
- (uv[1] - target_->points[(*indices_tgt_)[i]].v) *
- (uv[1] - target_->points[(*indices_tgt_)[i]].v));
+ distances[i] = std::sqrt ((uv[0] - (*target_)[(*indices_tgt_)[i]].u) *
+ (uv[0] - (*target_)[(*indices_tgt_)[i]].u) +
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v) *
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v));
}
}
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1.0f);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1.0f);
Eigen::Vector4f p_tr (transform * pt_src);
uv /= uv[2];
- double distance = ((uv[0] - target_->points[(*indices_tgt_)[i]].u) *
- (uv[0] - target_->points[(*indices_tgt_)[i]].u) +
- (uv[1] - target_->points[(*indices_tgt_)[i]].v) *
- (uv[1] - target_->points[(*indices_tgt_)[i]].v));
+ double distance = ((uv[0] - (*target_)[(*indices_tgt_)[i]].u) *
+ (uv[0] - (*target_)[(*indices_tgt_)[i]].u) +
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v) *
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v));
// Calculate the distance from the transformed point to its correspondence
if (distance < thresh)
for (std::size_t i = 0; i < indices_->size (); ++i)
{
- Eigen::Vector4f pt_src (input_->points[(*indices_)[i]].x,
- input_->points[(*indices_)[i]].y,
- input_->points[(*indices_)[i]].z, 1.0f);
+ Eigen::Vector4f pt_src ((*input_)[(*indices_)[i]].x,
+ (*input_)[(*indices_)[i]].y,
+ (*input_)[(*indices_)[i]].z, 1.0f);
Eigen::Vector4f p_tr (transform * pt_src);
uv /= uv[2];
// Calculate the distance from the transformed point to its correspondence
- if (((uv[0] - target_->points[(*indices_tgt_)[i]].u) *
- (uv[0] - target_->points[(*indices_tgt_)[i]].u) +
- (uv[1] - target_->points[(*indices_tgt_)[i]].v) *
- (uv[1] - target_->points[(*indices_tgt_)[i]].v)) < thresh)
+ if (((uv[0] - (*target_)[(*indices_tgt_)[i]].u) *
+ (uv[0] - (*target_)[(*indices_tgt_)[i]].u) +
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v) *
+ (uv[1] - (*target_)[(*indices_tgt_)[i]].v)) < thresh)
{
++nr_p;
}
Eigen::Matrix4f temp;
for (int i = 0; i < 4; i++)
{
- temp (i, 0) = input_->points[samples[i]].x;
- temp (i, 1) = input_->points[samples[i]].y;
- temp (i, 2) = input_->points[samples[i]].z;
+ temp (i, 0) = (*input_)[samples[i]].x;
+ temp (i, 1) = (*input_)[samples[i]].y;
+ temp (i, 2) = (*input_)[samples[i]].z;
temp (i, 3) = 1;
}
float m11 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
- temp (i, 0) = (input_->points[samples[i]].x) * (input_->points[samples[i]].x) +
- (input_->points[samples[i]].y) * (input_->points[samples[i]].y) +
- (input_->points[samples[i]].z) * (input_->points[samples[i]].z);
+ temp (i, 0) = ((*input_)[samples[i]].x) * ((*input_)[samples[i]].x) +
+ ((*input_)[samples[i]].y) * ((*input_)[samples[i]].y) +
+ ((*input_)[samples[i]].z) * ((*input_)[samples[i]].z);
}
float m12 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 1) = temp (i, 0);
- temp (i, 0) = input_->points[samples[i]].x;
+ temp (i, 0) = (*input_)[samples[i]].x;
}
float m13 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 2) = temp (i, 1);
- temp (i, 1) = input_->points[samples[i]].y;
+ temp (i, 1) = (*input_)[samples[i]].y;
}
float m14 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 0) = temp (i, 2);
- temp (i, 1) = input_->points[samples[i]].x;
- temp (i, 2) = input_->points[samples[i]].y;
- temp (i, 3) = input_->points[samples[i]].z;
+ temp (i, 1) = (*input_)[samples[i]].x;
+ temp (i, 2) = (*input_)[samples[i]].y;
+ temp (i, 3) = (*input_)[samples[i]].z;
}
float m15 = temp.determinant ();
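// Illustrative sketch (not part of the change set, made-up points): the five
// determinants m11..m15 above implement the classic sphere-through-four-points
// construction. An equivalent formulation that is easier to verify by hand
// solves a small linear system for the centre; this is plain Eigen, not PCL:
#include <Eigen/Dense>
#include <iostream>

int main ()
{
  // Four points that lie on the unit sphere centred at the origin.
  Eigen::Matrix<double, 4, 3> P;
  P <<  1, 0, 0,
       -1, 0, 0,
        0, 1, 0,
        0, 0, 1;
  // Subtracting the sphere equation of point 0 from points 1..3 yields
  // 2 * (p_i - p_0) . c = |p_i|^2 - |p_0|^2, a linear system in the centre c.
  Eigen::Matrix3d A;
  Eigen::Vector3d b;
  for (int i = 1; i < 4; ++i)
  {
    A.row (i - 1) = 2.0 * (P.row (i) - P.row (0));
    b (i - 1) = P.row (i).squaredNorm () - P.row (0).squaredNorm ();
  }
  const Eigen::Vector3d centre = A.colPivHouseholderQr ().solve (b);
  const double radius = (P.row (0).transpose () - centre).norm ();
  std::cout << centre.transpose () << " r=" << radius << std::endl; // 0 0 0 r=1
  return 0;
}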
// Calculate the distance from the point to the sphere as the difference between
//dist(point,sphere_origin) and sphere_radius
distances[i] = std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) +
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] ) *
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] )
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
) - model_coefficients[3]);
}
}
for (std::size_t i = 0; i < indices_->size (); ++i)
{
double distance = std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) +
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] ) *
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] )
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
) - model_coefficients[3]);
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
// Calculate the distance from the point to the sphere as the difference between
// dist(point,sphere_origin) and sphere_radius
if (std::abs (std::sqrt (
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) *
- ( input_->points[(*indices_)[i]].x - model_coefficients[0] ) +
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) *
+ ( (*input_)[(*indices_)[i]].x - model_coefficients[0] ) +
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) *
- ( input_->points[(*indices_)[i]].y - model_coefficients[1] ) +
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) *
+ ( (*input_)[(*indices_)[i]].y - model_coefficients[1] ) +
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] ) *
- ( input_->points[(*indices_)[i]].z - model_coefficients[2] )
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] ) *
+ ( (*input_)[(*indices_)[i]].z - model_coefficients[2] )
) - model_coefficients[3]) < threshold)
nr_p++;
}
}
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.header = input_->header;
projected_points.width = input_->width;
projected_points.height = input_->height;
// Calculate the distance from the point to the sphere as the difference between
//dist(point,sphere_origin) and sphere_radius
if (std::abs (sqrt (
- ( input_->points[index].x - model_coefficients[0] ) *
- ( input_->points[index].x - model_coefficients[0] ) +
- ( input_->points[index].y - model_coefficients[1] ) *
- ( input_->points[index].y - model_coefficients[1] ) +
- ( input_->points[index].z - model_coefficients[2] ) *
- ( input_->points[index].z - model_coefficients[2] )
+ ( (*input_)[index].x - model_coefficients[0] ) *
+ ( (*input_)[index].x - model_coefficients[0] ) +
+ ( (*input_)[index].y - model_coefficients[1] ) *
+ ( (*input_)[index].y - model_coefficients[1] ) +
+ ( (*input_)[index].z - model_coefficients[2] ) *
+ ( (*input_)[index].z - model_coefficients[2] )
) - model_coefficients[3]) > threshold)
{
return (false);
return (false);
}
if (
- (input_->points[samples[0]].x != input_->points[samples[1]].x)
+ ((*input_)[samples[0]].x != (*input_)[samples[1]].x)
&&
- (input_->points[samples[0]].y != input_->points[samples[1]].y)
+ ((*input_)[samples[0]].y != (*input_)[samples[1]].y)
&&
- (input_->points[samples[0]].z != input_->points[samples[1]].z))
+ ((*input_)[samples[0]].z != (*input_)[samples[1]].z))
{
return (true);
}
}
model_coefficients.resize (model_size_);
- model_coefficients[0] = input_->points[samples[0]].x;
- model_coefficients[1] = input_->points[samples[0]].y;
- model_coefficients[2] = input_->points[samples[0]].z;
+ model_coefficients[0] = (*input_)[samples[0]].x;
+ model_coefficients[1] = (*input_)[samples[0]].y;
+ model_coefficients[2] = (*input_)[samples[0]].z;
- model_coefficients[3] = input_->points[samples[1]].x;
- model_coefficients[4] = input_->points[samples[1]].y;
- model_coefficients[5] = input_->points[samples[1]].z;
+ model_coefficients[3] = (*input_)[samples[1]].x;
+ model_coefficients[4] = (*input_)[samples[1]].y;
+ model_coefficients[5] = (*input_)[samples[1]].z;
-// model_coefficients[3] = input_->points[samples[1]].x - model_coefficients[0];
-// model_coefficients[4] = input_->points[samples[1]].y - model_coefficients[1];
-// model_coefficients[5] = input_->points[samples[1]].z - model_coefficients[2];
+// model_coefficients[3] = (*input_)[samples[1]].x - model_coefficients[0];
+// model_coefficients[4] = (*input_)[samples[1]].y - model_coefficients[1];
+// model_coefficients[5] = (*input_)[samples[1]].z - model_coefficients[2];
// model_coefficients.template segment<3> (3).normalize ();
// We don't care about model_coefficients[6] which is the width (radius) of the stick
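// Note (reading of the hunk above, not from the change set): the stick model
// stores the second sample point directly in coefficients [3..5]; the
// commented-out block is the alternative direction-based form, and
// coefficient [6] is reserved for the stick width (radius).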
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- float sqr_distance = (line_pt - input_->points[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
+ float sqr_distance = (line_pt - (*input_)[(*indices_)[i]].getVector4fMap ()).cross3 (line_dir).squaredNorm ();
if (sqr_distance < sqr_threshold)
{
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- Eigen::Vector4f dir = input_->points[(*indices_)[i]].getVector4fMap () - line_pt1;
+ Eigen::Vector4f dir = (*input_)[(*indices_)[i]].getVector4fMap () - line_pt1;
//float u = dir.dot (line_dir);
// If the point falls outside of the segment, ignore it
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- Eigen::Vector4f dir = input_->points[(*indices_)[i]].getVector4fMap () - line_pt1;
+ Eigen::Vector4f dir = (*input_)[(*indices_)[i]].getVector4fMap () - line_pt1;
//float u = dir.dot (line_dir);
// If the point falls outside of the segment, ignore it
if (copy_data_fields)
{
// Allocate enough space and copy the basics
- projected_points.points.resize (input_->points.size ());
+ projected_points.points.resize (input_->size ());
projected_points.width = input_->width;
projected_points.height = input_->height;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
// Iterate over each point
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
{
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[i], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[i], projected_points[i]));
}
// Iterate through the 3d points and calculate the distances from them to the line
for (const auto &inlier : inliers)
{
- Eigen::Vector4f pt (input_->points[inlier].x, input_->points[inlier].y, input_->points[inlier].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[inlier].x, (*input_)[inlier].y, (*input_)[inlier].z, 0.0f);
// double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B;
float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir);
Eigen::Vector4f pp = line_pt + k * line_dir;
// Calculate the projection of the point on the line (pointProj = A + k * B)
- projected_points.points[inlier].x = pp[0];
- projected_points.points[inlier].y = pp[1];
- projected_points.points[inlier].z = pp[2];
+ projected_points[inlier].x = pp[0];
+ projected_points[inlier].y = pp[1];
+ projected_points[inlier].z = pp[2];
}
}
else
{
// Allocate enough space and copy the basics
projected_points.points.resize (inliers.size ());
- projected_points.width = static_cast<std::uint32_t> (inliers.size ());
+ projected_points.width = inliers.size ();
projected_points.height = 1;
using FieldList = typename pcl::traits::fieldList<PointT>::type;
for (std::size_t i = 0; i < inliers.size (); ++i)
{
// Iterate over each dimension
- pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> (input_->points[inliers[i]], projected_points.points[i]));
+ pcl::for_each_type <FieldList> (NdConcatenateFunctor <PointT, PointT> ((*input_)[inliers[i]], projected_points[i]));
}
// Iterate through the 3d points and calculate the distances from them to the line
for (std::size_t i = 0; i < inliers.size (); ++i)
{
- Eigen::Vector4f pt (input_->points[inliers[i]].x, input_->points[inliers[i]].y, input_->points[inliers[i]].z, 0.0f);
+ Eigen::Vector4f pt ((*input_)[inliers[i]].x, (*input_)[inliers[i]].y, (*input_)[inliers[i]].z, 0.0f);
// double k = (DOT_PROD_3D (points[i], p21) - dotA_B) / dotB_B;
float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir);
Eigen::Vector4f pp = line_pt + k * line_dir;
// Calculate the projection of the point on the line (pointProj = A + k * B)
- projected_points.points[i].x = pp[0];
- projected_points.points[i].y = pp[1];
- projected_points.points[i].z = pp[2];
+ projected_points[i].x = pp[0];
+ projected_points[i].y = pp[1];
+ projected_points[i].z = pp[2];
}
}
}
{
// Calculate the distance from the point to the line
// D = ||(P2-P1) x (P1-P0)|| / ||P2-P1|| = norm (cross (p2-p1, p2-p0)) / norm(p2-p1)
- if ((line_pt - input_->points[index].getVector4fMap ()).cross3 (line_dir).squaredNorm () > sqr_threshold)
+ if ((line_pt - (*input_)[index].getVector4fMap ()).cross3 (line_dir).squaredNorm () > sqr_threshold)
{
return (false);
}
else
rng_alg_.seed (12345u);
- if (indices_->size () > input_->points.size ())
+ if (indices_->size () > input_->size ())
{
- PCL_ERROR ("[pcl::SampleConsensusModel] Invalid index vector given with size %lu while the input PointCloud has size %lu!\n", indices_->size (), input_->points.size ());
+ PCL_ERROR("[pcl::SampleConsensusModel] Invalid index vector given with size "
+ "%zu while the input PointCloud has size %zu!\n",
+ indices_->size(),
+ static_cast<std::size_t>(input_->size()));
indices_->clear ();
}
shuffled_indices_ = *indices_;
if (indices_->empty ())
{
// Prepare a set of indices to be used (entire cloud)
- indices_->resize (cloud->points.size ());
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ indices_->resize (cloud->size ());
+ for (std::size_t i = 0; i < cloud->size (); ++i)
(*indices_)[i] = static_cast<index_t> (i);
}
shuffled_indices_ = *indices_;
for (int i = 0; i < values (); ++i)
{
// Compute the difference between the center of the circle and the datapoint X_i
- float xt = model_->input_->points[indices_[i]].x - x[0];
- float yt = model_->input_->points[indices_[i]].y - x[1];
+ float xt = (*model_->input_)[indices_[i]].x - x[0];
+ float yt = (*model_->input_)[indices_[i]].y - x[1];
// g = sqrt ((x-a)^2 + (y-b)^2) - R
fvec[i] = std::sqrt (xt * xt + yt * yt) - x[2];
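// Worked example (made-up numbers, not from the change set): for the 2D circle
// functor above, x = (a, b, R) is the current estimate. With centre (0, 0) and
// R = 2, the sample (3, 4) gives xt = 3, yt = 4 and a residual of
// sqrt (3*3 + 4*4) - 2 = 5 - 2 = 3: the point sits 3 units outside the circle,
// and the nonlinear least-squares refinement drives these residuals towards zero.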
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (model_->input_->points[indices_[i]].x, model_->input_->points[indices_[i]].y, model_->input_->points[indices_[i]].z);
+ Eigen::Vector3d P =
+ (*model_->input_)[indices_[i]].getVector3fMap().template cast<double>();
// C : Circle Center
Eigen::Vector3d C (x[0], x[1], x[2]);
// N : Circle (Plane) Normal
for (int i = 0; i < values (); ++i)
{
// dist = f - r
- Eigen::Vector4f pt (model_->input_->points[indices_[i]].x,
- model_->input_->points[indices_[i]].y,
- model_->input_->points[indices_[i]].z, 0);
+ Eigen::Vector4f pt = (*model_->input_)[indices_[i]].getVector4fMap();
+ pt[3] = 0;
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
for (int i = 0; i < values (); ++i)
{
// dist = f - r
- Eigen::Vector4f pt (model_->input_->points[indices_[i]].x,
- model_->input_->points[indices_[i]].y,
- model_->input_->points[indices_[i]].z, 0);
+ Eigen::Vector4f pt = (*model_->input_)[indices_[i]].getVector4fMap();
+ pt[3] = 0;
fvec[i] = static_cast<float> (pcl::sqrPointToLineDistance (pt, line_pt, line_dir) - x[6]*x[6]);
}
#include <pcl/pcl_macros.h>
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/sac_model_plane.h>
-#include <pcl/sample_consensus/sac_model_perpendicular_plane.h>
#include <pcl/sample_consensus/model_types.h>
namespace pcl
for (int i = 0; i < values (); ++i)
{
// Compute the difference between the center of the sphere and the datapoint X_i
- cen_t[0] = model_->input_->points[indices_[i]].x - x[0];
- cen_t[1] = model_->input_->points[indices_[i]].y - x[1];
- cen_t[2] = model_->input_->points[indices_[i]].z - x[2];
+ cen_t.head<3>() = (*model_->input_)[indices_[i]].getVector3fMap() - x.head<3>();
// g = sqrt ((x-a)^2 + (y-b)^2 + (z-c)^2) - R
fvec[i] = std::sqrt (cen_t.dot (cen_t)) - x[3];
*/
#include <pcl/sample_consensus/impl/sac_model_normal_parallel_plane.hpp>
-#include <pcl/sample_consensus/impl/sac_model_plane.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
*/
#include <pcl/sample_consensus/impl/sac_model_normal_plane.hpp>
-#include <pcl/sample_consensus/impl/sac_model_plane.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
*/
#include <pcl/sample_consensus/impl/sac_model_normal_sphere.hpp>
-#include <pcl/sample_consensus/impl/sac_model_sphere.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
*/
#include <pcl/sample_consensus/impl/sac_model_registration.hpp>
-#include <pcl/sample_consensus/impl/sac_model_registration_2d.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
auto iIt = indices_->cbegin ();
auto iEnd = indices_->cbegin () + std::min (static_cast<unsigned> (k), static_cast<unsigned> (indices_->size ()));
for (; iIt != iEnd; ++iIt)
- result.push_back (Entry (*iIt, getDistSqr (input_->points[*iIt], point)));
+ result.push_back (Entry (*iIt, getDistSqr ((*input_)[*iIt], point)));
queue = std::priority_queue<Entry> (result.begin (), result.end ());
Entry entry;
for (; iIt != indices_->cend (); ++iIt)
{
- entry.distance = getDistSqr (input_->points[*iIt], point);
+ entry.distance = getDistSqr ((*input_)[*iIt], point);
if (queue.top ().distance > entry.distance)
{
entry.index = *iIt;
Entry entry;
for (entry.index = 0; entry.index < std::min<pcl::index_t> (k, input_->size ()); ++entry.index)
{
- entry.distance = getDistSqr (input_->points[entry.index], point);
+ entry.distance = getDistSqr ((*input_)[entry.index], point);
result.push_back (entry);
}
// add the rest
for (; entry.index < static_cast<pcl::index_t>(input_->size ()); ++entry.index)
{
- entry.distance = getDistSqr (input_->points[entry.index], point);
+ entry.distance = getDistSqr ((*input_)[entry.index], point);
if (queue.top ().distance > entry.distance)
{
queue.pop ();
auto iIt = indices_->cbegin ();
for (; iIt != indices_->cend () && result.size () < static_cast<unsigned> (k); ++iIt)
{
- if (std::isfinite (input_->points[*iIt].x))
- result.push_back (Entry (*iIt, getDistSqr (input_->points[*iIt], point)));
+ if (std::isfinite ((*input_)[*iIt].x))
+ result.push_back (Entry (*iIt, getDistSqr ((*input_)[*iIt], point)));
}
queue = std::priority_queue<Entry> (result.begin (), result.end ());
Entry entry;
for (; iIt != indices_->cend (); ++iIt)
{
- if (!std::isfinite (input_->points[*iIt].x))
+ if (!std::isfinite ((*input_)[*iIt].x))
continue;
- entry.distance = getDistSqr (input_->points[*iIt], point);
+ entry.distance = getDistSqr ((*input_)[*iIt], point);
if (queue.top ().distance > entry.distance)
{
entry.index = *iIt;
Entry entry;
for (entry.index = 0; (entry.index < static_cast<pcl::index_t>(input_->size ())) && (result.size () < static_cast<std::size_t> (k)); ++entry.index)
{
- if (std::isfinite (input_->points[entry.index].x))
+ if (std::isfinite ((*input_)[entry.index].x))
{
- entry.distance = getDistSqr (input_->points[entry.index], point);
+ entry.distance = getDistSqr ((*input_)[entry.index], point);
result.push_back (entry);
}
}
// add the rest
for (; entry.index < static_cast<pcl::index_t>(input_->size ()); ++entry.index)
{
- if (!std::isfinite (input_->points[entry.index].x))
+ if (!std::isfinite ((*input_)[entry.index].x))
continue;
- entry.distance = getDistSqr (input_->points[entry.index], point);
+ entry.distance = getDistSqr ((*input_)[entry.index], point);
if (queue.top ().distance > entry.distance)
{
queue.pop ();
{
for (const auto& idx : *indices_)
{
- distance = getDistSqr (input_->points[idx], point);
+ distance = getDistSqr ((*input_)[idx], point);
if (distance <= radius)
{
k_indices.push_back (idx);
{
for (std::size_t index = 0; index < input_->size (); ++index)
{
- distance = getDistSqr (input_->points[index], point);
+ distance = getDistSqr ((*input_)[index], point);
if (distance <= radius)
{
k_indices.push_back (index);
{
for (const auto& idx : *indices_)
{
- if (!std::isfinite (input_->points[idx].x))
+ if (!std::isfinite ((*input_)[idx].x))
continue;
- distance = getDistSqr (input_->points[idx], point);
+ distance = getDistSqr ((*input_)[idx], point);
if (distance <= radius)
{
k_indices.push_back (idx);
{
for (std::size_t index = 0; index < input_->size (); ++index)
{
- if (!std::isfinite (input_->points[index].x))
+ if (!std::isfinite ((*input_)[index].x))
continue;
- distance = getDistSqr (input_->points[index], point);
+ distance = getDistSqr ((*input_)[index], point);
if (distance <= radius)
{
k_indices.push_back (index);
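// Illustrative sketch (not part of the change set): the brute-force search
// hunks above keep the k best candidates in a std::priority_queue whose top ()
// is the *worst* distance retained so far (a max-heap), so a closer point only
// has to beat the top. A self-contained version of that pattern, with a
// hypothetical Entry type standing in for the one used above:
#include <algorithm>
#include <array>
#include <cstddef>
#include <queue>
#include <vector>

struct Entry
{
  std::size_t index;
  float distance; // squared distance to the query
  bool operator< (const Entry& other) const { return distance < other.distance; }
};

std::vector<Entry>
bruteForceKnn (const std::vector<std::array<float, 3>>& pts,
               const std::array<float, 3>& q, std::size_t k)
{
  const auto dist_sqr = [&q] (const std::array<float, 3>& p) {
    const float dx = p[0] - q[0], dy = p[1] - q[1], dz = p[2] - q[2];
    return dx * dx + dy * dy + dz * dz;
  };
  std::priority_queue<Entry> queue; // max-heap: top () is the current worst
  for (std::size_t i = 0; i < pts.size (); ++i)
  {
    const Entry entry{i, dist_sqr (pts[i])};
    if (queue.size () < k)
      queue.push (entry);
    else if (entry.distance < queue.top ().distance)
    {
      queue.pop ();
      queue.push (entry);
    }
  }
  std::vector<Entry> result;
  while (!queue.empty ())
  {
    result.push_back (queue.top ());
    queue.pop ();
  }
  std::sort (result.begin (), result.end ()); // ascending by distance
  return result;
}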
#define PCL_SEARCH_KDTREE_IMPL_HPP_
#include <pcl/search/kdtree.h>
-#include <pcl/search/impl/search.hpp>
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, class Tree>
this->getProjectedRadiusSearchBox (query, static_cast<float> (squared_radius), left, right, top, bottom);
// iterate over search box
- if (max_nn == 0 || max_nn >= static_cast<unsigned int> (input_->points.size ()))
- max_nn = static_cast<unsigned int> (input_->points.size ());
+ if (max_nn == 0 || max_nn >= static_cast<unsigned int> (input_->size ()))
+ max_nn = static_cast<unsigned int> (input_->size ());
k_indices.reserve (max_nn);
k_sqr_distances.reserve (max_nn);
{
for (; idx < xEnd; ++idx)
{
- if (!mask_[idx] || !isFinite (input_->points[idx]))
+ if (!mask_[idx] || !isFinite ((*input_)[idx]))
continue;
- float dist_x = input_->points[idx].x - query.x;
- float dist_y = input_->points[idx].y - query.y;
- float dist_z = input_->points[idx].z - query.z;
+ float dist_x = (*input_)[idx].x - query.x;
+ float dist_y = (*input_)[idx].y - query.y;
+ float dist_z = (*input_)[idx].z - query.z;
squared_distance = dist_x * dist_x + dist_y * dist_y + dist_z * dist_z;
- //squared_distance = (input_->points[idx].getVector3fMap () - query.getVector3fMap ()).squaredNorm ();
+ //squared_distance = ((*input_)[idx].getVector3fMap () - query.getVector3fMap ()).squaredNorm ();
if (squared_distance <= squared_radius)
{
k_indices.push_back (idx);
const PointCloud &cloud, index_t index, int k,
Indices &k_indices, std::vector<float> &k_sqr_distances) const
{
- assert (index >= 0 && index < static_cast<index_t> (cloud.points.size ()) && "Out-of-bounds error in nearestKSearch!");
- return (nearestKSearch (cloud.points[index], k, k_indices, k_sqr_distances));
+ assert (index >= 0 && index < static_cast<index_t> (cloud.size ()) && "Out-of-bounds error in nearestKSearch!");
+ return (nearestKSearch (cloud[index], k, k_indices, k_sqr_distances));
}
///////////////////////////////////////////////////////////////////////////////////////////
{
if (!indices_)
{
- assert (index >= 0 && index < static_cast<index_t> (input_->points.size ()) && "Out-of-bounds error in nearestKSearch!");
- return (nearestKSearch (input_->points[index], k, k_indices, k_sqr_distances));
+ assert (index >= 0 && index < static_cast<index_t> (input_->size ()) && "Out-of-bounds error in nearestKSearch!");
+ return (nearestKSearch ((*input_)[index], k, k_indices, k_sqr_distances));
}
assert (index >= 0 && index < static_cast<index_t> (indices_->size ()) && "Out-of-bounds error in nearestKSearch!");
if (index >= static_cast<index_t> (indices_->size ()) || index < 0)
return (0);
- return (nearestKSearch (input_->points[(*indices_)[index]], k, k_indices, k_sqr_distances));
+ return (nearestKSearch ((*input_)[(*indices_)[index]], k, k_indices, k_sqr_distances));
}
///////////////////////////////////////////////////////////////////////////////////////////
Indices &k_indices, std::vector<float> &k_sqr_distances,
unsigned int max_nn) const
{
- assert (index >= 0 && index < static_cast<index_t> (cloud.points.size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch(cloud.points[index], radius, k_indices, k_sqr_distances, max_nn));
+ assert (index >= 0 && index < static_cast<index_t> (cloud.size ()) && "Out-of-bounds error in radiusSearch!");
+ return (radiusSearch(cloud[index], radius, k_indices, k_sqr_distances, max_nn));
}
///////////////////////////////////////////////////////////////////////////////////////////
{
if (!indices_)
{
- assert (index >= 0 && index < static_cast<index_t> (input_->points.size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch (input_->points[index], radius, k_indices, k_sqr_distances, max_nn));
+ assert (index >= 0 && index < static_cast<index_t> (input_->size ()) && "Out-of-bounds error in radiusSearch!");
+ return (radiusSearch ((*input_)[index], radius, k_indices, k_sqr_distances, max_nn));
}
assert (index >= 0 && index < static_cast<index_t> (indices_->size ()) && "Out-of-bounds error in radiusSearch!");
- return (radiusSearch (input_->points[(*indices_)[index]], radius, k_indices, k_sqr_distances, max_nn));
+ return (radiusSearch ((*input_)[(*indices_)[index]], radius, k_indices, k_sqr_distances, max_nn));
}
///////////////////////////////////////////////////////////////////////////////////////////
approxNearestSearch (const PointCloudConstPtr &cloud, index_t query_index, index_t &result_index,
float &sqr_distance)
{
- return (tree_->approxNearestSearch (cloud->points[query_index], result_index, sqr_distance));
+ return (tree_->approxNearestSearch ((*cloud)[query_index], result_index, sqr_distance));
}
/** \brief Search for approximate nearest neighbor at the query point.
/** \brief Search for k-nearest neighbors for the given query point.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
/** \brief Search for k-nearest neighbors for the given query point (zero-copy).
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] index a \a valid index representing a \a valid query point in the dataset given
* by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
/** \brief Search for all the nearest neighbors of the query point in a given radius.
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] cloud the point cloud data
* \param[in] index a \a valid index in \a cloud representing a \a valid (i.e., finite) query point
/** \brief Search for all the nearest neighbors of the query point in a given radius (zero-copy).
*
* \attention This method does not do any bounds checking for the input index
- * (i.e., index >= cloud.points.size () || index < 0), and assumes valid (i.e., finite) data.
+ * (i.e., index >= cloud.size () || index < 0), and assumes valid (i.e., finite) data.
*
* \param[in] index a \a valid index representing a \a valid query point in the dataset given
* by \a setInputCloud. If indices were given in setInputCloud, index will be the position in
float euclidean_dist_threshold = euclidean_distance_threshold_;
if (depth_dependent_)
{
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
float z = vec.dot (z_axis_);
dist_threshold *= z * z;
euclidean_dist_threshold *= z * z;
}
- float dx = input_->points[idx1].x - input_->points[idx2].x;
- float dy = input_->points[idx1].y - input_->points[idx2].y;
- float dz = input_->points[idx1].z - input_->points[idx2].z;
+ float dx = (*input_)[idx1].x - (*input_)[idx2].x;
+ float dy = (*input_)[idx1].y - (*input_)[idx2].y;
+ float dz = (*input_)[idx1].z - (*input_)[idx2].z;
float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
- bool normal_ok = (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ );
+ bool normal_ok = ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ );
bool dist_ok = (dist < euclidean_dist_threshold);
- bool curvature_ok = normals_->points[idx1].curvature < curvature_threshold_;
+ bool curvature_ok = (*normals_)[idx1].curvature < curvature_threshold_;
bool plane_d_ok = std::abs ((*plane_coeff_d_)[idx1] - (*plane_coeff_d_)[idx2]) < dist_threshold;
if (distance_map_[idx1] < distance_map_threshold_)
float dist_threshold = distance_threshold_;
if (depth_dependent_)
{
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
float z = vec.dot (z_axis_);
dist_threshold *= z * z;
}
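// Note (hedged aside, not from the change set): the z * z factor above makes
// the comparator's distance threshold depth dependent. Random depth error for
// consumer depth cameras is commonly modelled as growing roughly quadratically
// with range, so a threshold tuned near the sensor is relaxed for distant
// points. Worked example with made-up numbers: distance_threshold_ = 0.02 m
// and a point at z = 2 m give an effective threshold of 0.02 * 2 * 2 = 0.08 m.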
bool
compare (int idx1, int idx2) const override
{
- float dx = input_->points[idx1].x - input_->points[idx2].x;
- float dy = input_->points[idx1].y - input_->points[idx2].y;
- float dz = input_->points[idx1].z - input_->points[idx2].z;
+ float dx = (*input_)[idx1].x - (*input_)[idx2].x;
+ float dy = (*input_)[idx1].y - (*input_)[idx2].y;
+ float dz = (*input_)[idx1].z - (*input_)[idx2].z;
float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
return ( (dist < distance_threshold_)
- && (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ ) );
+ && ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ ) );
}
};
}
unsigned int min_pts_per_cluster = 1,
unsigned int max_pts_per_cluster = (std::numeric_limits<int>::max) ())
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point "
+ "cloud dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
- if (cloud.points.size () != normals.points.size ())
+ if (cloud.size () != normals.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Number of points in the input point cloud (%lu) different than normals (%lu)!\n", cloud.points.size (), normals.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point "
+ "cloud (%zu) different than normals (%zu)!\n",
+ static_cast<std::size_t>(cloud.size()),
+ static_cast<std::size_t>(normals.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
if (processed[i])
continue;
//processed[nn_indices[j]] = true;
// [-1;1]
- double dot_p = normals.points[i].normal[0] * normals.points[nn_indices[j]].normal[0] +
- normals.points[i].normal[1] * normals.points[nn_indices[j]].normal[1] +
- normals.points[i].normal[2] * normals.points[nn_indices[j]].normal[2];
- if ( std::abs (std::acos (dot_p)) < eps_angle )
+ double dot_p = normals[i].normal[0] * normals[nn_indices[j]].normal[0] +
+ normals[i].normal[1] * normals[nn_indices[j]].normal[1] +
+ normals[i].normal[2] * normals[nn_indices[j]].normal[2];
+ if ( std::acos (std::abs (dot_p)) < eps_angle )
{
processed[nn_indices[j]] = true;
seed_queue.push_back (nn_indices[j]);
{
// \note If the tree was created over <cloud, indices>, we guarantee a 1-1 mapping between what the tree returns
//and indices[i]
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
- {
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ if (tree->getInputCloud()->size() != cloud.size()) {
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point "
+ "cloud dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
- if (tree->getIndices ()->size () != indices.size ())
- {
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different set of indices (%lu) than the input set (%lu)!\n", tree->getIndices ()->size (), indices.size ());
+ if (tree->getIndices()->size() != indices.size()) {
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different set of "
+ "indices (%zu) than the input set (%zu)!\n",
+ static_cast<std::size_t>(tree->getIndices()->size()),
+ indices.size());
return;
}
- if (cloud.points.size () != normals.points.size ())
- {
- PCL_ERROR ("[pcl::extractEuclideanClusters] Number of points in the input point cloud (%lu) different than normals (%lu)!\n", cloud.points.size (), normals.points.size ());
+ if (cloud.size() != normals.size()) {
+ PCL_ERROR("[pcl::extractEuclideanClusters] Number of points in the input point "
+ "cloud (%zu) different than normals (%zu)!\n",
+ static_cast<std::size_t>(cloud.size()),
+ static_cast<std::size_t>(normals.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
while (sq_idx < static_cast<int> (seed_queue.size ()))
{
// Search for sq_idx
- if (!tree->radiusSearch (cloud.points[seed_queue[sq_idx]], tolerance, nn_indices, nn_distances))
+ if (!tree->radiusSearch (cloud[seed_queue[sq_idx]], tolerance, nn_indices, nn_distances))
{
sq_idx++;
continue;
//processed[nn_indices[j]] = true;
// [-1;1]
double dot_p =
- normals.points[indices[i]].normal[0] * normals.points[indices[nn_indices[j]]].normal[0] +
- normals.points[indices[i]].normal[1] * normals.points[indices[nn_indices[j]]].normal[1] +
- normals.points[indices[i]].normal[2] * normals.points[indices[nn_indices[j]]].normal[2];
- if ( std::abs (std::acos (dot_p)) < eps_angle )
+ normals[indices[i]].normal[0] * normals[indices[nn_indices[j]]].normal[0] +
+ normals[indices[i]].normal[1] * normals[indices[nn_indices[j]]].normal[1] +
+ normals[indices[i]].normal[2] * normals[indices[nn_indices[j]]].normal[2];
+ if ( std::acos (std::abs (dot_p)) < eps_angle )
{
processed[nn_indices[j]] = true;
seed_queue.push_back (nn_indices[j]);
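// Note (reading of the change above, not from the change set itself):
// std::acos already returns a value in [0, pi], so the old test
// std::abs (std::acos (dot_p)) < eps_angle never accepted anti-parallel
// normals (dot_p near -1 maps to an angle near pi). The new test
// std::acos (std::abs (dot_p)) < eps_angle folds the sign of the dot product
// first, so flipped normals count as belonging to the same smooth surface.
// Worked example: dot_p = -0.999 gives acos (-0.999) ~ 3.10 rad (rejected for
// any small eps_angle) but acos (0.999) ~ 0.045 rad (accepted).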
#pragma once
+#include <deque> // for deque
+#include <map> // for map
#include <pcl/memory.h>
#include <pcl/point_cloud.h>
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
-#include <pcl/segmentation/boost.h>
#include <pcl/search/search.h>
namespace pcl
float threshold = distance_threshold_;
if (depth_dependent_)
{
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
float z = vec.dot (z_axis_);
threshold *= z * z;
}
- return ( (normals_->points[idx1].getNormalVector3fMap ().dot (desired_road_axis_) > road_angular_threshold_) &&
- (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ ));
+ return ( ((*normals_)[idx1].getNormalVector3fMap ().dot (desired_road_axis_) > road_angular_threshold_) &&
+ ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ ));
// Euclidean proximity of neighbors does not seem to be required -- pixel adjacency handles this well enough
- //return ( (normals_->points[idx1].getNormalVector3fMap ().dot (desired_road_axis_) > road_angular_threshold_) &&
- // (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ ) &&
- // (pcl::euclideanDistance (input_->points[idx1], input_->points[idx2]) < distance_threshold_ ));
+ //return ( ((*normals_)[idx1].getNormalVector3fMap ().dot (desired_road_axis_) > road_angular_threshold_) &&
+ // ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ ) &&
+ // (pcl::euclideanDistance ((*input_)[idx1], (*input_)[idx2]) < distance_threshold_ ));
}
protected:
default(none) \
shared(A, global_min) \
num_threads(threads_)
- for (int i = 0; i < (int)input_->points.size (); ++i)
+ for (int i = 0; i < (int)input_->size (); ++i)
{
// ...then test for lower points within the cell
- PointT p = input_->points[i];
+ PointT p = (*input_)[i];
int row = std::floor((p.y - global_min.y ()) / cell_size_);
int col = std::floor((p.x - global_min.x ()) / cell_size_);
std::vector<int> pt_indices;
for (std::size_t p_idx = 0; p_idx < ground.size (); ++p_idx)
{
- PointT p = cloud->points[p_idx];
+ PointT p = (*cloud)[p_idx];
int erow = static_cast<int> (std::floor ((p.y - global_min.y ()) / cell_size_));
int ecol = static_cast<int> (std::floor ((p.x - global_min.x ()) / cell_size_));
// Create a bool vector of processed point indices, and initialize it to false
// Need to have it contain all possible points because radiusSearch cannot return indices relative to the indices subset
- std::vector<bool> processed (input_->points.size (), false);
+ std::vector<bool> processed (input_->size (), false);
// Process all points indexed by indices_
for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
while (cii < static_cast<int> (current_cluster.size ()))
{
// Search for neighbors around the current seed point of the current cluster
- if (searcher_->radiusSearch (input_->points[current_cluster[cii]], cluster_tolerance_, nn_indices, nn_distances) < 1)
+ if (searcher_->radiusSearch ((*input_)[current_cluster[cii]], cluster_tolerance_, nn_indices, nn_distances) < 1)
{
cii++;
continue;
continue;
// Validate if condition holds
- if (condition_function_ (input_->points[current_cluster[cii]], input_->points[nn_indices[nii]], nn_distances[nii]))
+ if (condition_function_ ((*input_)[current_cluster[cii]], (*input_)[nn_indices[nii]], nn_distances[nii]))
{
// Add the point to the cluster
current_cluster.push_back (nn_indices[nii]);
weights.resize (seg_to_edge_points.second->size ());
for (std::size_t cp = 0; cp < seg_to_edge_points.second->size (); ++cp)
{
- float& cur_weight = seg_to_edge_points.second->points[cp].intensity;
+ float& cur_weight = (*seg_to_edge_points.second)[cp].intensity;
cur_weight = cur_weight < concavity_tolerance_threshold_ ? 0 : 1;
weights[cp] = cur_weight;
}
if (index != -1)
{
data_.push_back (Eigen::Vector3i (i, j, k));
- color_.push_back (input_cloud_->points[index].getRGBVector3i ());
+ color_.push_back ((*input_cloud_)[index].getRGBVector3i ());
}
}
}
// reserve space for the data vector
- data_.resize (filtered_cloud_->points.size ());
+ data_.resize (filtered_cloud_->size ());
std::vector< pcl::PCLPointField > fields;
// check if we have color data
if (rgba_index >= 0)
{
color_data = true;
- color_.resize (filtered_cloud_->points.size ());
+ color_.resize (filtered_cloud_->size ());
}
if (rgba_index >= 0)
{
normal_data = true;
- normal_.resize (filtered_cloud_->points.size ());
+ normal_.resize (filtered_cloud_->size ());
}
*/
// fill the data vector
- for (std::size_t i = 0; i < filtered_cloud_->points.size (); i++)
+ for (std::size_t i = 0; i < filtered_cloud_->size (); i++)
{
- Eigen::Vector3f p (filtered_anno_->points[i].x,
- filtered_anno_->points[i].y,
- filtered_anno_->points[i].z);
+ Eigen::Vector3f p ((*filtered_anno_)[i].x,
+ (*filtered_anno_)[i].y,
+ (*filtered_anno_)[i].z);
Eigen::Vector3i c = voxel_grid_.getGridCoordinates (p.x (), p.y (), p.z ());
data_[i] = c;
if (color_data)
{
- std::uint32_t rgb = *reinterpret_cast<int*>(&filtered_cloud_->points[i].rgba);
+ std::uint32_t rgb = *reinterpret_cast<int*>(&(*filtered_cloud_)[i].rgba);
std::uint8_t r = (rgb >> 16) & 0x0000ff;
std::uint8_t g = (rgb >> 8) & 0x0000ff;
std::uint8_t b = (rgb) & 0x0000ff;
/*
if (normal_data)
{
- float n_x = filtered_cloud_->points[i].normal_x;
- float n_y = filtered_cloud_->points[i].normal_y;
- float n_z = filtered_cloud_->points[i].normal_z;
+ float n_x = (*filtered_cloud_)[i].normal_x;
+ float n_y = (*filtered_cloud_)[i].normal_y;
+ float n_z = (*filtered_cloud_)[i].normal_z;
normal_[i] = Eigen::Vector3f (n_x, n_y, n_z);
}
*/
}
- normal_.resize (filtered_normal_->points.size ());
- for (std::size_t i = 0; i < filtered_normal_->points.size (); i++)
+ normal_.resize (filtered_normal_->size ());
+ for (std::size_t i = 0; i < filtered_normal_->size (); i++)
{
- float n_x = filtered_normal_->points[i].normal_x;
- float n_y = filtered_normal_->points[i].normal_y;
- float n_z = filtered_normal_->points[i].normal_z;
+ float n_x = (*filtered_normal_)[i].normal_x;
+ float n_y = (*filtered_normal_)[i].normal_y;
+ float n_z = (*filtered_normal_)[i].normal_z;
normal_[i] = Eigen::Vector3f (n_x, n_y, n_z);
}
const float n_energy = -std::log ( (1.0f - GT_PROB) / static_cast<float>(n_labels - 1) );
const float p_energy = -std::log ( GT_PROB );
- for (std::size_t k = 0; k < filtered_anno_->points.size (); k++)
+ for (std::size_t k = 0; k < filtered_anno_->size (); k++)
{
- int label = filtered_anno_->points[k].label;
+ int label = (*filtered_anno_)[k].label;
if (labels.empty () && label > 0)
labels.push_back (label);
for (std::size_t i = 0; i < N; i++)
{
- tmp_cloud_OLD.points[i].label = map[i];
+ tmp_cloud_OLD[i].label = map[i];
}
for (int i = 0; i < N; i++)
{
- output.points[i].label = labels[r[i]];
+ output[i].label = labels[r[i]];
}
tmp_cloud = *filtered_anno_;
bool c = true;
- for (std::size_t i = 0; i < tmp_cloud.points.size (); i++)
+ for (std::size_t i = 0; i < tmp_cloud.size (); i++)
{
- if (tmp_cloud.points[i].label != tmp_cloud_OLD.points[i].label)
+ if (tmp_cloud[i].label != tmp_cloud_OLD[i].label)
{
- std::cout << "idx: " << i << " = " <<tmp_cloud.points[i].label << " | " << tmp_cloud_OLD.points[i].label << std::endl;
+ std::cout << "idx: " << i << " = " <<tmp_cloud[i].label << " | " << tmp_cloud_OLD[i].label << std::endl;
c = false;
break;
}
unsigned int min_pts_per_cluster,
unsigned int max_pts_per_cluster)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
// Check if the tree is sorted -- if it is we don't need to check the first element
int nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
{
if (processed[i])
continue;
{
// \note If the tree was created over <cloud, indices>, we guarantee a 1-1 mapping between what the tree returns
// and indices[i]
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
- {
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ if (tree->getInputCloud()->size() != cloud.size()) {
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
- if (tree->getIndices ()->size () != indices.size ())
- {
- PCL_ERROR ("[pcl::extractEuclideanClusters] Tree built for a different set of indices (%lu) than the input set (%lu)!\n", tree->getIndices ()->size (), indices.size ());
+ if (tree->getIndices()->size() != indices.size()) {
+ PCL_ERROR("[pcl::extractEuclideanClusters] Tree built for a different set of "
+ "indices (%zu) than the input set (%zu)!\n",
+ static_cast<std::size_t>(tree->getIndices()->size()),
+ indices.size());
return;
}
// Check if the tree is sorted -- if it is we don't need to check the first element
int nn_start_idx = tree->getSortedResults () ? 1 : 0;
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
while (sq_idx < static_cast<int> (seed_queue.size ()))
{
// Search for sq_idx
- int ret = tree->radiusSearch (cloud.points[seed_queue[sq_idx]], tolerance, nn_indices, nn_distances);
+ int ret = tree->radiusSearch (cloud[seed_queue[sq_idx]], tolerance, nn_indices, nn_distances);
if( ret == -1)
{
PCL_ERROR("[pcl::extractEuclideanClusters] Received error code -1 from radiusSearch\n");
unsigned int max_pts_per_cluster,
unsigned int)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::extractLabeledEuclideanClusters] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::extractLabeledEuclideanClusters] Tree built for a different point "
+ "cloud dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
// Process all points in the indices vector
- for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
{
if (processed[i])
continue;
{
if (processed[nn_indices[j]]) // Has this point been processed before ?
continue;
- if (cloud.points[i].label == cloud.points[nn_indices[j]].label)
+ if (cloud[i].label == cloud[nn_indices[j]].label)
{
// Perform a simple Euclidean clustering
seed_queue.push_back (nn_indices[j]);
r.indices.erase (std::unique (r.indices.begin (), r.indices.end ()), r.indices.end ());
r.header = cloud.header;
- labeled_clusters[cloud.points[i].label].push_back (r); // We could avoid a copy by working directly in the vector
+ labeled_clusters[cloud[i].label].push_back (r); // We could avoid a copy by working directly in the vector
}
}
}
k2 = (k0 + 2) % 3;
// Project the convex hull
pcl::PointCloud<PointT> xy_polygon;
- xy_polygon.points.resize (polygon.points.size ());
- for (std::size_t i = 0; i < polygon.points.size (); ++i)
+ xy_polygon.points.resize (polygon.size ());
+ for (std::size_t i = 0; i < polygon.size (); ++i)
{
- Eigen::Vector4f pt (polygon.points[i].x, polygon.points[i].y, polygon.points[i].z, 0);
- xy_polygon.points[i].x = pt[k1];
- xy_polygon.points[i].y = pt[k2];
- xy_polygon.points[i].z = 0;
+ Eigen::Vector4f pt (polygon[i].x, polygon[i].y, polygon[i].z, 0);
+ xy_polygon[i].x = pt[k1];
+ xy_polygon[i].y = pt[k2];
+ xy_polygon[i].z = 0;
}
PointT xy_point;
xy_point.z = 0;
bool in_poly = false;
double x1, x2, y1, y2;
- int nr_poly_points = static_cast<int> (polygon.points.size ());
+ const auto nr_poly_points = polygon.size ();
// start with the last point so that the last-point <-> first-point edge is checked first
- double xold = polygon.points[nr_poly_points - 1].x;
- double yold = polygon.points[nr_poly_points - 1].y;
- for (int i = 0; i < nr_poly_points; i++)
+ double xold = polygon[nr_poly_points - 1].x;
+ double yold = polygon[nr_poly_points - 1].y;
+ for (std::size_t i = 0; i < nr_poly_points; i++)
{
- double xnew = polygon.points[i].x;
- double ynew = polygon.points[i].y;
+ double xnew = polygon[i].x;
+ double ynew = polygon[i].y;
if (xnew > xold)
{
x1 = xold;
return;
}
- if (static_cast<int> (planar_hull_->points.size ()) < min_pts_hull_)
+ if (static_cast<int> (planar_hull_->size ()) < min_pts_hull_)
{
- PCL_ERROR ("[pcl::%s::segment] Not enough points (%lu) in the hull!\n", getClassName ().c_str (), planar_hull_->points.size ());
+ PCL_ERROR("[pcl::%s::segment] Not enough points (%zu) in the hull!\n",
+ getClassName().c_str(),
+ static_cast<std::size_t>(planar_hull_->size()));
output.indices.clear ();
return;
}
// Need to flip the plane normal towards the viewpoint
Eigen::Vector4f vp (vpx_, vpy_, vpz_, 0);
// See if we need to flip any plane normals
- vp -= planar_hull_->points[0].getVector4fMap ();
+ vp -= (*planar_hull_)[0].getVector4fMap ();
vp[3] = 0;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp.dot (model_coefficients);
model_coefficients *= -1;
model_coefficients[3] = 0;
// Hessian form (D = nc . p_plane (centroid here) + p)
- model_coefficients[3] = -1 * (model_coefficients.dot (planar_hull_->points[0].getVector4fMap ()));
+ model_coefficients[3] = -1 * (model_coefficients.dot ((*planar_hull_)[0].getVector4fMap ()));
}
// Project all points
k2 = (k0 + 2) % 3;
// Project the convex hull
pcl::PointCloud<PointT> polygon;
- polygon.points.resize (planar_hull_->points.size ());
- for (std::size_t i = 0; i < planar_hull_->points.size (); ++i)
+ polygon.points.resize (planar_hull_->size ());
+ for (std::size_t i = 0; i < planar_hull_->size (); ++i)
{
- Eigen::Vector4f pt (planar_hull_->points[i].x, planar_hull_->points[i].y, planar_hull_->points[i].z, 0);
- polygon.points[i].x = pt[k1];
- polygon.points[i].y = pt[k2];
- polygon.points[i].z = 0;
+ Eigen::Vector4f pt ((*planar_hull_)[i].x, (*planar_hull_)[i].y, (*planar_hull_)[i].z, 0);
+ polygon[i].x = pt[k1];
+ polygon[i].y = pt[k2];
+ polygon[i].z = 0;
}
PointT pt_xy;
output.indices.resize (indices_->size ());
int l = 0;
- for (std::size_t i = 0; i < projected_points.points.size (); ++i)
+ for (std::size_t i = 0; i < projected_points.size (); ++i)
{
// Check the distance to the user imposed limits from the table planar model
- double distance = pointToPlaneDistanceSigned (input_->points[(*indices_)[i]], model_coefficients);
+ double distance = pointToPlaneDistanceSigned ((*input_)[(*indices_)[i]], model_coefficients);
if (distance < height_limit_min_ || distance > height_limit_max_)
continue;
// Check what points are inside the hull
- Eigen::Vector4f pt (projected_points.points[i].x,
- projected_points.points[i].y,
- projected_points.points[i].z, 0);
+ Eigen::Vector4f pt (projected_points[i].x,
+ projected_points[i].y,
+ projected_points[i].z, 0);
pt_xy.x = pt[k1];
pt_xy.y = pt[k2];
image_.reset (new Image (input_->width, input_->height));
for (std::size_t i = 0; i < input_->size (); ++i)
{
- (*image_) [i] = Color (input_->points[i]);
+ (*image_) [i] = Color ((*input_)[i]);
}
width_ = image_->width;
height_ = image_->height;
{
case TrimapUnknown :
{
- fore = static_cast<float> (-std::log (background_GMM_.probabilityDensity (image_->points[point_index])));
- back = static_cast<float> (-std::log (foreground_GMM_.probabilityDensity (image_->points[point_index])));
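+ // Data terms for unknown pixels are negative log-likelihoods under the opposite
+ // GMM: the foreground cost uses the background model and vice versa.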
+ fore = static_cast<float> (-std::log (background_GMM_.probabilityDensity ((*image_)[point_index])));
+ back = static_cast<float> (-std::log (foreground_GMM_.probabilityDensity ((*image_)[point_index])));
break;
}
case TrimapBackground :
{
if (*nn_it != point_index)
{
- float color_distance = squaredEuclideanDistance (image_->points[point_index], image_->points[*nn_it]);
+ float color_distance = squaredEuclideanDistance ((*image_)[point_index], (*image_)[*nn_it]);
links.weights.push_back (color_distance);
result+= color_distance;
++edges;
std::size_t upleft = (y+1) * input_->width + x - 1;
links.indices[0] = upleft;
links.dists[0] = std::sqrt (2.f);
- float color_dist = squaredEuclideanDistance (image_->points[point_index],
- image_->points[upleft]);
+ float color_dist = squaredEuclideanDistance ((*image_)[point_index],
+ (*image_)[upleft]);
links.weights[0] = color_dist;
result+= color_dist;
edges++;
std::size_t up = (y+1) * input_->width + x;
links.indices[1] = up;
links.dists[1] = 1;
- float color_dist = squaredEuclideanDistance (image_->points[point_index],
- image_->points[up]);
+ float color_dist = squaredEuclideanDistance ((*image_)[point_index],
+ (*image_)[up]);
links.weights[1] = color_dist;
result+= color_dist;
edges++;
std::size_t upright = (y+1) * input_->width + x + 1;
links.indices[2] = upright;
links.dists[2] = std::sqrt (2.f);
- float color_dist = squaredEuclideanDistance (image_->points[point_index],
+ float color_dist = squaredEuclideanDistance ((*image_)[point_index],
image_->points [upright]);
links.weights[2] = color_dist;
result+= color_dist;
std::size_t right = y * input_->width + x + 1;
links.indices[3] = right;
links.dists[3] = 1;
- float color_dist = squaredEuclideanDistance (image_->points[point_index],
- image_->points[right]);
+ float color_dist = squaredEuclideanDistance ((*image_)[point_index],
+ (*image_)[right]);
links.weights[3] = color_dist;
result+= color_dist;
edges++;
pcl::MinCutSegmentation<PointT>::setForegroundPoints (typename pcl::PointCloud<PointT>::Ptr foreground_points)
{
foreground_points_.clear ();
- foreground_points_.reserve (foreground_points->points.size ());
- for (std::size_t i_point = 0; i_point < foreground_points->points.size (); i_point++)
- foreground_points_.push_back (foreground_points->points[i_point]);
+ foreground_points_.insert(
+ foreground_points_.end(), foreground_points->cbegin(), foreground_points->cend());
unary_potentials_are_valid_ = false;
}
pcl::MinCutSegmentation<PointT>::setBackgroundPoints (typename pcl::PointCloud<PointT>::Ptr background_points)
{
background_points_.clear ();
- background_points_.reserve (background_points->points.size ());
- for (std::size_t i_point = 0; i_point < background_points->points.size (); i_point++)
- background_points_.push_back (background_points->points[i_point]);
+ background_points_.insert(
+ background_points_.end(), background_points->cbegin(), background_points->cend());
unary_potentials_are_valid_ = false;
}
template <typename PointT> bool
pcl::MinCutSegmentation<PointT>::buildGraph ()
{
- int number_of_points = static_cast<int> (input_->points.size ());
- int number_of_indices = static_cast<int> (indices_->size ());
+ const auto number_of_points = input_->size ();
+ const auto number_of_indices = indices_->size ();
if (input_->points.empty () || number_of_points == 0 || foreground_points_.empty () == true )
return (false);
edge_marker_.clear ();
edge_marker_.resize (number_of_points + 2, out_edges_marker);
- for (int i_point = 0; i_point < number_of_points + 2; i_point++)
+ for (std::size_t i_point = 0; i_point < number_of_points + 2; i_point++)
vertices_[i_point] = boost::add_vertex (*graph_);
source_ = vertices_[number_of_points];
sink_ = vertices_[number_of_points + 1];
- for (int i_point = 0; i_point < number_of_indices; i_point++)
+ for (std::size_t i_point = 0; i_point < number_of_indices; i_point++)
{
index_t point_index = (*indices_)[i_point];
double source_weight = 0.0;
std::vector<int> neighbours;
std::vector<float> distances;
search_->setInputCloud (input_, indices_);
- for (int i_point = 0; i_point < number_of_indices; i_point++)
+ for (std::size_t i_point = 0; i_point < number_of_indices; i_point++)
{
index_t point_index = (*indices_)[i_point];
search_->nearestKSearch (i_point, number_of_neighbours_, neighbours, distances);
//double closest_background_point[] = {0.0, 0.0};
double initial_point[] = {0.0, 0.0};
- initial_point[0] = input_->points[point].x;
- initial_point[1] = input_->points[point].y;
+ initial_point[0] = (*input_)[point].x;
+ initial_point[1] = (*input_)[point].y;
for (std::size_t i_point = 0; i_point < foreground_points_.size (); i_point++)
{
{
double weight = 0.0;
double distance = 0.0;
- distance += (input_->points[source].x - input_->points[target].x) * (input_->points[source].x - input_->points[target].x);
- distance += (input_->points[source].y - input_->points[target].y) * (input_->points[source].y - input_->points[target].y);
- distance += (input_->points[source].z - input_->points[target].z) * (input_->points[source].z - input_->points[target].z);
+ distance += ((*input_)[source].x - (*input_)[target].x) * ((*input_)[source].x - (*input_)[target].x);
+ distance += ((*input_)[source].y - (*input_)[target].y) * ((*input_)[source].y - (*input_)[target].y);
+ distance += ((*input_)[source].z - (*input_)[target].z) * ((*input_)[source].z - (*input_)[target].z);
distance *= inverse_sigma_;
weight = std::exp (-distance);
pcl::MinCutSegmentation<PointT>::assembleLabels (ResidualCapacityMap& residual_capacity)
{
std::vector<int> labels;
- labels.resize (input_->points.size (), 0);
+ labels.resize (input_->size (), 0);
int number_of_indices = static_cast<int> (indices_->size ());
for (int i_point = 0; i_point < number_of_indices; i_point++)
labels[(*indices_)[i_point]] = 1;
for (int i_point = 0; i_point < num_of_pts_in_first_cluster; i_point++)
{
index_t point_index = clusters_[0].indices[i_point];
- point.x = *(input_->points[point_index].data);
- point.y = *(input_->points[point_index].data + 1);
- point.z = *(input_->points[point_index].data + 2);
+ point.x = *((*input_)[point_index].data);
+ point.y = *((*input_)[point_index].data + 1);
+ point.z = *((*input_)[point_index].data + 2);
point.r = background_color[0];
point.g = background_color[1];
point.b = background_color[2];
for (int i_point = 0; i_point < num_of_pts_in_second_cluster; i_point++)
{
index_t point_index = clusters_[1].indices[i_point];
- point.x = *(input_->points[point_index].data);
- point.y = *(input_->points[point_index].data + 1);
- point.z = *(input_->points[point_index].data + 2);
+ point.x = *((*input_)[point_index].data);
+ point.y = *((*input_)[point_index].data + 1);
+ point.z = *((*input_)[point_index].data + 2);
point.r = foreground_color[0];
point.g = foreground_color[1];
point.b = foreground_color[2];
int curr_idx = start_idx;
int curr_x = start_idx % labels->width;
int curr_y = start_idx / labels->width;
- unsigned label = labels->points[start_idx].label;
+ unsigned label = (*labels)[start_idx].label;
// fill lookup table for next points to visit
Neighbor directions [8] = {Neighbor(-1, 0, -1),
x = curr_x + directions [dIdx].d_x;
y = curr_y + directions [dIdx].d_y;
index = curr_idx + directions [dIdx].d_index;
- if (x >= 0 && x < int(labels->width) && y >= 0 && y < int(labels->height) && labels->points[index].label != label)
+ if (x >= 0 && x < int(labels->width) && y >= 0 && y < int(labels->height) && (*labels)[index].label != label)
{
direction = dIdx;
break;
x = curr_x + directions [nIdx].d_x;
y = curr_y + directions [nIdx].d_y;
index = curr_idx + directions [nIdx].d_index;
- if (x >= 0 && x < int(labels->width) && y >= 0 && y < int(labels->height) && labels->points[index].label == label)
+ if (x >= 0 && x < int(labels->width) && y >= 0 && y < int(labels->height) && (*labels)[index].label == label)
break;
}
unsigned invalid_label = std::numeric_limits<unsigned>::max ();
PointLT invalid_pt;
invalid_pt.label = std::numeric_limits<unsigned>::max ();
- labels.points.resize (input_->points.size (), invalid_pt);
+ labels.points.resize (input_->size (), invalid_pt);
labels.width = input_->width;
labels.height = input_->height;
std::size_t clust_id = 0;
//First pixel
- if (std::isfinite (input_->points[0].x))
+ if (std::isfinite ((*input_)[0].x))
{
labels[0].label = clust_id++;
run_ids.push_back (labels[0].label );
// First row
for (int colIdx = 1; colIdx < static_cast<int> (input_->width); ++colIdx)
{
- if (!std::isfinite (input_->points[colIdx].x))
+ if (!std::isfinite ((*input_)[colIdx].x))
continue;
if (compare_->compare (colIdx, colIdx - 1 ))
{
for (std::size_t rowIdx = 1; rowIdx < input_->height; ++rowIdx, previous_row = current_row, current_row += input_->width)
{
// First pixel
- if (std::isfinite (input_->points[current_row].x))
+ if (std::isfinite ((*input_)[current_row].x))
{
if (compare_->compare (current_row, previous_row))
{
// Rest of row
for (int colIdx = 1; colIdx < static_cast<int> (input_->width); ++colIdx)
{
- if (std::isfinite (input_->points[current_row + colIdx].x))
+ if (std::isfinite ((*input_)[current_row + colIdx].x))
{
if (compare_->compare (current_row + colIdx, current_row + colIdx - 1))
{
}
label_indices.resize (max_id + 1);
- for (std::size_t idx = 0; idx < input_->points.size (); idx++)
+ for (std::size_t idx = 0; idx < input_->size (); idx++)
{
if (labels[idx].label != invalid_label)
{
{
Eigen::Vector3f norm (normal[0], normal[1], normal[2]); //(region.coefficients_[0], region.coefficients_[1], region.coefficients_[2]);
pcl::PointCloud<PointT> projected_cloud;
- projected_cloud.resize (cloud.points.size ());
- for (std::size_t i = 0; i < cloud.points.size (); i++)
+ projected_cloud.resize (cloud.size ());
+ for (std::size_t i = 0; i < cloud.size (); i++)
{
- Eigen::Vector3f pt (cloud.points[i].x, cloud.points[i].y, cloud.points[i].z);
+ Eigen::Vector3f pt (cloud[i].x, cloud[i].y, cloud[i].z);
//Eigen::Vector3f intersection = (vp, pt, norm, centroid);
float u = norm.dot ((centroid - vp)) / norm.dot ((pt - vp));
Eigen::Vector3f intersection (vp + u * (pt - vp));
}
// Check that we got the same number of points and normals
- if (static_cast<int> (normals_->points.size ()) != static_cast<int> (input_->points.size ()))
+ if (normals_->size () != input_->size ())
{
- PCL_ERROR ("[pcl::%s::segment] Number of points in input cloud (%lu) and normal cloud (%lu) do not match!\n",
- getClassName ().c_str (), input_->points.size (),
- normals_->points.size ());
+ PCL_ERROR("[pcl::%s::segment] Number of points in input cloud (%zu) and normal "
+ "cloud (%zu) do not match!\n",
+ getClassName().c_str(),
+ static_cast<std::size_t>(input_->size()),
+ static_cast<std::size_t>(normals_->size()));
return;
}
}
// Calculate range part of planes' hessian normal form
- std::vector<float> plane_d (input_->points.size ());
+ std::vector<float> plane_d (input_->size ());
for (std::size_t i = 0; i < input_->size (); ++i)
- plane_d[i] = input_->points[i].getVector3fMap ().dot (normals_->points[i].getNormalVector3fMap ());
+ plane_d[i] = (*input_)[i].getVector3fMap ().dot ((*normals_)[i].getNormalVector3fMap ());
// Make a comparator
//PlaneCoefficientComparator<PointT,PointNT> plane_comparator (plane_d);
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[0], labels, boundary_indices[i]);
boundary_cloud.points.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
- boundary_cloud.points[j] = input_->points[boundary_indices[i].indices[j]];
+ boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
Eigen::Vector3f centroid = Eigen::Vector3f (centroids[i][0],centroids[i][1],centroids[i][2]);
Eigen::Vector4f model = Eigen::Vector4f (model_coefficients[i].values[0],
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[max_inlier_idx], labels, boundary_indices[i]);
boundary_cloud.points.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
- boundary_cloud.points[j] = input_->points[boundary_indices[i].indices[j]];
+ boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
Eigen::Vector3f centroid = Eigen::Vector3f (centroids[i][0],centroids[i][1],centroids[i][2]);
Eigen::Vector4f model = Eigen::Vector4f (model_coefficients[i].values[0],
pcl::OrganizedConnectedComponentSegmentation<PointT,PointLT>::findLabeledRegionBoundary (inlier_indices[i].indices[max_inlier_idx], labels, boundary_indices[i]);
boundary_cloud.points.resize (boundary_indices[i].indices.size ());
for (std::size_t j = 0; j < boundary_indices[i].indices.size (); j++)
- boundary_cloud.points[j] = input_->points[boundary_indices[i].indices[j]];
+ boundary_cloud[j] = (*input_)[boundary_indices[i].indices[j]];
Eigen::Vector3f centroid = Eigen::Vector3f (centroids[i][0],centroids[i][1],centroids[i][2]);
Eigen::Vector4f model = Eigen::Vector4f (model_coefficients[i].values[0],
if (refinement_compare_->compare (current_row+colIdx, current_row+colIdx+1))
{
//test1 = true;
- labels->points[current_row+colIdx+1].label = current_label;
+ (*labels)[current_row+colIdx+1].label = current_label;
label_indices[current_label].indices.push_back (current_row+colIdx+1);
inlier_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx+1);
}
//Check down
if (refinement_compare_->compare (current_row+colIdx, next_row+colIdx))
{
- labels->points[next_row+colIdx].label = current_label;
+ (*labels)[next_row+colIdx].label = current_label;
label_indices[current_label].indices.push_back (next_row+colIdx);
inlier_indices[label_to_model[current_label]].indices.push_back (next_row+colIdx);
}
//Check left
if (refinement_compare_->compare (current_row+colIdx, current_row+colIdx-1))
{
- labels->points[current_row+colIdx-1].label = current_label;
+ (*labels)[current_row+colIdx-1].label = current_label;
label_indices[current_label].indices.push_back (current_row+colIdx-1);
inlier_indices[label_to_model[current_label]].indices.push_back (current_row+colIdx-1);
}
//Check up
if (refinement_compare_->compare (current_row+colIdx, prev_row+colIdx))
{
- labels->points[prev_row+colIdx].label = current_label;
+ (*labels)[prev_row+colIdx].label = current_label;
label_indices[current_label].indices.push_back (prev_row+colIdx);
inlier_indices[label_to_model[current_label]].indices.push_back (prev_row+colIdx);
}
std::vector<int> pt_indices;
for (std::size_t p_idx = 0; p_idx < ground.size (); ++p_idx)
{
- float diff = cloud->points[p_idx].z - cloud_f->points[p_idx].z;
+ float diff = (*cloud)[p_idx].z - (*cloud_f)[p_idx].z;
if (diff < height_thresholds[i])
pt_indices.push_back (ground[p_idx]);
}
return (false);
// if user forgot to pass normals or the sizes of point and normal cloud are different
- if ( !normals_ || input_->points.size () != normals_->points.size () )
+ if ( !normals_ || input_->size () != normals_->size () )
return (false);
// if residual test is on then we need to check if all needed parameters were correctly initialized
std::vector<int> neighbours;
std::vector<float> distances;
- point_neighbours_.resize (input_->points.size (), neighbours);
+ point_neighbours_.resize (input_->size (), neighbours);
if (input_->is_dense)
{
for (int i_point = 0; i_point < point_number; i_point++)
{
neighbours.clear ();
int point_index = (*indices_)[i_point];
- if (!pcl::isFinite (input_->points[point_index]))
+ if (!pcl::isFinite ((*input_)[point_index]))
continue;
search_->nearestKSearch (i_point, neighbour_number_, neighbours, distances);
point_neighbours_[point_index].swap (neighbours);
pcl::RegionGrowing<PointT, NormalT>::applySmoothRegionGrowingAlgorithm ()
{
int num_of_pts = static_cast<int> (indices_->size ());
- point_labels_.resize (input_->points.size (), -1);
+ point_labels_.resize (input_->size (), -1);
std::vector< std::pair<float, int> > point_residual;
std::pair<float, int> pair;
for (int i_point = 0; i_point < num_of_pts; i_point++)
{
int point_index = (*indices_)[i_point];
- point_residual[i_point].first = normals_->points[point_index].curvature;
+ point_residual[i_point].first = (*normals_)[point_index].curvature;
point_residual[i_point].second = point_index;
}
std::sort (point_residual.begin (), point_residual.end (), comparePair);
float cosine_threshold = std::cos (theta_threshold_);
float data[4];
- data[0] = input_->points[point].data[0];
- data[1] = input_->points[point].data[1];
- data[2] = input_->points[point].data[2];
- data[3] = input_->points[point].data[3];
+ data[0] = (*input_)[point].data[0];
+ data[1] = (*input_)[point].data[1];
+ data[2] = (*input_)[point].data[2];
+ data[3] = (*input_)[point].data[3];
Eigen::Map<Eigen::Vector3f> initial_point (static_cast<float*> (data));
- Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> (normals_->points[point].normal));
+ Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> ((*normals_)[point].normal));
//check the angle between normals
if (smooth_mode_flag_ == true)
{
- Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
+ Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> ((*normals_)[nghbr].normal));
float dot_product = std::abs (nghbr_normal.dot (initial_normal));
if (dot_product < cosine_threshold)
{
}
else
{
- Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
- Eigen::Map<Eigen::Vector3f> initial_seed_normal (static_cast<float*> (normals_->points[initial_seed].normal));
+ Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> ((*normals_)[nghbr].normal));
+ Eigen::Map<Eigen::Vector3f> initial_seed_normal (static_cast<float*> ((*normals_)[initial_seed].normal));
float dot_product = std::abs (nghbr_normal.dot (initial_seed_normal));
if (dot_product < cosine_threshold)
return (false);
}
// check the curvature if needed
- if (curvature_flag_ && normals_->points[nghbr].curvature > curvature_threshold_)
+ if (curvature_flag_ && (*normals_)[nghbr].curvature > curvature_threshold_)
{
is_a_seed = false;
}
// check the residual if needed
float data_1[4];
- data_1[0] = input_->points[nghbr].data[0];
- data_1[1] = input_->points[nghbr].data[1];
- data_1[2] = input_->points[nghbr].data[2];
- data_1[3] = input_->points[nghbr].data[3];
+ data_1[0] = (*input_)[nghbr].data[0];
+ data_1[1] = (*input_)[nghbr].data[1];
+ data_1[2] = (*input_)[nghbr].data[2];
+ data_1[3] = (*input_)[nghbr].data[3];
Eigen::Map<Eigen::Vector3f> nghbr_point (static_cast<float*> (data_1));
float residual = std::abs (initial_normal.dot (initial_point - nghbr_point));
if (residual_flag_ && residual > residual_threshold_)
template <typename PointT, typename NormalT> void
pcl::RegionGrowing<PointT, NormalT>::assembleRegions ()
{
- int number_of_segments = static_cast<int> (num_pts_in_segment_.size ());
- int number_of_points = static_cast<int> (input_->points.size ());
+ const auto number_of_segments = num_pts_in_segment_.size ();
+ const auto number_of_points = input_->size ();
pcl::PointIndices segment;
clusters_.resize (number_of_segments, segment);
- for (int i_seg = 0; i_seg < number_of_segments; i_seg++)
+ for (std::size_t i_seg = 0; i_seg < number_of_segments; i_seg++)
{
clusters_[i_seg].indices.resize ( num_pts_in_segment_[i_seg], 0);
}
- std::vector<int> counter;
- counter.resize (number_of_segments, 0);
+ std::vector<int> counter(number_of_segments, 0);
- for (int i_point = 0; i_point < number_of_points; i_point++)
+ for (std::size_t i_point = 0; i_point < number_of_points; i_point++)
{
- int segment_index = point_labels_[i_point];
+ const auto segment_index = point_labels_[i_point];
if (segment_index != -1)
{
- int point_index = counter[segment_index];
+ const auto point_index = counter[segment_index];
clusters_[segment_index].indices[point_index] = i_point;
counter[segment_index] = point_index + 1;
}
colored_cloud->width = input_->width;
colored_cloud->height = input_->height;
colored_cloud->is_dense = input_->is_dense;
- for (std::size_t i_point = 0; i_point < input_->points.size (); i_point++)
+ for (const auto& i_point: *input_)
{
pcl::PointXYZRGB point;
- point.x = *(input_->points[i_point].data);
- point.y = *(input_->points[i_point].data + 1);
- point.z = *(input_->points[i_point].data + 2);
+ point.x = *(i_point.data);
+ point.y = *(i_point.data + 1);
+ point.z = *(i_point.data + 2);
point.r = 255;
point.g = 0;
point.b = 0;
{
int index;
index = *i_point;
- colored_cloud->points[index].r = colors[3 * next_color];
- colored_cloud->points[index].g = colors[3 * next_color + 1];
- colored_cloud->points[index].b = colors[3 * next_color + 2];
+ (*colored_cloud)[index].r = colors[3 * next_color];
+ (*colored_cloud)[index].g = colors[3 * next_color + 1];
+ (*colored_cloud)[index].b = colors[3 * next_color + 2];
}
next_color++;
}
colored_cloud->width = input_->width;
colored_cloud->height = input_->height;
colored_cloud->is_dense = input_->is_dense;
- for (std::size_t i_point = 0; i_point < input_->points.size (); i_point++)
+ for (const auto& i_point: *input_)
{
pcl::PointXYZRGBA point;
- point.x = *(input_->points[i_point].data);
- point.y = *(input_->points[i_point].data + 1);
- point.z = *(input_->points[i_point].data + 2);
+ point.x = *(i_point.data);
+ point.y = *(i_point.data + 1);
+ point.z = *(i_point.data + 2);
point.r = 255;
point.g = 0;
point.b = 0;
for (auto i_point = i_segment->indices.cbegin (); i_point != i_segment->indices.cend (); i_point++)
{
int index = *i_point;
- colored_cloud->points[index].r = colors[3 * next_color];
- colored_cloud->points[index].g = colors[3 * next_color + 1];
- colored_cloud->points[index].b = colors[3 * next_color + 2];
+ (*colored_cloud)[index].r = colors[3 * next_color];
+ (*colored_cloud)[index].g = colors[3 * next_color + 1];
+ (*colored_cloud)[index].b = colors[3 * next_color + 2];
}
next_color++;
}
if (normal_flag_)
{
// if user forgot to pass normals or the sizes of point and normal cloud are different
- if ( !normals_ || input_->points.size () != normals_->points.size () )
+ if ( !normals_ || input_->size () != normals_->size () )
return (false);
}
std::vector<int> neighbours;
std::vector<float> distances;
- point_neighbours_.resize (input_->points.size (), neighbours);
- point_distances_.resize (input_->points.size (), distances);
+ point_neighbours_.resize (input_->size (), neighbours);
+ point_distances_.resize (input_->size (), distances);
for (int i_point = 0; i_point < point_number; i_point++)
{
{
int point_index = (*indices_)[i_point];
int segment_index = point_labels_[point_index];
- segment_color[segment_index][0] += input_->points[point_index].r;
- segment_color[segment_index][1] += input_->points[point_index].g;
- segment_color[segment_index][2] += input_->points[point_index].b;
+ segment_color[segment_index][0] += (*input_)[point_index].r;
+ segment_color[segment_index][1] += (*input_)[point_index].g;
+ segment_color[segment_index][2] += (*input_)[point_index].b;
}
for (int i_seg = 0; i_seg < number_of_segments_; i_seg++)
{
point_color.resize (3, 0);
std::vector<unsigned int> nghbr_color;
nghbr_color.resize (3, 0);
- point_color[0] = input_->points[point].r;
- point_color[1] = input_->points[point].g;
- point_color[2] = input_->points[point].b;
- nghbr_color[0] = input_->points[nghbr].r;
- nghbr_color[1] = input_->points[nghbr].g;
- nghbr_color[2] = input_->points[nghbr].b;
+ point_color[0] = (*input_)[point].r;
+ point_color[1] = (*input_)[point].g;
+ point_color[2] = (*input_)[point].b;
+ nghbr_color[0] = (*input_)[nghbr].r;
+ nghbr_color[1] = (*input_)[nghbr].g;
+ nghbr_color[2] = (*input_)[nghbr].b;
float difference = calculateColorimetricalDifference (point_color, nghbr_color);
if (difference > color_p2p_threshold_)
return (false);
if (normal_flag_)
{
float data[4];
- data[0] = input_->points[point].data[0];
- data[1] = input_->points[point].data[1];
- data[2] = input_->points[point].data[2];
- data[3] = input_->points[point].data[3];
+ data[0] = (*input_)[point].data[0];
+ data[1] = (*input_)[point].data[1];
+ data[2] = (*input_)[point].data[2];
+ data[3] = (*input_)[point].data[3];
Eigen::Map<Eigen::Vector3f> initial_point (static_cast<float*> (data));
- Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> (normals_->points[point].normal));
+ Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> ((*normals_)[point].normal));
if (smooth_mode_flag_ == true)
{
- Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
+ Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> ((*normals_)[nghbr].normal));
float dot_product = std::abs (nghbr_normal.dot (initial_normal));
if (dot_product < cosine_threshold)
return (false);
}
else
{
- Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> (normals_->points[nghbr].normal));
- Eigen::Map<Eigen::Vector3f> initial_seed_normal (static_cast<float*> (normals_->points[initial_seed].normal));
+ Eigen::Map<Eigen::Vector3f> nghbr_normal (static_cast<float*> ((*normals_)[nghbr].normal));
+ Eigen::Map<Eigen::Vector3f> initial_seed_normal (static_cast<float*> ((*normals_)[initial_seed].normal));
float dot_product = std::abs (nghbr_normal.dot (initial_seed_normal));
if (dot_product < cosine_threshold)
return (false);
}
// check the curvature if needed
- if (curvature_flag_ && normals_->points[nghbr].curvature > curvature_threshold_)
+ if (curvature_flag_ && (*normals_)[nghbr].curvature > curvature_threshold_)
is_a_seed = false;
// check the residual if needed
if (residual_flag_)
{
float data_p[4];
- data_p[0] = input_->points[point].data[0];
- data_p[1] = input_->points[point].data[1];
- data_p[2] = input_->points[point].data[2];
- data_p[3] = input_->points[point].data[3];
+ data_p[0] = (*input_)[point].data[0];
+ data_p[1] = (*input_)[point].data[1];
+ data_p[2] = (*input_)[point].data[2];
+ data_p[3] = (*input_)[point].data[3];
float data_n[4];
- data_n[0] = input_->points[nghbr].data[0];
- data_n[1] = input_->points[nghbr].data[1];
- data_n[2] = input_->points[nghbr].data[2];
- data_n[3] = input_->points[nghbr].data[3];
+ data_n[0] = (*input_)[nghbr].data[0];
+ data_n[1] = (*input_)[nghbr].data[1];
+ data_n[2] = (*input_)[nghbr].data[2];
+ data_n[3] = (*input_)[nghbr].data[3];
Eigen::Map<Eigen::Vector3f> nghbr_point (static_cast<float*> (data_n));
Eigen::Map<Eigen::Vector3f> initial_point (static_cast<float*> (data_p));
- Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> (normals_->points[point].normal));
+ Eigen::Map<Eigen::Vector3f> initial_normal (static_cast<float*> ((*normals_)[point].normal));
float residual = std::abs (initial_normal.dot (initial_point - nghbr_point));
if (residual > residual_threshold_)
is_a_seed = false;
return (false);
}
// Check if input is synced with the normals
- if (input_->points.size () != normals_->points.size ())
+ if (input_->size () != normals_->size ())
{
PCL_ERROR ("[pcl::%s::initSACModel] The number of points in the input point cloud differs than the number of points in the normals!\n", getClassName ().c_str ());
return (false);
PointIndices &indices_out,
float delta_hue)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::seededHueSegmentation] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::seededHueSegmentation] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
seed_queue.push_back (i);
PointXYZRGB p;
- p = cloud.points[i];
+ p = cloud[i];
PointXYZHSV h;
PointXYZRGBtoXYZHSV(p, h);
continue;
PointXYZRGB p_l;
- p_l = cloud.points[nn_indices[j]];
+ p_l = cloud[nn_indices[j]];
PointXYZHSV h_l;
PointXYZRGBtoXYZHSV(p_l, h_l);
PointIndices &indices_out,
float delta_hue)
{
- if (tree->getInputCloud ()->points.size () != cloud.points.size ())
+ if (tree->getInputCloud ()->size () != cloud.size ())
{
- PCL_ERROR ("[pcl::seededHueSegmentation] Tree built for a different point cloud dataset (%lu) than the input cloud (%lu)!\n", tree->getInputCloud ()->points.size (), cloud.points.size ());
+ PCL_ERROR("[pcl::seededHueSegmentation] Tree built for a different point cloud "
+ "dataset (%zu) than the input cloud (%zu)!\n",
+ static_cast<std::size_t>(tree->getInputCloud()->size()),
+ static_cast<std::size_t>(cloud.size()));
return;
}
// Create a bool vector of processed point indices, and initialize it to false
- std::vector<bool> processed (cloud.points.size (), false);
+ std::vector<bool> processed (cloud.size (), false);
std::vector<int> nn_indices;
std::vector<float> nn_distances;
seed_queue.push_back (i);
PointXYZRGB p;
- p = cloud.points[i];
+ p = cloud[i];
PointXYZHSV h;
PointXYZRGBtoXYZHSV(p, h);
continue;
PointXYZRGB p_l;
- p_l = cloud.points[nn_indices[j]];
+ p_l = cloud[nn_indices[j]];
PointXYZHSV h_l;
PointXYZRGBtoXYZHSV(p_l, h_l);
std::vector<int> src_indices;
// Iterate through the source data set
- for (int i = 0; i < static_cast<int> (src.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (src.size ()); ++i)
{
// Ignore invalid points in the input cloud
- if (!isFinite (src.points[i]))
+ if (!isFinite (src[i]))
continue;
// Search for the closest point in the target data set (number of neighbors to find = 1)
- if (!tree->nearestKSearch (src.points[i], 1, nn_indices, nn_distances))
+ if (!tree->nearestKSearch (src[i], 1, nn_indices, nn_distances))
{
- PCL_WARN ("No neighbor found for point %lu (%f %f %f)!\n", i, src.points[i].x, src.points[i].y, src.points[i].z);
+ PCL_WARN ("No neighbor found for point %lu (%f %f %f)!\n", i, src[i].x, src[i].y, src[i].z);
continue;
}
// Add points without a corresponding point in the target cloud to the output cloud
}
//Compute normal
pcl::computePointNormal (*voxel_centroid_cloud_, indices, new_voxel_data.normal_, new_voxel_data.curvature_);
- pcl::flipNormalTowardsViewpoint (voxel_centroid_cloud_->points[new_voxel_data.idx_], 0.0f,0.0f,0.0f, new_voxel_data.normal_);
+ pcl::flipNormalTowardsViewpoint ((*voxel_centroid_cloud_)[new_voxel_data.idx_], 0.0f,0.0f,0.0f, new_voxel_data.normal_);
new_voxel_data.normal_[3] = 0.0f;
new_voxel_data.normal_.normalize ();
new_voxel_data.owner_ = nullptr;
}
//Compute normal
pcl::computePointNormal (*parent_->voxel_centroid_cloud_, indices, voxel_data.normal_, voxel_data.curvature_);
- pcl::flipNormalTowardsViewpoint (parent_->voxel_centroid_cloud_->points[voxel_data.idx_], 0.0f,0.0f,0.0f, voxel_data.normal_);
+ pcl::flipNormalTowardsViewpoint ((*parent_->voxel_centroid_cloud_)[voxel_data.idx_], 0.0f,0.0f,0.0f, voxel_data.normal_);
voxel_data.normal_[3] = 0.0f;
voxel_data.normal_.normalize ();
}
#define PCL_UNARY_CLASSIFIER_HPP_
#include <Eigen/Core>
-#include <flann/algorithms/center_chooser.h>
-#include <flann/util/matrix.h>
+#include <flann/flann.hpp> // for flann::Index
+#include <flann/algorithms/dist.h> // for flann::ChiSquareDistance
+#include <flann/algorithms/linear_index.h> // for flann::LinearIndexParams
+#include <flann/util/matrix.h> // for flann::Matrix
#include <pcl/segmentation/unary_classifier.h>
#include <pcl/common/io.h>
pcl::PointCloud<pcl::PointXYZ>::Ptr out)
{
// resize points of output cloud
- out->points.resize (in->points.size ());
- out->width = static_cast<int> (out->points.size ());
+ out->points.resize (in->size ());
+ out->width = out->size ();
out->height = 1;
out->is_dense = false;
- for (std::size_t i = 0; i < in->points.size (); i++)
+ for (std::size_t i = 0; i < in->size (); i++)
{
pcl::PointXYZ point;
// fill X Y Z
- point.x = in->points[i].x;
- point.y = in->points[i].y;
- point.z = in->points[i].z;
- out->points[i] = point;
+ point.x = (*in)[i].x;
+ point.y = (*in)[i].y;
+ point.z = (*in)[i].z;
+ (*out)[i] = point;
}
}
// TODO: check if input cloud has RGBA information and insert into the cloud
// resize points of output cloud
- out->points.resize (in->points.size ());
- out->width = static_cast<int> (out->points.size ());
+ out->points.resize (in->size ());
+ out->width = out->size ();
out->height = 1;
out->is_dense = false;
- for (std::size_t i = 0; i < in->points.size (); i++)
+ for (std::size_t i = 0; i < in->size (); i++)
{
pcl::PointXYZRGBL point;
// X Y Z R G B L
- point.x = in->points[i].x;
- point.y = in->points[i].y;
- point.z = in->points[i].z;
- //point.rgba = in->points[i].rgba;
+ point.x = (*in)[i].x;
+ point.y = (*in)[i].y;
+ point.z = (*in)[i].z;
+ //point.rgba = (*in)[i].rgba;
point.label = 1;
- out->points[i] = point;
+ (*out)[i] = point;
}
}
if (label_idx != -1)
{
- for (std::size_t i = 0; i < in->points.size (); i++)
+ for (const auto& point: *in)
{
// get the 'label' field
std::uint32_t label;
- memcpy (&label, reinterpret_cast<char*> (&in->points[i]) + fields[label_idx].offset, sizeof(std::uint32_t));
+ memcpy (&label, reinterpret_cast<const char*> (&point) + fields[label_idx].offset, sizeof(std::uint32_t));
// check if label exist
bool exist = false;
if (label_idx != -1)
{
- for (std::size_t i = 0; i < in->points.size (); i++)
+ for (std::size_t i = 0; i < in->size (); i++)
{
// get the 'label' field
std::uint32_t label;
- memcpy (&label, reinterpret_cast<char*> (&in->points[i]) + fields[label_idx].offset, sizeof(std::uint32_t));
+ memcpy (&label, reinterpret_cast<char*> (&(*in)[i]) + fields[label_idx].offset, sizeof(std::uint32_t));
if (static_cast<int> (label) == label_num)
{
pcl::PointXYZ point;
// X Y Z
- point.x = in->points[i].x;
- point.y = in->points[i].y;
- point.z = in->points[i].z;
+ point.x = (*in)[i].x;
+ point.y = (*in)[i].y;
+ point.z = (*in)[i].z;
out->points.push_back (point);
}
}
- out->width = static_cast<int> (out->points.size ());
+ out->width = out->size ();
out->height = 1;
out->is_dense = false;
}
pcl::PointCloud<pcl::FPFHSignature33>::Ptr out,
int k)
{
- pcl::Kmeans kmeans (static_cast<int> (in->points.size ()), 33);
+ pcl::Kmeans kmeans (static_cast<int> (in->size ()), 33);
kmeans.setClusterSize (k);
// add points to the clustering
pcl::Kmeans::Centroids centroids = kmeans.get_centroids ();
// initialize output cloud
- out->width = static_cast<int> (centroids.size ());
+ out->width = centroids.size ();
out->height = 1;
out->is_dense = false;
out->points.resize (static_cast<int> (centroids.size ()));
pcl::FPFHSignature33 point;
for (int idx = 0; idx < 33; idx++)
point.histogram[idx] = centroids[i][idx];
- out->points[i] = point;
+ (*out)[i] = point;
}
}
// estimate the total number of row's needed
int n_row = 0;
for (const auto &trained_feature : trained_features)
- n_row += static_cast<int> (trained_feature->points.size ());
+ n_row += static_cast<int> (trained_feature->size ());
// Convert data into FLANN format
int n_col = 33;
for (std::size_t k = 0; k < trained_features.size (); k++)
{
pcl::PointCloud<pcl::FPFHSignature33>::Ptr hist = trained_features[k];
- std::size_t c = hist->points.size ();
+ const auto c = hist->size ();
for (std::size_t i = 0; i < c; ++i)
for (std::size_t j = 0; j < data.cols; ++j)
- data[(k * c) + i][j] = hist->points[i].histogram[j];
+ data[(k * c) + i][j] = (*hist)[i].histogram[j];
}
// build kd-tree given the training features
index->buildIndex ();
int k = 1;
- indi.resize (query_features->points.size ());
- dist.resize (query_features->points.size ());
+ indi.resize (query_features->size ());
+ dist.resize (query_features->size ());
// Query all points
- for (std::size_t i = 0; i < query_features->points.size (); i++)
+ for (std::size_t i = 0; i < query_features->size (); i++)
{
// Query point
flann::Matrix<float> p = flann::Matrix<float>(new float[n_col], 1, n_col);
- memcpy (&p.ptr ()[0], query_features->points[i].histogram, p.cols * p.rows * sizeof (float));
+ memcpy (&p.ptr ()[0], (*query_features)[i].histogram, p.cols * p.rows * sizeof (float));
flann::Matrix<int> indices (new int[k], 1, k);
flann::Matrix<float> distances (new float[k], 1, k);
{
float nfm = static_cast<float> (n_feature_means);
- for (std::size_t i = 0; i < out->points.size (); i++)
+ for (std::size_t i = 0; i < out->size (); i++)
{
if (dist[i] < feature_threshold)
{
std::modf (l , &intpart);
int label = static_cast<int> (intpart);
- out->points[i].label = label+2;
+ (*out)[i].label = label+2;
}
}
}
pcl::PointCloud<pcl::FPFHSignature33>::Ptr feature (new pcl::PointCloud<pcl::FPFHSignature33>);
computeFPFH (tmp_cloud, feature, normal_radius_search_, fpfh_radius_search_);
- //PCL_INFO ("Number of input cloud features: %d\n", static_cast<int> (feature->points.size ()));
+ //PCL_INFO ("Number of input cloud features: %d\n", static_cast<int> (feature->size ()));
// use k-means to cluster the features
kmeansClustering (feature, output, cluster_size_);
queryFeatureDistances (trained_features_, input_cloud_features, indices, distance);
// assign a label to each point of the input point cloud
- int n_feature_means = static_cast<int> (trained_features_[0]->points.size ());
+ const auto n_feature_means = trained_features_[0]->size ();
convertCloud (input_cloud_, out);
assignLabels (indices, distance, n_feature_means, feature_threshold_, out);
//std::cout << "Assign labels - DONE" << std::endl;
PointCloudLPtr& labels,
std::vector<pcl::PointIndices>& label_indices)
{
- pcl::utils::ignore(centroids);
- pcl::utils::ignore(covariances);
+ pcl::utils::ignore(centroids, covariances);
refine(model_coefficients, inlier_indices, labels, label_indices);
}
float threshold = distance_threshold_;
if (depth_dependent_)
{
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();
float z = vec.dot (z_axis_);
threshold *= z * z;
}
return ( (std::fabs ((*plane_coeff_d_)[idx1] - (*plane_coeff_d_)[idx2]) < threshold)
- && (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ ) );
+ && ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ ) );
}
protected:
bool
compare (int idx1, int idx2) const override
{
- int current_label = labels_->points[idx1].label;
- int next_label = labels_->points[idx2].label;
+ int current_label = (*labels_)[idx1].label;
+ int next_label = (*labels_)[idx2].label;
if (!((*refine_labels_)[current_label] && !(*refine_labels_)[next_label]))
return (false);
const pcl::ModelCoefficients& model_coeff = (*models_)[(*label_to_model_)[current_label]];
- PointT pt = input_->points[idx2];
+ PointT pt = (*input_)[idx2];
double ptp_dist = std::fabs (model_coeff.values[0] * pt.x +
model_coeff.values[1] * pt.y +
model_coeff.values[2] * pt.z +
if (depth_dependent_)
{
//Eigen::Vector4f origin = input_->sensor_origin_;
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();// - origin.head<3> ();
+ Eigen::Vector3f vec = (*input_)[idx1].getVector3fMap ();// - origin.head<3> ();
float z = vec.dot (z_axis_);
threshold *= z * z;
bool
compare (int idx1, int idx2) const override
{
- float dx = input_->points[idx1].x - input_->points[idx2].x;
- float dy = input_->points[idx1].y - input_->points[idx2].y;
- float dz = input_->points[idx1].z - input_->points[idx2].z;
+ float dx = (*input_)[idx1].x - (*input_)[idx2].x;
+ float dy = (*input_)[idx1].y - (*input_)[idx2].y;
+ float dz = (*input_)[idx1].z - (*input_)[idx2].z;
float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
- int dr = input_->points[idx1].r - input_->points[idx2].r;
- int dg = input_->points[idx1].g - input_->points[idx2].g;
- int db = input_->points[idx1].b - input_->points[idx2].b;
+ int dr = (*input_)[idx1].r - (*input_)[idx2].r;
+ int dg = (*input_)[idx1].g - (*input_)[idx2].g;
+ int db = (*input_)[idx1].b - (*input_)[idx2].b;
//Note: This is not the best metric for color comparisons, we should probably use HSV space.
float color_dist = static_cast<float> (dr*dr + dg*dg + db*db);
return ( (dist < distance_threshold_)
- && (normals_->points[idx1].getNormalVector3fMap ().dot (normals_->points[idx2].getNormalVector3fMap () ) > angular_threshold_ )
+ && ((*normals_)[idx1].getNormalVector3fMap ().dot ((*normals_)[idx2].getNormalVector3fMap () ) > angular_threshold_ )
&& (color_dist < color_threshold_));
}
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/segmentation/extract_clusters.h>
#include <pcl/segmentation/impl/extract_clusters.hpp>
-#include <pcl/segmentation/extract_labeled_clusters.h>
#include <pcl/segmentation/impl/extract_labeled_clusters.hpp>
// Instantiations of specific point types
#include <pcl/segmentation/grabcut_segmentation.h>
-#include <cstdlib>
#include <cassert>
#include <vector>
#include <map>
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/segmentation/organized_multi_plane_segmentation.h>
#include <pcl/segmentation/impl/organized_multi_plane_segmentation.hpp>
PCL_INSTANTIATE_PRODUCT(OrganizedMultiPlaneSegmentation, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES)((pcl::Label)))
*
*/
-#include <flann/flann.hpp>
-
#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
-#include <pcl/segmentation/unary_classifier.h>
#include <pcl/segmentation/impl/unary_classifier.hpp>
// Instantiations of specific point types
set(build FALSE)
find_package(OpenGL)
-find_package(GLEW)
+if(APPLE)
+ # Homebrew's FindGLEW module is not in good shape
+ find_package(glew CONFIG)
+else()
+ find_package(GLEW)
+endif()
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" OFF)
PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} EXT_DEPS opengl glew)
float depth_val = texture2D(DepthSampler, TexCoord0.st).r;
float ref_meters = 1.0 / (reference_depth_val * (1.0/f - 1.0/n) + 1.0/n);
float depth_meters = 1.0 / (depth_val * (1.0/f - 1.0/n) + 1.0/n);
- float min_dist = std::abs(ref_meters - depth_meters);
+ float min_dist = abs(ref_meters - depth_meters);
float likelihood = texture2D(CostSampler, vec2(clamp(min_dist/3.0, 0.0, 1.0),0.0)).r;
float ratio = 0.99;
char*
readTextFile(const char* filename)
{
- using namespace std;
char* buf = nullptr;
- ifstream file;
- file.open(filename, ios::in | ios::binary | ios::ate);
+ std::ifstream file;
+ file.open(filename, std::ios::in | std::ios::binary | std::ios::ate);
if (file.is_open()) {
- ifstream::pos_type size;
+ std::ifstream::pos_type size;
size = file.tellg();
- buf = new char[size + static_cast<ifstream::pos_type>(1)];
- file.seekg(0, ios::beg);
+ buf = new char[size + static_cast<std::ifstream::pos_type>(1)];
+ file.seekg(0, std::ios::beg);
file.read(buf, size);
file.close();
buf[size] = 0;
PCL_DEBUG("RGB Triangle mesh: ");
PCL_DEBUG("Mesh polygons: %ld", plg->polygons.size());
- PCL_DEBUG("Mesh points: %ld", newcloud.points.size());
+ PCL_DEBUG("Mesh points: %zu", static_cast<std::size_t>(newcloud.size()));
Eigen::Vector4f tmp;
for (const auto& polygon : plg->polygons) {
for (const unsigned int& point : polygon.vertices) {
- tmp = newcloud.points[point].getVector4fMap();
+ tmp = newcloud[point].getVector4fMap();
vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
- Eigen::Vector3f(newcloud.points[point].r / 255.0f,
- newcloud.points[point].g / 255.0f,
- newcloud.points[point].b / 255.0f)));
+ Eigen::Vector3f(newcloud[point].r / 255.0f,
+ newcloud[point].g / 255.0f,
+ newcloud[point].b / 255.0f)));
indices.push_back(indices.size());
}
}
Eigen::Vector4f tmp;
for (const auto& polygon : plg->polygons) {
for (const unsigned int& point : polygon.vertices) {
- tmp = newcloud.points[point].getVector4fMap();
+ tmp = newcloud[point].getVector4fMap();
vertices.push_back(Vertex(Eigen::Vector3f(tmp(0), tmp(1), tmp(2)),
Eigen::Vector3f(1.0, 1.0, 1.0)));
indices.push_back(indices.size());
for (std::size_t j = 0; j < apoly_in.vertices.size(); j++) { // each point
std::uint32_t pt = apoly_in.vertices[j];
- tmp = newcloud.points[pt].getVector4fMap();
+ tmp = newcloud[pt].getVector4fMap();
// x,y,z
apoly.vertices_[3 * j + 0] = tmp(0);
apoly.vertices_[3 * j + 1] = tmp(1);
apoly.vertices_[3 * j + 2] = tmp(2);
// r,g,b: input is ints 0->255, opengl wants floats 0->1
- apoly.colors_[4 * j + 0] = newcloud.points[pt].r / 255.0f; // Red
- apoly.colors_[4 * j + 1] = newcloud.points[pt].g / 255.0f; // Green
- apoly.colors_[4 * j + 2] = newcloud.points[pt].b / 255.0f; // Blue
+ apoly.colors_[4 * j + 0] = newcloud[pt].r / 255.0f; // Red
+ apoly.colors_[4 * j + 1] = newcloud[pt].g / 255.0f; // Green
+ apoly.colors_[4 * j + 2] = newcloud[pt].b / 255.0f; // Blue
apoly.colors_[4 * j + 3] = 1.0f; // transparency? unnecessary?
}
polygons.push_back(apoly);
for (std::size_t j = 0; j < apoly_in.vertices.size(); j++) { // each point
std::uint32_t pt = apoly_in.vertices[j];
- tmp = newcloud.points[pt].getVector4fMap();
+ tmp = newcloud[pt].getVector4fMap();
// x,y,z
apoly.vertices_[3 * j + 0] = tmp(0);
apoly.vertices_[3 * j + 1] = tmp(1);
GLenum mode, pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc)
: mode_(mode)
{
- nvertices_ = pc->points.size();
+ nvertices_ = pc->size();
vertices_ = new float[3 * nvertices_];
colors_ = new float[4 * nvertices_];
- for (std::size_t i = 0; i < pc->points.size(); ++i) {
- vertices_[3 * i + 0] = pc->points[i].x;
- vertices_[3 * i + 1] = pc->points[i].y;
- vertices_[3 * i + 2] = pc->points[i].z;
+ for (std::size_t i = 0; i < pc->size(); ++i) {
+ vertices_[3 * i + 0] = (*pc)[i].x;
+ vertices_[3 * i + 1] = (*pc)[i].y;
+ vertices_[3 * i + 2] = (*pc)[i].z;
- colors_[4 * i + 0] = pc->points[i].r / 255.0f;
- colors_[4 * i + 1] = pc->points[i].g / 255.0f;
- colors_[4 * i + 2] = pc->points[i].b / 255.0f;
+ colors_[4 * i + 0] = (*pc)[i].r / 255.0f;
+ colors_[4 * i + 1] = (*pc)[i].g / 255.0f;
+ colors_[4 * i + 2] = (*pc)[i].b / 255.0f;
colors_[4 * i + 3] = 1.0;
}
}
//#define SIMULATION_DEBUG 1
#define DO_TIMING_PROFILE 0
-using namespace std;
-
// 301 values, 0.0 uniform 1.0 normal. properly truncated/normalized
float normal_sigma0x5_normal1x0_range0to3_step0x01[] = {
1.59576912f, 1.59545000f, 1.59449302f, 1.59289932f, 1.59067083f, 1.58781019f,
// timestamps and displays the elapsed time between them as
// a fraction and time used [for profiling]
void
-display_tic_toc(vector<double>& tic_toc, const string& fun_name)
+display_tic_toc(std::vector<double>& tic_toc, const std::string& fun_name)
{
std::size_t tic_toc_size = tic_toc.size();
// screen, The Z-buffer is natively -1 (far) to 1 (near). But in this class we
// invert this to be 0 (near, 0.7m) and 1 (far, 20m), so by negating y we get to
// a right-hand computer vision system which is also used by PCL and OpenNi.
- pc->points[idx].z = z;
- pc->points[idx].x =
+ (*pc)[idx].z = z;
+ (*pc)[idx].x =
(static_cast<float>(x) - camera_cx_) * z * (-camera_fx_reciprocal_);
- pc->points[idx].y =
+ (*pc)[idx].y =
(static_cast<float>(y) - camera_cy_) * z * (-camera_fy_reciprocal_);
- int rgb_idx = y * col_width_ + x; // camera_width_
- pc->points[idx].b = color_buffer[rgb_idx * 3 + 2]; // blue
- pc->points[idx].g = color_buffer[rgb_idx * 3 + 1]; // green
- pc->points[idx].r = color_buffer[rgb_idx * 3]; // red
+ int rgb_idx = y * col_width_ + x; // camera_width_
+ (*pc)[idx].b = color_buffer[rgb_idx * 3 + 2]; // blue
+ (*pc)[idx].g = color_buffer[rgb_idx * 3 + 1]; // green
+ (*pc)[idx].r = color_buffer[rgb_idx * 3]; // red
points_added++;
}
else if (organized) {
pc->is_dense = false;
- pc->points[idx].z = std::numeric_limits<float>::quiet_NaN();
- pc->points[idx].x = std::numeric_limits<float>::quiet_NaN();
- pc->points[idx].y = std::numeric_limits<float>::quiet_NaN();
- pc->points[idx].rgba = 0;
+ (*pc)[idx].z = std::numeric_limits<float>::quiet_NaN();
+ (*pc)[idx].x = std::numeric_limits<float>::quiet_NaN();
+ (*pc)[idx].y = std::numeric_limits<float>::quiet_NaN();
+ (*pc)[idx].rgba = 0;
}
}
}
if (!organized) {
- pc->width = 1;
- pc->height = points_added;
+ pc->height = 1;
+ pc->width = points_added;
pc->points.resize(points_added);
}
set(SUBSYS_NAME tools)
-find_package(GLEW)
find_package(GLUT)
if(NOT (GLEW_FOUND AND GLUT_FOUND))
return()
endif()
+if(APPLE)
+ # GLUT::GLUT is not properly set on macOS
+ set(_glut_target ${GLUT_glut_LIBRARY})
+else()
+ set(_glut_target GLUT::GLUT)
+endif()
+
include_directories(SYSTEM ${VTK_INCLUDE_DIRS})
PCL_ADD_EXECUTABLE(pcl_sim_viewer COMPONENT ${SUBSYS_NAME} SOURCES sim_viewer.cpp)
target_link_libraries (pcl_sim_viewer
${VTK_IO_TARGET_LINK_LIBRARIES} pcl_kdtree
pcl_simulation pcl_common pcl_io pcl_visualization
- GLEW::GLEW GLUT::GLUT ${OPENGL_LIBRARIES})
+ GLEW::GLEW ${_glut_target} ${OPENGL_LIBRARIES})
PCL_ADD_EXECUTABLE(pcl_sim_test_simple COMPONENT ${SUBSYS_NAME} SOURCES sim_test_simple.cpp)
target_link_libraries (pcl_sim_test_simple
${VTK_IO_TARGET_LINK_LIBRARIES}
pcl_simulation pcl_common pcl_io pcl_visualization
- GLEW::GLEW GLUT::GLUT ${OPENGL_LIBRARIES})
+ GLEW::GLEW ${_glut_target} ${OPENGL_LIBRARIES})
PCL_ADD_EXECUTABLE(pcl_sim_test_performance COMPONENT ${SUBSYS_NAME} SOURCES sim_test_performance.cpp)
target_link_libraries (pcl_sim_test_performance
${VTK_IO_TARGET_LINK_LIBRARIES}
pcl_simulation pcl_common pcl_io pcl_visualization
- GLEW::GLEW GLUT::GLUT ${OPENGL_LIBRARIES})
+ GLEW::GLEW ${_glut_target} ${OPENGL_LIBRARIES})
set(srcs simulation_io.cpp)
set(incs simulation_io.hpp)
${VTK_IO_TARGET_LINK_LIBRARIES}
${OPENNI_INCLUDES})
target_link_libraries(${LIB_NAME} pcl_simulation pcl_common pcl_io
- ${VTK_IO_TARGET_LINK_LIBRARIES} ${OPENGL_LIBRARIES} GLUT::GLUT)
+ ${VTK_IO_TARGET_LINK_LIBRARIES} ${OPENGL_LIBRARIES} ${_glut_target} GLEW::GLEW)
PCL_ADD_EXECUTABLE(pcl_sim_terminal_demo COMPONENT ${SUBSYS_NAME} SOURCES sim_terminal_demo.cpp)
target_link_libraries (pcl_sim_terminal_demo
${VTK_IO_TARGET_LINK_LIBRARIES}
pcl_simulation pcl_common pcl_io pcl_visualization pcl_simulation_io
- GLEW::GLEW GLUT::GLUT ${OPENGL_LIBRARIES})
+ GLEW::GLEW ${_glut_target} ${OPENGL_LIBRARIES})
* pcl_sim_terminal_demo 2 ../../../../kmcl/models/table_models/meta_model.ply
*/
+#include <pcl/common/time.h> // for getTime
#include <pcl/memory.h>
#include <Eigen/Dense>
using namespace pcl::console;
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
SimExample::Ptr simexample;
// Output the simulated output to file:
void
-write_sim_output(const string& fname_root)
+write_sim_output(const std::string& fname_root)
{
pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc_out(new pcl::PointCloud<pcl::PointXYZRGB>);
// TODO: what to do when there are more than one simulated view?
if (!pc_out->points.empty()) {
- std::cout << pc_out->points.size() << " points written to file\n";
+ std::cout << pc_out->size() << " points written to file\n";
pcl::PCDWriter writer;
// writer.write ( string (fname_root + ".pcd"), *pc_out, false); /// ASCII
- writer.writeBinary(string(fname_root + ".pcd"), *pc_out);
+ writer.writeBinary(std::string(fname_root + ".pcd"), *pc_out);
// std::cout << "finished writing file\n";
}
else {
- std::cout << pc_out->points.size() << " points in cloud, not written\n";
+ std::cout << pc_out->size() << " points in cloud, not written\n";
}
// simexample->write_score_image (simexample->rl_->getScoreBuffer (),
// string (fname_root + "_score.png") );
simexample->write_rgb_image(simexample->rl_->getColorBuffer(),
- string(fname_root + "_rgb.png"));
+ std::string(fname_root + "_rgb.png"));
simexample->write_depth_image(simexample->rl_->getDepthBuffer(),
- string(fname_root + "_depth.png"));
+ std::string(fname_root + "_depth.png"));
// simexample->write_depth_image_uint (simexample->rl_->getDepthBuffer (),
// string (fname_root + "_depth_uint.png") );
using namespace pcl::console;
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
std::uint16_t t_gamma[2048];
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
using namespace std::chrono_literals;
std::uint16_t t_gamma[2048];
*/
#include <pcl/common/common.h>
+#include <pcl/common/time.h> // for getTime
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <pcl/console/print.h>
using namespace pcl::io;
using namespace pcl::simulation;
-using namespace std;
-
using ColorHandler = pcl::visualization::PointCloudColorHandler<pcl::PCLPointCloud2>;
using ColorHandlerPtr = ColorHandler::Ptr;
using ColorHandlerConstPtr = ColorHandler::ConstPtr;
if (is_color_) {
pcl::common::IntensityFieldAccessor<PointT> intensity_accessor;
intensity_accessor.set(new_point,
- static_cast<float>(image_->points[disparity_point].r +
- image_->points[disparity_point].g +
- image_->points[disparity_point].b) /
+ static_cast<float>((*image_)[disparity_point].r +
+ (*image_)[disparity_point].g +
+ (*image_)[disparity_point].b) /
3.0f);
}
#pragma once
-#include <pcl/conversions.h>
+#include <pcl/point_cloud.h> // for PointCloud
#include <pcl/point_types.h>
#include <algorithm>
PointXYZ point_3D = translateCoordinates(row, column, disparity);
float height = point_3D.y;
- RGB point_RGB = image_->points[column + row * disparity_map_width_];
+ RGB point_RGB = (*image_)[column + row * disparity_map_width_];
float intensity =
static_cast<float>((point_RGB.r + point_RGB.g + point_RGB.b) / 3);
*/
#include <pcl/stereo/stereo_grabber.h>
-#include <pcl/point_cloud.h>
+#include <pcl/PCLPointCloud2.h> // for PCLPointCloud2
#include <pcl/point_types.h>
///////////////////////////////////////////////////////////////////////////////////////////
#include "pcl/stereo/stereo_matching.h"
+#include <pcl/console/print.h> // for PCL_ERROR
+
//////////////////////////////////////////////////////////////////////////////
pcl::StereoMatching::StereoMatching()
{
... /* printf() style args */
);
-ON_DECL
+PCL_EXPORTS ON_DECL
void ON_ErrorEx( const char*, // sFileName: __FILE__ will do fine
int, // line number: __LINE__ will do fine
const char*, // sFunctionName: __FUNCTION__ will do fine
void upSample( BSplineElements& high ) const;
void differentiate( BSplineElements< Degree-1 >& d ) const;
- void print( FILE* fp=stdout ) const
+ void print( FILE* ) const
{
for( int i=0 ; i<this->size() ; i++ )
{
// <=> i > r - 1 - 0.5 * Degree
// i - 0.5 * Degree < r
// <=> i < r + 0.5 * Degree
- template< int Degree > inline bool LeftOverlap( unsigned int depth , int offset )
+ template< int Degree > inline bool LeftOverlap( unsigned int, int offset )
{
offset <<= 1;
if( Degree & 1 ) return (offset < 1+Degree) && (offset > -1-Degree );
else return (offset < Degree) && (offset > -2-Degree );
}
- template< int Degree > inline bool RightOverlap( unsigned int depth , int offset )
+ template< int Degree > inline bool RightOverlap( unsigned int, int offset )
{
offset <<= 1;
- int r = 1<<(depth+1);
if( Degree & 1 ) return (offset > 2-1-Degree) && (offset < 2+1+Degree );
else return (offset > 2-2-Degree) && (offset < 2+ Degree );
}
- template< int Degree > inline int ReflectLeft( unsigned int depth , int offset )
+ template< int Degree > inline int ReflectLeft( unsigned int, int offset )
{
if( Degree&1 ) return -offset;
else return -1-offset;
if( set ) _addRight( offset+2*res , boundary );
}
template< int Degree >
- void BSplineElements< Degree >::upSample( BSplineElements< Degree >& high ) const
+ void BSplineElements< Degree >::upSample( BSplineElements< Degree >&) const
{
POISSON_THROW_EXCEPTION (pcl::poisson::PoissonBadArgumentException, "B-spline up-sampling not supported for degree " << Degree);
}
template<>
- void BSplineElements< 1 >::upSample( BSplineElements< 1 >& high ) const;
+ void PCL_EXPORTS BSplineElements< 1 >::upSample( BSplineElements< 1 >& high ) const;
template<>
- void BSplineElements< 2 >::upSample( BSplineElements< 2 >& high ) const;
+ void PCL_EXPORTS BSplineElements< 2 >::upSample( BSplineElements< 2 >& high ) const;
template< int Degree >
void BSplineElements< Degree >::differentiate( BSplineElements< Degree-1 >& d ) const
while (cnt != input_->size ())
{
Point3D< Real > p;
- p[0] = input_->points[cnt].x;
- p[1] = input_->points[cnt].y;
- p[2] = input_->points[cnt].z;
+ p[0] = (*input_)[cnt].x;
+ p[1] = (*input_)[cnt].y;
+ p[2] = (*input_)[cnt].z;
for (i = 0; i < DIMENSION; i++)
{
cnt = 0;
while (cnt != input_->size ())
{
- position[0] = input_->points[cnt].x;
- position[1] = input_->points[cnt].y;
- position[2] = input_->points[cnt].z;
- normal[0] = input_->points[cnt].normal_x;
- normal[1] = input_->points[cnt].normal_y;
- normal[2] = input_->points[cnt].normal_z;
+ position[0] = (*input_)[cnt].x;
+ position[1] = (*input_)[cnt].y;
+ position[2] = (*input_)[cnt].z;
+ normal[0] = (*input_)[cnt].normal_x;
+ normal[1] = (*input_)[cnt].normal_y;
+ normal[2] = (*input_)[cnt].normal_z;
for( i=0 ; i<DIMENSION ; i++ ) position[i] = ( position[i]-center[i] ) / scale;
myCenter[0] = myCenter[1] = myCenter[2] = Real(0.5);
cnt=0;
while (cnt != input_->size ())
{
- position[0] = input_->points[cnt].x;
- position[1] = input_->points[cnt].y;
- position[2] = input_->points[cnt].z;
- normal[0] = input_->points[cnt].normal_x;
- normal[1] = input_->points[cnt].normal_y;
- normal[2] = input_->points[cnt].normal_z;
+ position[0] = (*input_)[cnt].x;
+ position[1] = (*input_)[cnt].y;
+ position[2] = (*input_)[cnt].z;
+ normal[0] = (*input_)[cnt].normal_x;
+ normal[1] = (*input_)[cnt].normal_y;
+ normal[2] = (*input_)[cnt].normal_z;
cnt ++;
for( i=0 ; i<DIMENSION ; i++ ) position[i] = ( position[i]-center[i] ) / scale;
myCenter[0] = myCenter[1] = myCenter[2] = Real(0.5);
// PCL includes
#include <pcl/surface/reconstruction.h>
-#include <pcl/surface/boost.h>
-#include <pcl/conversions.h>
#include <pcl/kdtree/kdtree.h>
-#include <pcl/PolygonMesh.h>
#include <fstream>
-#include <iostream>
namespace pcl
{
+ struct PolygonMesh;
+
/** \brief Returns if a point X is visible from point R (or the origin)
* when taking into account the segment between the points S1 and S2
* \param X 2D coordinate of the point
static_cast<Eigen::MatrixXf::Index> (y - y_w + window_size_));
Eigen::VectorXf::Index d_color = static_cast<Eigen::VectorXf::Index> (
- std::abs (input_->points[y_w * input_->width + x_w].r - input_->points[y * input_->width + x].r) +
- std::abs (input_->points[y_w * input_->width + x_w].g - input_->points[y * input_->width + x].g) +
- std::abs (input_->points[y_w * input_->width + x_w].b - input_->points[y * input_->width + x].b));
+ std::abs ((*input_)[y_w * input_->width + x_w].r - (*input_)[y * input_->width + x].r) +
+ std::abs ((*input_)[y_w * input_->width + x_w].g - (*input_)[y * input_->width + x].g) +
+ std::abs ((*input_)[y_w * input_->width + x_w].b - (*input_)[y * input_->width + x].b));
float val_exp_rgb = val_exp_rgb_vector (d_color);
- if (std::isfinite (input_->points[y_w*input_->width + x_w].z))
+ if (std::isfinite ((*input_)[y_w*input_->width + x_w].z))
{
- sum += val_exp_depth * val_exp_rgb * input_->points[y_w*input_->width + x_w].z;
+ sum += val_exp_depth * val_exp_rgb * (*input_)[y_w*input_->width + x_w].z;
norm_sum += val_exp_depth * val_exp_rgb;
}
}
- output.points[y*input_->width + x].r = input_->points[y*input_->width + x].r;
- output.points[y*input_->width + x].g = input_->points[y*input_->width + x].g;
- output.points[y*input_->width + x].b = input_->points[y*input_->width + x].b;
+ output[y*input_->width + x].r = (*input_)[y*input_->width + x].r;
+ output[y*input_->width + x].g = (*input_)[y*input_->width + x].g;
+ output[y*input_->width + x].b = (*input_)[y*input_->width + x].b;
if (norm_sum != 0.0f)
{
float depth = sum / norm_sum;
Eigen::Vector3f pc (static_cast<float> (x) * depth, static_cast<float> (y) * depth, depth);
Eigen::Vector3f pw (unprojection_matrix_ * pc);
- output.points[y*input_->width + x].x = pw[0];
- output.points[y*input_->width + x].y = pw[1];
- output.points[y*input_->width + x].z = pw[2];
+ output[y*input_->width + x].x = pw[0];
+ output[y*input_->width + x].y = pw[1];
+ output[y*input_->width + x].z = pw[2];
}
else
{
- output.points[y*input_->width + x].x = nan;
- output.points[y*input_->width + x].y = nan;
- output.points[y*input_->width + x].z = nan;
+ output[y*input_->width + x].x = nan;
+ output[y*input_->width + x].y = nan;
+ output[y*input_->width + x].z = nan;
}
}
std::vector<pcl::Vertices> polygons;
performReconstruction (output, polygons);
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.height = 1;
output.is_dense = true;
// Perform the actual surface reconstruction
performReconstruction (output, polygons);
- output.width = static_cast<std::uint32_t> (output.points.size ());
+ output.width = output.size ();
output.height = 1;
output.is_dense = true;
int exitcode;
// Array of coordinates for each point
- coordT *points = reinterpret_cast<coordT*> (calloc (cloud_transformed.points.size () * dim_, sizeof(coordT)));
+ coordT *points = reinterpret_cast<coordT*> (calloc (cloud_transformed.size () * dim_, sizeof(coordT)));
- for (std::size_t i = 0; i < cloud_transformed.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_transformed.size (); ++i)
{
- points[i * dim_ + 0] = static_cast<coordT> (cloud_transformed.points[i].x);
- points[i * dim_ + 1] = static_cast<coordT> (cloud_transformed.points[i].y);
+ points[i * dim_ + 0] = static_cast<coordT> (cloud_transformed[i].x);
+ points[i * dim_ + 1] = static_cast<coordT> (cloud_transformed[i].y);
if (dim_ > 2)
- points[i * dim_ + 2] = static_cast<coordT> (cloud_transformed.points[i].z);
+ points[i * dim_ + 2] = static_cast<coordT> (cloud_transformed[i].z);
}
// Compute concave hull
- exitcode = qh_new_qhull (dim_, static_cast<int> (cloud_transformed.points.size ()), points, ismalloc, flags, outfile, errfile);
+ exitcode = qh_new_qhull (dim_, static_cast<int> (cloud_transformed.size ()), points, ismalloc, flags, outfile, errfile);
if (exitcode != 0)
{
- PCL_ERROR ("[pcl::%s::performReconstrution] ERROR: qhull was unable to compute a concave hull for the given point cloud (%lu)!\n", getClassName ().c_str (), cloud_transformed.points.size ());
+ PCL_ERROR("[pcl::%s::performReconstrution] ERROR: qhull was unable to compute a "
+ "concave hull for the given point cloud (%zu)!\n",
+ getClassName().c_str(),
+ static_cast<std::size_t>(cloud_transformed.size()));
//check if it fails because of NaN values...
if (!cloud_transformed.is_dense)
bool NaNvalues = false;
for (std::size_t i = 0; i < cloud_transformed.size (); ++i)
{
- if (!std::isfinite (cloud_transformed.points[i].x) ||
- !std::isfinite (cloud_transformed.points[i].y) ||
- !std::isfinite (cloud_transformed.points[i].z))
+ if (!std::isfinite (cloud_transformed[i].x) ||
+ !std::isfinite (cloud_transformed[i].y) ||
+ !std::isfinite (cloud_transformed[i].z))
{
NaNvalues = true;
break;
if (voronoi_centers_)
{
- voronoi_centers_->points[non_upper].x = static_cast<float> (facet->center[0]);
- voronoi_centers_->points[non_upper].y = static_cast<float> (facet->center[1]);
- voronoi_centers_->points[non_upper].z = static_cast<float> (facet->center[2]);
+ (*voronoi_centers_)[non_upper].x = static_cast<float> (facet->center[0]);
+ (*voronoi_centers_)[non_upper].y = static_cast<float> (facet->center[1]);
+ (*voronoi_centers_)[non_upper].z = static_cast<float> (facet->center[2]);
}
non_upper++;
{
if (!added_vertices[vertex->id])
{
- alpha_shape.points[vertices].x = static_cast<float> (vertex->point[0]);
- alpha_shape.points[vertices].y = static_cast<float> (vertex->point[1]);
- alpha_shape.points[vertices].z = static_cast<float> (vertex->point[2]);
+ alpha_shape[vertices].x = static_cast<float> (vertex->point[0]);
+ alpha_shape[vertices].y = static_cast<float> (vertex->point[1]);
+ alpha_shape[vertices].z = static_cast<float> (vertex->point[2]);
qhid_to_pcidx[vertex->id] = vertices; //map the vertex id of qhull to the point cloud index
added_vertices[vertex->id] = true;
}
alpha_shape.points.resize (vertices);
- alpha_shape.width = static_cast<std::uint32_t> (alpha_shape.points.size ());
+ alpha_shape.width = alpha_shape.size ();
alpha_shape.height = 1;
}
else
if (voronoi_centers_)
{
- voronoi_centers_->points[dd].x = static_cast<float> (facet->center[0]);
- voronoi_centers_->points[dd].y = static_cast<float> (facet->center[1]);
- voronoi_centers_->points[dd].z = 0.0f;
+ (*voronoi_centers_)[dd].x = static_cast<float> (facet->center[0]);
+ (*voronoi_centers_)[dd].y = static_cast<float> (facet->center[1]);
+ (*voronoi_centers_)[dd].z = 0.0f;
}
++dd;
{
if (!added_vertices[vertex->id])
{
- alpha_shape.points[vertices].x = static_cast<float> (vertex->point[0]);
- alpha_shape.points[vertices].y = static_cast<float> (vertex->point[1]);
+ alpha_shape[vertices].x = static_cast<float> (vertex->point[0]);
+ alpha_shape[vertices].y = static_cast<float> (vertex->point[1]);
if (dim_ > 2)
- alpha_shape.points[vertices].z = static_cast<float> (vertex->point[2]);
+ alpha_shape[vertices].z = static_cast<float> (vertex->point[2]);
else
- alpha_shape.points[vertices].z = 0;
+ alpha_shape[vertices].z = 0;
qhid_to_pcidx[vertex->id] = vertices; //map the vertex id of qhull to the point cloud index
added_vertices[vertex->id] = true;
int sorted_idx = 0;
while (!edges.empty ())
{
- alpha_shape_sorted.points[sorted_idx] = alpha_shape.points[(*curr).first];
+ alpha_shape_sorted[sorted_idx] = alpha_shape[(*curr).first];
// check where we can go from (*curr).first
for (const int &i : (*curr).second)
{
// for each point in the concave hull, search for the nearest neighbor in the original point cloud
hull_indices_.header = input_->header;
hull_indices_.indices.clear ();
- hull_indices_.indices.reserve (alpha_shape.points.size ());
+ hull_indices_.indices.reserve (alpha_shape.size ());
- for (std::size_t i = 0; i < alpha_shape.points.size (); i++)
+ for (const auto& point: alpha_shape)
{
- tree.nearestKSearch (alpha_shape.points[i], 1, neighbor, distances);
+ tree.nearestKSearch (point, 1, neighbor, distances);
hull_indices_.indices.push_back (neighbor[0]);
}
bool xz_proj_safe = true;
// Check the input's normal to see which projection to use
- PointInT p0 = input_->points[(*indices_)[0]];
- PointInT p1 = input_->points[(*indices_)[indices_->size () - 1]];
- PointInT p2 = input_->points[(*indices_)[indices_->size () / 2]];
+ PointInT p0 = (*input_)[(*indices_)[0]];
+ PointInT p1 = (*input_)[(*indices_)[indices_->size () - 1]];
+ PointInT p2 = (*input_)[(*indices_)[indices_->size () / 2]];
Eigen::Array4f dy1dy2 = (p1.getArray4fMap () - p0.getArray4fMap ()) / (p2.getArray4fMap () - p0.getArray4fMap ());
while (!( (dy1dy2[0] != dy1dy2[1]) || (dy1dy2[2] != dy1dy2[1]) ) )
{
- p0 = input_->points[(*indices_)[rand () % indices_->size ()]];
- p1 = input_->points[(*indices_)[rand () % indices_->size ()]];
- p2 = input_->points[(*indices_)[rand () % indices_->size ()]];
+ p0 = (*input_)[(*indices_)[rand () % indices_->size ()]];
+ p1 = (*input_)[(*indices_)[rand () % indices_->size ()]];
+ p2 = (*input_)[(*indices_)[rand () % indices_->size ()]];
dy1dy2 = (p1.getArray4fMap () - p0.getArray4fMap ()) / (p2.getArray4fMap () - p0.getArray4fMap ());
}
pcl::PointCloud<PointInT> normal_calc_cloud;
normal_calc_cloud.points.resize (3);
- normal_calc_cloud.points[0] = p0;
- normal_calc_cloud.points[1] = p1;
- normal_calc_cloud.points[2] = p2;
+ normal_calc_cloud[0] = p0;
+ normal_calc_cloud[1] = p1;
+ normal_calc_cloud[2] = p2;
Eigen::Vector4d normal_calc_centroid;
Eigen::Matrix3d normal_calc_covariance;
{
for (std::size_t i = 0; i < indices_->size (); ++i, j+=dimension)
{
- points[j + 0] = static_cast<coordT> (input_->points[(*indices_)[i]].x);
- points[j + 1] = static_cast<coordT> (input_->points[(*indices_)[i]].y);
+ points[j + 0] = static_cast<coordT> ((*input_)[(*indices_)[i]].x);
+ points[j + 1] = static_cast<coordT> ((*input_)[(*indices_)[i]].y);
}
}
else if (yz_proj_safe)
{
for (std::size_t i = 0; i < indices_->size (); ++i, j+=dimension)
{
- points[j + 0] = static_cast<coordT> (input_->points[(*indices_)[i]].y);
- points[j + 1] = static_cast<coordT> (input_->points[(*indices_)[i]].z);
+ points[j + 0] = static_cast<coordT> ((*input_)[(*indices_)[i]].y);
+ points[j + 1] = static_cast<coordT> ((*input_)[(*indices_)[i]].z);
}
}
else if (xz_proj_safe)
{
for (std::size_t i = 0; i < indices_->size (); ++i, j+=dimension)
{
- points[j + 0] = static_cast<coordT> (input_->points[(*indices_)[i]].x);
- points[j + 1] = static_cast<coordT> (input_->points[(*indices_)[i]].z);
+ points[j + 0] = static_cast<coordT> ((*input_)[(*indices_)[i]].x);
+ points[j + 1] = static_cast<coordT> ((*input_)[(*indices_)[i]].z);
}
}
else
int num_vertices = qh num_vertices;
hull.points.resize (num_vertices);
- memset (&hull.points[0], static_cast<int> (hull.points.size ()), sizeof (PointInT));
+ memset (&hull.points[0], hull.size (), sizeof (PointInT));
vertexT * vertex;
int i = 0;
std::vector<std::pair<int, Eigen::Vector4f>, Eigen::aligned_allocator<std::pair<int, Eigen::Vector4f> > > idx_points (num_vertices);
- idx_points.resize (hull.points.size ());
- memset (&idx_points[0], static_cast<int> (hull.points.size ()), sizeof (std::pair<int, Eigen::Vector4f>));
+ idx_points.resize (hull.size ());
+ memset (&idx_points[0], hull.size (), sizeof (std::pair<int, Eigen::Vector4f>));
FORALLvertices
{
- hull.points[i] = input_->points[(*indices_)[qh_pointid (vertex->point)]];
+ hull[i] = (*input_)[(*indices_)[qh_pointid (vertex->point)]];
idx_points[i].first = qh_pointid (vertex->point);
++i;
}
pcl::compute3DCentroid (hull, centroid);
if (xy_proj_safe)
{
- for (std::size_t j = 0; j < hull.points.size (); j++)
+ for (std::size_t j = 0; j < hull.size (); j++)
{
- idx_points[j].second[0] = hull.points[j].x - centroid[0];
- idx_points[j].second[1] = hull.points[j].y - centroid[1];
+ idx_points[j].second[0] = hull[j].x - centroid[0];
+ idx_points[j].second[1] = hull[j].y - centroid[1];
}
}
else if (yz_proj_safe)
{
- for (std::size_t j = 0; j < hull.points.size (); j++)
+ for (std::size_t j = 0; j < hull.size (); j++)
{
- idx_points[j].second[0] = hull.points[j].y - centroid[1];
- idx_points[j].second[1] = hull.points[j].z - centroid[2];
+ idx_points[j].second[0] = hull[j].y - centroid[1];
+ idx_points[j].second[1] = hull[j].z - centroid[2];
}
}
else if (xz_proj_safe)
{
- for (std::size_t j = 0; j < hull.points.size (); j++)
+ for (std::size_t j = 0; j < hull.size (); j++)
{
- idx_points[j].second[0] = hull.points[j].x - centroid[0];
- idx_points[j].second[1] = hull.points[j].z - centroid[2];
+ idx_points[j].second[0] = hull[j].x - centroid[0];
+ idx_points[j].second[1] = hull[j].z - centroid[2];
}
}
std::sort (idx_points.begin (), idx_points.end (), comparePoints2D);
polygons.resize (1);
- polygons[0].vertices.resize (hull.points.size ());
+ polygons[0].vertices.resize (hull.size ());
hull_indices_.header = input_->header;
hull_indices_.indices.clear ();
- hull_indices_.indices.reserve (hull.points.size ());
+ hull_indices_.indices.reserve (hull.size ());
- for (int j = 0; j < static_cast<int> (hull.points.size ()); j++)
+ for (int j = 0; j < static_cast<int> (hull.size ()); j++)
{
hull_indices_.indices.push_back ((*indices_)[idx_points[j].first]);
- hull.points[j] = input_->points[(*indices_)[idx_points[j].first]];
+ hull[j] = (*input_)[(*indices_)[idx_points[j].first]];
polygons[0].vertices[j] = static_cast<unsigned int> (j);
}
int curlong, totlong;
qh_memfreeshort (&curlong, &totlong);
- hull.width = static_cast<std::uint32_t> (hull.points.size ());
+ hull.width = hull.size ();
hull.height = 1;
hull.is_dense = false;
return;
int j = 0;
for (std::size_t i = 0; i < indices_->size (); ++i, j+=dimension)
{
- points[j + 0] = static_cast<coordT> (input_->points[(*indices_)[i]].x);
- points[j + 1] = static_cast<coordT> (input_->points[(*indices_)[i]].y);
- points[j + 2] = static_cast<coordT> (input_->points[(*indices_)[i]].z);
+ points[j + 0] = static_cast<coordT> ((*input_)[(*indices_)[i]].x);
+ points[j + 1] = static_cast<coordT> ((*input_)[(*indices_)[i]].y);
+ points[j + 2] = static_cast<coordT> ((*input_)[(*indices_)[i]].z);
}
// Compute convex hull
// 0 if no error from qhull
if (exitcode != 0)
{
- PCL_ERROR ("[pcl::%s::performReconstrution3D] ERROR: qhull was unable to compute a convex hull for the given point cloud (%lu)!\n", getClassName ().c_str (), input_->points.size ());
+ PCL_ERROR("[pcl::%s::performReconstrution3D] ERROR: qhull was unable to compute a "
+ "convex hull for the given point cloud (%zu)!\n",
+ getClassName().c_str(),
+ static_cast<std::size_t>(input_->size()));
hull.points.resize (0);
hull.width = hull.height = 0;
{
// Add vertices to hull point_cloud and store index
hull_indices_.indices.push_back ((*indices_)[qh_pointid (vertex->point)]);
- hull.points[i] = input_->points[hull_indices_.indices.back ()];
+ hull[i] = (*input_)[hull_indices_.indices.back ()];
qhid_to_pcidx[vertex->id] = i; // map the vertex id of qhull to the point cloud index
++i;
int curlong, totlong;
qh_memfreeshort (&curlong, &totlong);
- hull.width = static_cast<std::uint32_t> (hull.points.size ());
+ hull.width = hull.size ();
hull.height = 1;
hull.is_dense = false;
}
std::vector<pcl::Vertices> polygons;
performReconstruction (points, polygons, false);
- points.width = static_cast<std::uint32_t> (points.points.size ());
+ points.width = points.size ();
points.height = 1;
points.is_dense = true;
// Perform the actual surface reconstruction
performReconstruction (points, polygons, true);
- points.width = static_cast<std::uint32_t> (points.points.size ());
+ points.width = points.size ();
points.height = 1;
points.is_dense = true;
{
// Skip invalid points from the indices list
for (std::vector<int>::const_iterator it = indices_->begin (); it != indices_->end (); ++it)
- if (!std::isfinite (input_->points[*it].x) ||
- !std::isfinite (input_->points[*it].y) ||
- !std::isfinite (input_->points[*it].z))
+ if (!std::isfinite ((*input_)[*it].x) ||
+ !std::isfinite ((*input_)[*it].y) ||
+ !std::isfinite ((*input_)[*it].z))
state_[*it] = NONE;
}
// Saving coordinates and point to index mapping
coords_.clear ();
coords_.reserve (indices_->size ());
- std::vector<int> point2index (input_->points.size (), -1);
+ std::vector<int> point2index (input_->size (), -1);
for (int cp = 0; cp < static_cast<int> (indices_->size ()); ++cp)
{
- coords_.push_back(input_->points[(*indices_)[cp]].getVector3fMap());
+ coords_.push_back((*input_)[(*indices_)[cp]].getVector3fMap());
point2index[(*indices_)[cp]] = cp;
}
// creating starting triangle
//searchForNeighbors ((*indices_)[R_], nnIdx, sqrDists);
- //tree_->nearestKSearch (input_->points[(*indices_)[R_]], nnn_, nnIdx, sqrDists);
+ //tree_->nearestKSearch ((*input_)[(*indices_)[R_]], nnn_, nnIdx, sqrDists);
tree_->nearestKSearch (indices_->at (R_), nnn_, nnIdx, sqrDists);
double sqr_dist_threshold = (std::min)(sqr_max_edge, sqr_mu * sqrDists[1]);
}
// Get the normal estimate at the current point
- const Eigen::Vector3f nc = input_->points[(*indices_)[R_]].getNormalVector3fMap ();
+ const Eigen::Vector3f nc = (*input_)[(*indices_)[R_]].getNormalVector3fMap ();
// Get a coordinate system that lies on a plane defined by its normal
v_ = nc.unitOrthogonal ();
continue;
}
//searchForNeighbors ((*indices_)[R_], nnIdx, sqrDists);
- //tree_->nearestKSearch (input_->points[(*indices_)[R_]], nnn_, nnIdx, sqrDists);
+ //tree_->nearestKSearch ((*input_)[(*indices_)[R_]], nnn_, nnIdx, sqrDists);
tree_->nearestKSearch (indices_->at (R_), nnn_, nnIdx, sqrDists);
// Search tree returns indices into the original cloud, but we are working with indices TODO: make that optional!
}
// Get the normal estimate at the current point
- const Eigen::Vector3f nc = input_->points[(*indices_)[R_]].getNormalVector3fMap ();
+ const Eigen::Vector3f nc = (*input_)[(*indices_)[R_]].getNormalVector3fMap ();
// Get a coordinate system that lies on a plane defined by its normal
v_ = nc.unitOrthogonal ();
if ((ffn_[R_] == nnIdx[i]) || (sfn_[R_] == nnIdx[i]))
angles_[i].visible = true;
bool same_side = true;
- const Eigen::Vector3f neighbor_normal = input_->points[(*indices_)[nnIdx[i]]].getNormalVector3fMap (); /// NOTE: nnIdx was reset
+ const Eigen::Vector3f neighbor_normal = (*input_)[(*indices_)[nnIdx[i]]].getNormalVector3fMap (); /// NOTE: nnIdx was reset
double cosine = nc.dot (neighbor_normal);
if (cosine > 1) cosine = 1;
if (cosine < -1) cosine = -1;
template <typename PointNT> void
pcl::GridProjection<PointNT>::scaleInputDataPoint (double scale_factor)
{
- for (std::size_t i = 0; i < data_->points.size(); ++i)
- {
- data_->points[i].x /= static_cast<float> (scale_factor);
- data_->points[i].y /= static_cast<float> (scale_factor);
- data_->points[i].z /= static_cast<float> (scale_factor);
+ for (auto& point: *data_) {
+ point.getVector3fMap() /= static_cast<float> (scale_factor);
}
max_p_ /= static_cast<float> (scale_factor);
min_p_ /= static_cast<float> (scale_factor);
model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);
// Projected point
- Eigen::Vector3f point (p.x (), p.y (), p.z ()); //= Eigen::Vector3f::MapAligned (&output.points[cp].x, 3);
+ Eigen::Vector3f point (p.x (), p.y (), p.z ()); //= Eigen::Vector3f::MapAligned (&output[cp].x, 3);
float distance = point.dot (model_coefficients.head <3> ()) + model_coefficients[3];
point -= distance * model_coefficients.head < 3 > ();
for (std::size_t i = 0; i < pt_union_indices.size (); ++i)
{
- Eigen::Vector4f pp (data_->points[pt_union_indices[i]].x, data_->points[pt_union_indices[i]].y, data_->points[pt_union_indices[i]].z, 0);
+ Eigen::Vector4f pp ((*data_)[pt_union_indices[i]].x, (*data_)[pt_union_indices[i]].y, (*data_)[pt_union_indices[i]].z, 0);
pt_union_dist[i] = (pp - p).squaredNorm ();
pt_union_weight[i] = pow (M_E, -pow (pt_union_dist[i], 2.0) / gaussian_scale_);
mag += pow (M_E, -pow (sqrt (pt_union_dist[i]), 2.0) / gaussian_scale_);
pcl::VectorAverage3f vector_average;
Eigen::Vector3f v (
- data_->points[pt_union_indices[0]].normal[0],
- data_->points[pt_union_indices[0]].normal[1],
- data_->points[pt_union_indices[0]].normal[2]);
+ (*data_)[pt_union_indices[0]].normal[0],
+ (*data_)[pt_union_indices[0]].normal[1],
+ (*data_)[pt_union_indices[0]].normal[2]);
for (std::size_t i = 0; i < pt_union_weight.size (); ++i)
{
pt_union_weight[i] /= sum;
- Eigen::Vector3f vec (data_->points[pt_union_indices[i]].normal[0],
- data_->points[pt_union_indices[i]].normal[1],
- data_->points[pt_union_indices[i]].normal[2]);
+ Eigen::Vector3f vec ((*data_)[pt_union_indices[i]].normal[0],
+ (*data_)[pt_union_indices[i]].normal[1],
+ (*data_)[pt_union_indices[i]].normal[2]);
if (vec.dot (v) < 0)
vec = -vec;
vector_average.add (vec, static_cast<float> (pt_union_weight[i]));
for (int i = 0; i < k_; i++)
{
k_weight[i] /= sum;
- Eigen::Vector3f vec (data_->points[k_indices[i]].normal[0],
- data_->points[k_indices[i]].normal[1],
- data_->points[k_indices[i]].normal[2]);
+ Eigen::Vector3f vec ((*data_)[k_indices[i]].normal[0],
+ (*data_)[k_indices[i]].normal[1],
+ (*data_)[k_indices[i]].normal[2]);
vector_average.add (vec, k_weight[i]);
}
vector_average.getEigenVector1 (out_vector);
double sum = 0.0;
for (std::size_t i = 0; i < pt_union_indices.size (); ++i)
{
- Eigen::Vector4f pp (data_->points[pt_union_indices[i]].x, data_->points[pt_union_indices[i]].y, data_->points[pt_union_indices[i]].z, 0);
+ Eigen::Vector4f pp ((*data_)[pt_union_indices[i]].x, (*data_)[pt_union_indices[i]].y, (*data_)[pt_union_indices[i]].z, 0);
pt_union_dist[i] = (pp - p).norm ();
sum += pow (M_E, -pow (pt_union_dist[i], 2.0) / gaussian_scale_);
}
// Store the point cloud data into the voxel grid, and at the same time
// create a hash map to store the information for each cell
cell_hash_map_.max_load_factor (2.0);
- cell_hash_map_.rehash (data_->points.size () / static_cast<long unsigned int> (cell_hash_map_.max_load_factor ()));
+ cell_hash_map_.rehash (data_->size () / static_cast<long unsigned int> (cell_hash_map_.max_load_factor ()));
// Go over all points and insert them into the right leaf
- for (int cp = 0; cp < static_cast<int> (data_->points.size ()); ++cp)
+ for (int cp = 0; cp < static_cast<int> (data_->size ()); ++cp)
{
// Check if the point is invalid
- if (!std::isfinite (data_->points[cp].x) ||
- !std::isfinite (data_->points[cp].y) ||
- !std::isfinite (data_->points[cp].z))
+ if (!std::isfinite ((*data_)[cp].x) ||
+ !std::isfinite ((*data_)[cp].y) ||
+ !std::isfinite ((*data_)[cp].z))
continue;
Eigen::Vector3i index_3d;
- getCellIndex (data_->points[cp].getVector4fMap (), index_3d);
+ getCellIndex ((*data_)[cp].getVector4fMap (), index_3d);
int index_1d = getIndexIn1D (index_3d);
if (cell_hash_map_.find (index_1d) == cell_hash_map_.end ())
{
output.header = input_->header;
pcl::PointCloud<pcl::PointXYZ> cloud;
- cloud.width = static_cast<std::uint32_t> (surface_.size ());
+ cloud.width = surface_.size ();
cloud.height = 1;
cloud.is_dense = true;
cloud.points.resize (surface_.size ());
// Copy the data from surface_ to cloud
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- cloud.points[i].x = surface_[i].x ();
- cloud.points[i].y = surface_[i].y ();
- cloud.points[i].z = surface_[i].z ();
+ cloud[i].x = surface_[i].x ();
+ cloud[i].y = surface_[i].y ();
+ cloud[i].z = surface_[i].z ();
}
pcl::toPCLPointCloud2 (cloud, output.cloud);
}
// The mesh surface is held in surface_. Copy it to the output format
points.header = input_->header;
- points.width = static_cast<std::uint32_t> (surface_.size ());
+ points.width = surface_.size ();
points.height = 1;
points.is_dense = true;
if (!is_far_ignored || nn_sqr_dists[0] < dist_ignore_)
{
- const Eigen::Vector3f normal = input_->points[nn_indices[0]].getNormalVector3fMap ();
+ const Eigen::Vector3f normal = (*input_)[nn_indices[0]].getNormalVector3fMap ();
if (!std::isnan (normal (0)) && normal.norm () > 0.5f)
grid_[z_start + z] = normal.dot (
- point - input_->points[nn_indices[0]].getVector3fMap ());
+ point - (*input_)[nn_indices[0]].getVector3fMap ());
}
}
}
{
// boolean variable to determine whether we are in the off_surface domain for the columns
bool col_off = (col_i >= N);
- M (row_i, col_i) = kernel (Eigen::Vector3f (input_->points[col_i%N].getVector3fMap ()).cast<double> () + Eigen::Vector3f (input_->points[col_i%N].getNormalVector3fMap ()).cast<double> () * col_off * off_surface_epsilon_,
- Eigen::Vector3f (input_->points[row_i%N].getVector3fMap ()).cast<double> () + Eigen::Vector3f (input_->points[row_i%N].getNormalVector3fMap ()).cast<double> () * row_off * off_surface_epsilon_);
+ M (row_i, col_i) = kernel (Eigen::Vector3f ((*input_)[col_i%N].getVector3fMap ()).cast<double> () + Eigen::Vector3f ((*input_)[col_i%N].getNormalVector3fMap ()).cast<double> () * col_off * off_surface_epsilon_,
+ Eigen::Vector3f ((*input_)[row_i%N].getVector3fMap ()).cast<double> () + Eigen::Vector3f ((*input_)[row_i%N].getNormalVector3fMap ()).cast<double> () * row_off * off_surface_epsilon_);
}
d (row_i, 0) = row_off * off_surface_epsilon_;
std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> > centers (2*N);
for (unsigned int i = 0; i < N; ++i)
{
- centers[i] = Eigen::Vector3f (input_->points[i].getVector3fMap ()).cast<double> ();
- centers[i + N] = Eigen::Vector3f (input_->points[i].getVector3fMap ()).cast<double> () + Eigen::Vector3f (input_->points[i].getNormalVector3fMap ()).cast<double> () * off_surface_epsilon_;
+ centers[i] = Eigen::Vector3f ((*input_)[i].getVector3fMap ()).cast<double> ();
+ centers[i + N] = Eigen::Vector3f ((*input_)[i].getVector3fMap ()).cast<double> () + Eigen::Vector3f ((*input_)[i].getNormalVector3fMap ()).cast<double> () * off_surface_epsilon_;
weights[i] = w (i, 0);
weights[i + N] = w (i + N, 0);
}
#include <pcl/type_traits.h>
#include <pcl/surface/mls.h>
#include <pcl/common/io.h>
+#include <pcl/common/common.h> // for getMinMax3D
#include <pcl/common/copy_point.h>
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
#include <pcl/common/geometry.h>
+#include <pcl/search/kdtree.h> // for KdTree
+#include <pcl/search/organized.h> // for OrganizedNeighbor
#ifdef _OPENMP
#include <omp.h>
if (compute_normals_)
{
normals_->height = 1;
- normals_->width = static_cast<std::uint32_t> (normals_->size ());
+ normals_->width = normals_->size ();
for (std::size_t i = 0; i < output.size (); ++i)
{
using FieldList = typename pcl::traits::fieldList<PointOutT>::type;
- pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output.points[i], "normal_x", normals_->points[i].normal_x));
- pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output.points[i], "normal_y", normals_->points[i].normal_y));
- pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output.points[i], "normal_z", normals_->points[i].normal_z));
- pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output.points[i], "curvature", normals_->points[i].curvature));
+ pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output[i], "normal_x", (*normals_)[i].normal_x));
+ pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output[i], "normal_y", (*normals_)[i].normal_y));
+ pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output[i], "normal_z", (*normals_)[i].normal_z));
+ pcl::for_each_type<FieldList> (SetIfFieldExists<PointOutT, float> (output[i], "curvature", (*normals_)[i].curvature));
}
}
// Set proper widths and heights for the clouds
output.height = 1;
- output.width = static_cast<std::uint32_t> (output.size ());
+ output.width = output.size ();
deinitCompute ();
}
aux.z = static_cast<float> (point[2]);
// Copy additional point information if available
- copyMissingFields (input_->points[index], aux);
+ copyMissingFields ((*input_)[index], aux);
projected_points.push_back (aux);
corresponding_input_indices.indices.push_back (index);
// Copy all information from the input cloud to the output points (not doing any interpolation)
for (std::size_t pp = pp_size; pp < projected_points[tn].size (); ++pp)
- copyMissingFields (input_->points[(*indices_)[cp]], projected_points[tn][pp]);
+ copyMissingFields ((*input_)[(*indices_)[cp]], projected_points[tn][pp]);
#else
computeMLSPointNormal (index, nn_indices, projected_points, projected_points_normals, *corresponding_input_indices_, mls_results_[mls_result_index]);
for (std::size_t dp_i = 0; dp_i < distinct_cloud_->size (); ++dp_i) // dp_i = distinct_point_i
{
// Distinct cloud may have nan points, skip them
- if (!std::isfinite (distinct_cloud_->points[dp_i].x))
+ if (!std::isfinite ((*distinct_cloud_)[dp_i].x))
continue;
// Get 3D position of point
- //Eigen::Vector3f pos = distinct_cloud_->points[dp_i].getVector3fMap ();
+ //Eigen::Vector3f pos = (*distinct_cloud_)[dp_i].getVector3fMap ();
std::vector<int> nn_indices;
std::vector<float> nn_dists;
- tree_->nearestKSearch (distinct_cloud_->points[dp_i], 1, nn_indices, nn_dists);
+ tree_->nearestKSearch ((*distinct_cloud_)[dp_i], 1, nn_indices, nn_dists);
int input_index = nn_indices.front ();
// If the closest point did not have a valid MLS fitting result
if (mls_results_[input_index].valid == false)
continue;
- Eigen::Vector3d add_point = distinct_cloud_->points[dp_i].getVector3fMap ().template cast<double> ();
+ Eigen::Vector3d add_point = (*distinct_cloud_)[dp_i].getVector3fMap ().template cast<double> ();
MLSResult::MLSProjectionResults proj = mls_results_[input_index].projectPoint (add_point, projection_method_, 5 * nr_coeff_);
addProjectedPointNormal (input_index, proj.point, proj.normal, mls_results_[input_index].curvature, output, *normals_, *corresponding_input_indices_);
}
model_coefficients.head<3> ().matrix () = eigen_vector;
model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);
- query_point = cloud.points[index].getVector3fMap ().template cast<double> ();
+ query_point = cloud[index].getVector3fMap ().template cast<double> ();
if (!std::isfinite(eigen_vector[0]) || !std::isfinite(eigen_vector[1]) || !std::isfinite(eigen_vector[2]))
{
std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> > de_meaned (num_neighbors);
for (std::size_t ni = 0; ni < static_cast<std::size_t>(num_neighbors); ++ni)
{
- de_meaned[ni][0] = cloud.points[nn_indices[ni]].x - mean[0];
- de_meaned[ni][1] = cloud.points[nn_indices[ni]].y - mean[1];
- de_meaned[ni][2] = cloud.points[nn_indices[ni]].z - mean[2];
+ de_meaned[ni][0] = cloud[nn_indices[ni]].x - mean[0];
+ de_meaned[ni][1] = cloud[nn_indices[ni]].y - mean[1];
+ de_meaned[ni][2] = cloud[nn_indices[ni]].z - mean[2];
weight_vec (ni) = weight_func (de_meaned[ni].dot (de_meaned[ni]));
}
// Put initial cloud in voxel grid
data_size_ = static_cast<std::uint64_t> (1.5 * max_size / voxel_size_);
for (std::size_t i = 0; i < indices->size (); ++i)
- if (std::isfinite (cloud->points[(*indices)[i]].x))
+ if (std::isfinite ((*cloud)[(*indices)[i]].x))
{
Eigen::Vector3i pos;
- getCellIndex (cloud->points[(*indices)[i]].getVector3fMap (), pos);
+ getCellIndex ((*cloud)[(*indices)[i]].getVector3fMap (), pos);
std::uint64_t index_1d;
getIndexIn1D (pos, index_1d);
// (running over complete image since some rows and columns are left out
// depending on triangle_pixel_size)
// avoid to do that here (only needed for ASCII mesh file output, e.g., in vtk files
- for (std::size_t i = 0; i < input_->points.size (); ++i)
- if (!isFinite (input_->points[i]))
+ for (std::size_t i = 0; i < input_->size (); ++i)
+ if (!isFinite ((*input_)[i]))
resetPointData (i, output, 0.0f, x_idx, y_idx, z_idx);
}
if (right_cut_upper && right_cut_lower && left_cut_upper && left_cut_lower)
{
- float dist_right_cut = std::abs (input_->points[index_down].z - input_->points[index_right].z);
- float dist_left_cut = std::abs (input_->points[i].z - input_->points[index_down_right].z);
+ float dist_right_cut = std::abs ((*input_)[index_down].z - (*input_)[index_right].z);
+ float dist_left_cut = std::abs ((*input_)[i].z - (*input_)[index_down_right].z);
if (dist_right_cut >= dist_left_cut)
{
if (store_shadowed_faces_ || !isShadowedTriangle (i, index_down_right, index_right))
for (int i = 0; i < int (mesh.inCorePoints.size ()); i++)
{
p = mesh.inCorePoints[i];
- cloud.points[i].x = p.coords[0]*scale+center.coords[0];
- cloud.points[i].y = p.coords[1]*scale+center.coords[1];
- cloud.points[i].z = p.coords[2]*scale+center.coords[2];
+ cloud[i].x = p.coords[0]*scale+center.coords[0];
+ cloud[i].y = p.coords[1]*scale+center.coords[1];
+ cloud[i].z = p.coords[2]*scale+center.coords[2];
}
for (int i = int (mesh.inCorePoints.size ()); i < int (mesh.outOfCorePointCount () + mesh.inCorePoints.size ()); i++)
{
mesh.nextOutOfCorePoint (p);
- cloud.points[i].x = p.coords[0]*scale+center.coords[0];
- cloud.points[i].y = p.coords[1]*scale+center.coords[1];
- cloud.points[i].z = p.coords[2]*scale+center.coords[2];
+ cloud[i].x = p.coords[0]*scale+center.coords[0];
+ cloud[i].y = p.coords[1]*scale+center.coords[1];
+ cloud[i].z = p.coords[2]*scale+center.coords[2];
}
pcl::toPCLPointCloud2 (cloud, output.cloud);
output.polygons.resize (mesh.polygonCount ());
for (int i = 0; i < int(mesh.inCorePoints.size ()); i++)
{
p = mesh.inCorePoints[i];
- points.points[i].x = p.coords[0]*scale+center.coords[0];
- points.points[i].y = p.coords[1]*scale+center.coords[1];
- points.points[i].z = p.coords[2]*scale+center.coords[2];
+ points[i].x = p.coords[0]*scale+center.coords[0];
+ points[i].y = p.coords[1]*scale+center.coords[1];
+ points[i].z = p.coords[2]*scale+center.coords[2];
}
for (int i = int(mesh.inCorePoints.size()); i < int (mesh.outOfCorePointCount() + mesh.inCorePoints.size ()); i++)
{
mesh.nextOutOfCorePoint (p);
- points.points[i].x = p.coords[0]*scale+center.coords[0];
- points.points[i].y = p.coords[1]*scale+center.coords[1];
- points.points[i].z = p.coords[2]*scale+center.coords[2];
+ points[i].x = p.coords[0]*scale+center.coords[0];
+ points[i].y = p.coords[1]*scale+center.coords[1];
+ points[i].z = p.coords[2]*scale+center.coords[2];
}
polygons.resize (mesh.polygonCount ());
return false;
}
- if (input_->points.size () != normals_->points.size ())
+ if (input_->size () != normals_->size ())
{
PCL_ERROR ("SurfelSmoothing: number of input points different from the number of given normals\n");
return false;
// PCL_INFO ("SurfelSmoothing: cloud smoothing iteration starting ...\n");
output_positions = PointCloudInPtr (new PointCloudIn);
- output_positions->points.resize (interm_cloud_->points.size ());
+ output_positions->points.resize (interm_cloud_->size ());
output_normals = NormalCloudPtr (new NormalCloud);
- output_normals->points.resize (interm_cloud_->points.size ());
+ output_normals->points.resize (interm_cloud_->size ());
std::vector<int> nn_indices;
std::vector<float> nn_distances;
- std::vector<float> diffs (interm_cloud_->points.size ());
+ std::vector<float> diffs (interm_cloud_->size ());
float total_residual = 0.0f;
- for (std::size_t i = 0; i < interm_cloud_->points.size (); ++i)
+ for (std::size_t i = 0; i < interm_cloud_->size (); ++i)
{
Eigen::Vector4f smoothed_point = Eigen::Vector4f::Zero ();
Eigen::Vector4f smoothed_normal = Eigen::Vector4f::Zero ();
// get neighbors
// @todo using 5x the scale for searching instead of all the points to avoid O(N^2)
- tree_->radiusSearch (interm_cloud_->points[i], 5*scale_, nn_indices, nn_distances);
+ tree_->radiusSearch ((*interm_cloud_)[i], 5*scale_, nn_indices, nn_distances);
float theta_normalization_factor = 0.0;
std::vector<float> theta (nn_indices.size ());
for (std::size_t nn_index_i = 0; nn_index_i < nn_indices.size (); ++nn_index_i)
{
- float dist = pcl::squaredEuclideanDistance (interm_cloud_->points[i], input_->points[nn_indices[nn_index_i]]);//interm_cloud_->points[nn_indices[nn_index_i]]);
+ float dist = pcl::squaredEuclideanDistance ((*interm_cloud_)[i], (*input_)[nn_indices[nn_index_i]]);//(*interm_cloud_)[nn_indices[nn_index_i]]);
float theta_i = std::exp ( (-1) * dist / scale_squared_);
theta_normalization_factor += theta_i;
- smoothed_normal += theta_i * interm_normals_->points[nn_indices[nn_index_i]].getNormalVector4fMap ();
+ smoothed_normal += theta_i * (*interm_normals_)[nn_indices[nn_index_i]].getNormalVector4fMap ();
theta[nn_index_i] = theta_i;
}
// find minimum along the normal
float e_residual;
- smoothed_point = interm_cloud_->points[i].getVector4fMap ();
+ smoothed_point = (*interm_cloud_)[i].getVector4fMap ();
while (true)
{
e_residual = 0.0f;
smoothed_point(3) = 0.0f;
for (std::size_t nn_index_i = 0; nn_index_i < nn_indices.size (); ++nn_index_i)
{
- Eigen::Vector4f neighbor = input_->points[nn_indices[nn_index_i]].getVector4fMap ();//interm_cloud_->points[nn_indices[nn_index_i]].getVector4fMap ();
+ Eigen::Vector4f neighbor = (*input_)[nn_indices[nn_index_i]].getVector4fMap ();//(*interm_cloud_)[nn_indices[nn_index_i]].getVector4fMap ();
neighbor(3) = 0.0f;
float dot_product = smoothed_normal.dot (neighbor - smoothed_point);
e_residual += theta[nn_index_i] * dot_product;// * dot_product;
total_residual += e_residual;
- output_positions->points[i].getVector4fMap () = smoothed_point;
- output_normals->points[i].getNormalVector4fMap () = normals_->points[i].getNormalVector4fMap ();//smoothed_normal;
+ (*output_positions)[i].getVector4fMap () = smoothed_point;
+ (*output_normals)[i].getNormalVector4fMap () = (*normals_)[i].getNormalVector4fMap ();//smoothed_normal;
}
// std::cerr << "Total residual after iteration: " << total_residual << std::endl;
PointNT &output_normal)
{
Eigen::Vector4f average_normal = Eigen::Vector4f::Zero ();
- Eigen::Vector4f result_point = input_->points[point_index].getVector4fMap ();
+ Eigen::Vector4f result_point = (*input_)[point_index].getVector4fMap ();
result_point(3) = 0.0f;
// @todo parameter
float theta_i = std::exp ( (-1) * dist / scale_squared_);
theta_normalization_factor += theta_i;
- average_normal += theta_i * normals_->points[nn_indices[nn_index_i]].getNormalVector4fMap ();
+ average_normal += theta_i * (*normals_)[nn_indices[nn_index_i]].getNormalVector4fMap ();
theta[nn_index_i] = theta_i;
}
average_normal /= theta_normalization_factor;
e_residual_along_normal = 0.0f;
for (std::size_t nn_index_i = 0; nn_index_i < nn_indices.size (); ++nn_index_i)
{
- Eigen::Vector4f neighbor = input_->points[nn_indices[nn_index_i]].getVector4fMap ();
+ Eigen::Vector4f neighbor = (*input_)[nn_indices[nn_index_i]].getVector4fMap ();
neighbor(3) = 0.0f;
float dot_product = average_normal.dot (neighbor - result_point);
e_residual_along_normal += theta[nn_index_i] * dot_product;
output_point.x = result_point(0);
output_point.y = result_point(1);
output_point.z = result_point(2);
- output_normal = normals_->points[point_index];
+ output_normal = (*normals_)[point_index];
if (big_iterations == max_big_iterations)
PCL_DEBUG ("[pcl::SurfelSmoothing::smoothPoint] Passed the number of BIG iterations: %d\n", big_iterations);
output_normals->height = input_->height;
output_normals->width = input_->width;
- output_positions->points.resize (input_->points.size ());
- output_normals->points.resize (input_->points.size ());
- for (std::size_t i = 0; i < input_->points.size (); ++i)
+ output_positions->points.resize (input_->size ());
+ output_normals->points.resize (input_->size ());
+ for (std::size_t i = 0; i < input_->size (); ++i)
{
- smoothPoint (i, output_positions->points[i], output_normals->points[i]);
+ smoothPoint (i, (*output_positions)[i], (*output_normals)[i]);
}
}
NormalCloudPtr &cloud2_normals,
pcl::IndicesPtr &output_features)
{
- if (interm_cloud_->points.size () != cloud2->points.size () ||
- cloud2->points.size () != cloud2_normals->points.size ())
+ if (interm_cloud_->size () != cloud2->size () ||
+ cloud2->size () != cloud2_normals->size ())
{
PCL_ERROR ("[pcl::SurfelSmoothing::extractSalientFeaturesBetweenScales]: Number of points in the clouds does not match.\n");
return;
}
- std::vector<float> diffs (cloud2->points.size ());
- for (std::size_t i = 0; i < cloud2->points.size (); ++i)
- diffs[i] = cloud2_normals->points[i].getNormalVector4fMap ().dot (cloud2->points[i].getVector4fMap () -
- interm_cloud_->points[i].getVector4fMap ());
+ std::vector<float> diffs (cloud2->size ());
+ for (std::size_t i = 0; i < cloud2->size (); ++i)
+ diffs[i] = (*cloud2_normals)[i].getNormalVector4fMap ().dot ((*cloud2)[i].getVector4fMap () -
+ (*interm_cloud_)[i].getVector4fMap ());
std::vector<int> nn_indices;
std::vector<float> nn_distances;
- output_features->resize (cloud2->points.size ());
- for (int point_i = 0; point_i < static_cast<int> (cloud2->points.size ()); ++point_i)
+ output_features->resize (cloud2->size ());
+ for (int point_i = 0; point_i < static_cast<int> (cloud2->size ()); ++point_i)
{
// Get neighbors
tree_->radiusSearch (point_i, scale_, nn_indices, nn_distances);
for (const unsigned int &vertex : tex_polygon.vertices)
{
// get point
- PointInT pt = camera_transformed_cloud->points[vertex];
+ PointInT pt = (*camera_transformed_cloud)[vertex];
// compute UV coordinates for this point
getPointUVCoordinates (pt, current_cam, tmp_VT);
for (const int &index : indices)
{
// if intersected point is on the other side of the camera
- if (pt.z * cloud->points[index].z < 0)
+ if (pt.z * (*cloud)[index].z < 0)
{
nbocc--;
continue;
}
- if (std::fabs (cloud->points[index].z - pt.z) <= distance_threshold)
+ if (std::fabs ((*cloud)[index].z - pt.z) <= distance_threshold)
{
// points are very close to each other, so we do not consider the occlusion
nbocc--;
// for each point of the cloud, raycast toward camera and check intersected voxels.
Eigen::Vector3f direction;
std::vector<int> indices;
- for (std::size_t i = 0; i < input_cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < input_cloud->size (); ++i)
{
- direction (0) = input_cloud->points[i].x;
- direction (1) = input_cloud->points[i].y;
- direction (2) = input_cloud->points[i].z;
+ direction (0) = (*input_cloud)[i].x;
+ direction (1) = (*input_cloud)[i].y;
+ direction (2) = (*input_cloud)[i].z;
// if point is not occluded
octree.getIntersectedVoxelIndices (direction, -direction, indices);
for (const int &index : indices)
{
// if intersected point is on the other side of the camera
- if (input_cloud->points[i].z * input_cloud->points[index].z < 0)
+ if ((*input_cloud)[i].z * (*input_cloud)[index].z < 0)
{
nbocc--;
continue;
}
- if (std::fabs (input_cloud->points[index].z - input_cloud->points[i].z) <= maxDeltaZ)
+ if (std::fabs ((*input_cloud)[index].z - (*input_cloud)[i].z) <= maxDeltaZ)
{
// points are very close to each other, so we do not consider the occlusion
nbocc--;
if (nbocc == 0)
{
// point is added in the filtered mesh
- filtered_cloud->points.push_back (input_cloud->points[i]);
+ filtered_cloud->points.push_back ((*input_cloud)[i]);
visible_indices.push_back (static_cast<int> (i));
}
else
std::vector<double> zDist;
std::vector<double> ptDist;
// for each point of the cloud, ray-trace toward the camera and check intersected voxels.
- for (std::size_t i = 0; i < input_cloud->points.size (); ++i)
+ for (const auto& point: *input_cloud)
{
- direction (0) = input_cloud->points[i].x;
- pt.x = input_cloud->points[i].x;
- direction (1) = input_cloud->points[i].y;
- pt.y = input_cloud->points[i].y;
- direction (2) = input_cloud->points[i].z;
- pt.z = input_cloud->points[i].z;
+ direction = pt.getVector3fMap() = point.getVector3fMap();
// get number of occlusions for that point
indices.clear ();
for (const int &index : indices)
{
// if intersected point is on the other side of the camera
- if (pt.z * input_cloud->points[index].z < 0)
+ if (pt.z * (*input_cloud)[index].z < 0)
{
nbocc--;
}
- else if (std::fabs (input_cloud->points[index].z - pt.z) <= maxDeltaZ)
+ else if (std::fabs ((*input_cloud)[index].z - pt.z) <= maxDeltaZ)
{
// points are very close to each other, so we do not consider the occlusion
nbocc--;
}
else
{
- zDist.push_back (std::fabs (input_cloud->points[index].z - pt.z));
- ptDist.push_back (pcl::euclideanDistance (input_cloud->points[index], pt));
+ zDist.push_back (std::fabs ((*input_cloud)[index].z - pt.z));
+ ptDist.push_back (pcl::euclideanDistance ((*input_cloud)[index], pt));
}
}
pcl::PointXY uv_coord3;
if (isFaceProjected (cameras[current_cam],
- camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
- camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
- camera_cloud->points[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
+ (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[0]],
+ (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[1]],
+ (*camera_cloud)[mesh.tex_polygons[current_cam][idx_face].vertices[2]],
uv_coord1,
uv_coord2,
uv_coord3))
pcl::PointXY uv_coord3;
if (isFaceProjected (cameras[current_cam],
- camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
- camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
- camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
+ (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]],
+ (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]],
+ (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]],
uv_coord1,
uv_coord2,
uv_coord3))
// for each neighbor
for (const int &idxNeighbor : idxNeighbors)
{
- if (std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
- std::max (camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
- camera_cloud->points[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
- < camera_cloud->points[indexes_uv_to_points[idxNeighbor].idx_cloud].z)
+ if (std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[0]].z,
+ std::max ((*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[1]].z,
+ (*camera_cloud)[mesh.tex_polygons[idx_pcam][idx_face].vertices[2]].z))
+ < (*camera_cloud)[indexes_uv_to_points[idxNeighbor].idx_cloud].z)
{
// neighbor is farther than all the face's points. Check if it falls into the triangle
- if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, projections->points[idxNeighbor]))
+ if (checkPointInsideTriangle(uv_coord1, uv_coord2, uv_coord3, (*projections)[idxNeighbor]))
{
// current neighbor is inside triangle and is closer => the corresponding face
visibility[indexes_uv_to_points[idxNeighbor].idx_face] = false;
if (visibility[idx_face])
{
// face is visible to the current camera, copy its UV coordinates
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = projections->points[idx_face*3].x;
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = projections->points[idx_face*3].y;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](0) = (*projections)[idx_face*3].x;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3](1) = (*projections)[idx_face*3].y;
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = projections->points[idx_face*3 + 1].x;
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = projections->points[idx_face*3 + 1].y;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](0) = (*projections)[idx_face*3 + 1].x;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 1](1) = (*projections)[idx_face*3 + 1].y;
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = projections->points[idx_face*3 + 2].x;
- mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = projections->points[idx_face*3 + 2].y;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](0) = (*projections)[idx_face*3 + 2].x;
+ mesh.tex_coordinates[current_cam][cpt_visible_faces * 3 + 2](1) = (*projections)[idx_face*3 + 2].y;
visible_faces[cpt_visible_faces] = mesh.tex_polygons[current_cam][idx_face];
#include <functional>
#include <map>
#include <random>
+#include <Eigen/Core> // for Vector3i, Vector3d, ...
// PCL includes
#include <pcl/memory.h>
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/search/pcl_search.h>
-#include <pcl/common/common.h>
+#include <pcl/search/search.h> // for Search
-#include <pcl/surface/boost.h>
-#include <pcl/surface/eigen.h>
#include <pcl/surface/processing.h>
namespace pcl
inline bool
isValidTriangle (const int& a, const int& b, const int& c)
{
- if (!pcl::isFinite (input_->points[a])) return (false);
- if (!pcl::isFinite (input_->points[b])) return (false);
- if (!pcl::isFinite (input_->points[c])) return (false);
+ if (!pcl::isFinite ((*input_)[a])) return (false);
+ if (!pcl::isFinite ((*input_)[b])) return (false);
+ if (!pcl::isFinite ((*input_)[c])) return (false);
return (true);
}
inline bool
isShadowedTriangle (const int& a, const int& b, const int& c)
{
- if (isShadowed (input_->points[a], input_->points[b])) return (true);
- if (isShadowed (input_->points[b], input_->points[c])) return (true);
- if (isShadowed (input_->points[c], input_->points[a])) return (true);
+ if (isShadowed ((*input_)[a], (*input_)[b])) return (true);
+ if (isShadowed ((*input_)[b], (*input_)[c])) return (true);
+ if (isShadowed ((*input_)[c], (*input_)[a])) return (true);
return (false);
}
inline bool
isValidQuad (const int& a, const int& b, const int& c, const int& d)
{
- if (!pcl::isFinite (input_->points[a])) return (false);
- if (!pcl::isFinite (input_->points[b])) return (false);
- if (!pcl::isFinite (input_->points[c])) return (false);
- if (!pcl::isFinite (input_->points[d])) return (false);
+ if (!pcl::isFinite ((*input_)[a])) return (false);
+ if (!pcl::isFinite ((*input_)[b])) return (false);
+ if (!pcl::isFinite ((*input_)[c])) return (false);
+ if (!pcl::isFinite ((*input_)[d])) return (false);
return (true);
}
inline bool
isShadowedQuad (const int& a, const int& b, const int& c, const int& d)
{
- if (isShadowed (input_->points[a], input_->points[b])) return (true);
- if (isShadowed (input_->points[b], input_->points[c])) return (true);
- if (isShadowed (input_->points[c], input_->points[d])) return (true);
- if (isShadowed (input_->points[d], input_->points[a])) return (true);
+ if (isShadowed ((*input_)[a], (*input_)[b])) return (true);
+ if (isShadowed ((*input_)[b], (*input_)[c])) return (true);
+ if (isShadowed ((*input_)[c], (*input_)[d])) return (true);
+ if (isShadowed ((*input_)[d], (*input_)[a])) return (true);
return (false);
}
#pragma once
#include <pcl/pcl_base.h>
-#include <pcl/point_cloud.h>
#include <pcl/PolygonMesh.h>
namespace pcl
{
+ template <typename PointT> class PointCloud;
+
/** \brief @b CloudSurfaceProcessing represents the base class for algorithms that takes a point cloud as input and
* produces a new output cloud that has been modified towards a better surface representation. These types of
* algorithms include surface smoothing, hole filling, cloud upsampling etc.
#pragma once
-#include <pcl/surface/boost.h>
-#include <pcl/PolygonMesh.h>
+#include <vector> // for vector
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
namespace pcl
{
+ struct PolygonMesh;
+
namespace surface
{
class PCL_EXPORTS SimplificationRemoveUnusedVertices
#pragma once
#include <pcl/pcl_macros.h>
-#include <pcl/PolygonMesh.h>
#include <pcl/surface/vtk_smoothing/vtk.h>
namespace pcl
{
+ struct PolygonMesh;
+
class PCL_EXPORTS VTKUtils
{
public:
#include "pcl/surface/3rdparty/opennurbs/opennurbs.h"
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
+
FILE* ON_FileStream::Open( const wchar_t* filename, const wchar_t* mode )
{
FILE* fp = 0;
{
int buffer;
std::size_t res = fread( &buffer, 1, 1, m_fp );
- (void) res;
+ pcl::utils::ignore(res);
if ( feof( m_fp ) )
{
rc = true;
#include "pcl/surface/3rdparty/opennurbs/opennurbs.h"
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
+
ON_OBJECT_IMPLEMENT( ON_Font, ON_Object, "4F0F51FB-35D0-4865-9998-6D2C6A99721D" );
ON_Font::ON_Font()
bool ON_Font::IsSymbolFontFaceName( const wchar_t* s)
{
bool rc = false;
- (void) s; // no op to supress warning
+ pcl::utils::ignore(s);
#if defined(ON_OS_WINDOWS_GDI)
if( s && s[0])
{
#include "pcl/surface/3rdparty/opennurbs/opennurbs.h"
+#include <pcl/common/utils.h> // pcl::utils::ignore
+
+
/*
If the speed of ON_qsort() functions on arrays that
are nearly sorted is as good as heap sort, then define
void
ON_qsort( void *base, std::size_t nel, std::size_t width, int (*compar)(void*,const void *, const void *),void* context)
{
- // no-op to suppress warning on all compilers
- (void) base; (void) nel; (void) width; (void) compar; (void) context;
+ pcl::utils::ignore(base, nel, width, compar, context);
#if defined(ON__HAVE_RELIABLE_SYSTEM_CONTEXT_QSORT)
// The call here must be a thread safe system qsort
// that is faster than the alternative code in this function.
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/surface/concave_hull.h>
#include <pcl/surface/impl/concave_hull.hpp>
// Instantiations of specific point types
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/surface/convex_hull.h>
#include <pcl/surface/impl/convex_hull.hpp>
// Instantiations of specific point types
{
for (int prev = n - 1, cur = 0; cur < n; prev = cur++)
{
- prev_p = points_->points[vertices[prev]].getVector3fMap();
- cur_p = points_->points[vertices[cur]].getVector3fMap();
+ prev_p = (*points_)[vertices[prev]].getVector3fMap();
+ cur_p = (*points_)[vertices[cur]].getVector3fMap();
total += prev_p.cross( cur_p );
}
//unit_normal is unit normal vector of plane defined by the first three points
- prev_p = points_->points[vertices[1]].getVector3fMap() - points_->points[vertices[0]].getVector3fMap();
- cur_p = points_->points[vertices[2]].getVector3fMap() - points_->points[vertices[0]].getVector3fMap();
+ prev_p = (*points_)[vertices[1]].getVector3fMap() - (*points_)[vertices[0]].getVector3fMap();
+ cur_p = (*points_)[vertices[2]].getVector3fMap() - (*points_)[vertices[0]].getVector3fMap();
unit_normal = (prev_p.cross(cur_p)).normalized();
area = total.dot( unit_normal );
pcl::EarClipping::isEar (int u, int v, int w, const std::vector<std::uint32_t>& vertices)
{
Eigen::Vector3f p_u, p_v, p_w;
- p_u = points_->points[vertices[u]].getVector3fMap();
- p_v = points_->points[vertices[v]].getVector3fMap();
- p_w = points_->points[vertices[w]].getVector3fMap();
+ p_u = (*points_)[vertices[u]].getVector3fMap();
+ p_v = (*points_)[vertices[v]].getVector3fMap();
+ p_w = (*points_)[vertices[w]].getVector3fMap();
const float eps = 1e-15f;
Eigen::Vector3f p_uv, p_uw;
{
if ((k == u) || (k == v) || (k == w))
continue;
- p = points_->points[vertices[k]].getVector3fMap();
+ p = (*points_)[vertices[k]].getVector3fMap();
if (isInsideTriangle (p_u, p_v, p_w, p))
return (false);
*
*/
-#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
#include <pcl/surface/grid_projection.h>
#include <pcl/surface/impl/grid_projection.hpp>
#include <pcl/point_types.h>
#include <pcl/surface/marching_cubes_rbf.h>
#include <pcl/surface/impl/marching_cubes_rbf.hpp>
-#include <pcl/surface/impl/marching_cubes.hpp>
// Instantiations of specific point types
PCL_INSTANTIATE(MarchingCubesRBF, (pcl::PointNormal)(pcl::PointXYZRGBNormal)(pcl::PointXYZINormal))
#include <pcl/surface/on_nurbs/nurbs_solve.h>
-using namespace std;
using namespace pcl;
using namespace on_nurbs;
#include <pcl/surface/on_nurbs/nurbs_solve.h>
-using namespace std;
using namespace pcl;
using namespace on_nurbs;
*
*/
-#include <pcl/surface/impl/marching_cubes.hpp>
#include <pcl/surface/impl/organized_fast_mesh.hpp>
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
-#include <pcl/surface/poisson.h>
#include <pcl/surface/impl/poisson.hpp>
// Instantiations of specific point types
*/
#include <pcl/surface/simplification_remove_unused_vertices.h>
+#include <pcl/PolygonMesh.h>
#include <cstring>
#include <vector>
#include <iostream>
-#include <cstdio>
void
pcl::surface::SimplificationRemoveUnusedVertices::simplify(const pcl::PolygonMesh& input, pcl::PolygonMesh& output, std::vector<int>& indices)
output.cloud.point_step = input.cloud.point_step;
output.cloud.is_bigendian = input.cloud.is_bigendian;
output.cloud.height = 1; // cloud is no longer organized
- output.cloud.width = static_cast<int> (indices.size ());
+ output.cloud.width = indices.size ();
output.cloud.row_step = output.cloud.point_step * output.cloud.width;
output.cloud.data.resize (output.cloud.width * output.cloud.height * output.cloud.point_step);
output.cloud.is_dense = false;
#include <pcl/point_types.h>
#include <pcl/impl/instantiate.hpp>
-#include <pcl/surface/surfel_smoothing.h>
#include <pcl/surface/impl/surfel_smoothing.hpp>
PCL_INSTANTIATE_PRODUCT(SurfelSmoothing, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES))
#include <pcl/surface/vtk_smoothing/vtk_mesh_quadric_decimation.h>
#include <pcl/surface/vtk_smoothing/vtk_utils.h>
-#include <vtkVersion.h>
#include <vtkQuadricDecimation.h>
//////////////////////////////////////////////////////////////////////////////////////////////
#include <pcl/surface/vtk_smoothing/vtk_utils.h>
+#include <pcl/PolygonMesh.h>
#include <pcl/conversions.h>
-#include <pcl/common/common.h>
+#include <pcl/point_types.h> // for PointXYZ, PointXYZRGB, RGB
#include <vtkVersion.h>
#include <vtkCellArray.h>
#include <vtkTriangleFilter.h>
for (vtkIdType i = 0; i < mesh_points->GetNumberOfPoints (); ++i)
{
mesh_points->GetPoint (i, &point_xyz[0]);
- cloud_temp->points[i].x = static_cast<float> (point_xyz[0]);
- cloud_temp->points[i].y = static_cast<float> (point_xyz[1]);
- cloud_temp->points[i].z = static_cast<float> (point_xyz[2]);
+ (*cloud_temp)[i].x = static_cast<float> (point_xyz[0]);
+ (*cloud_temp)[i].y = static_cast<float> (point_xyz[1]);
+ (*cloud_temp)[i].z = static_cast<float> (point_xyz[2]);
poly_colors->GetTupleValue (i, &point_color[0]);
- cloud_temp->points[i].r = point_color[0];
- cloud_temp->points[i].g = point_color[1];
- cloud_temp->points[i].b = point_color[2];
+ (*cloud_temp)[i].r = point_color[0];
+ (*cloud_temp)[i].g = point_color[1];
+ (*cloud_temp)[i].b = point_color[2];
}
- cloud_temp->width = static_cast<std::uint32_t> (cloud_temp->points.size ());
+ cloud_temp->width = cloud_temp->size ();
cloud_temp->height = 1;
cloud_temp->is_dense = true;
for (vtkIdType i = 0; i < mesh_points->GetNumberOfPoints (); ++i)
{
mesh_points->GetPoint (i, &point_xyz[0]);
- cloud_temp->points[i].x = static_cast<float> (point_xyz[0]);
- cloud_temp->points[i].y = static_cast<float> (point_xyz[1]);
- cloud_temp->points[i].z = static_cast<float> (point_xyz[2]);
+ (*cloud_temp)[i].x = static_cast<float> (point_xyz[0]);
+ (*cloud_temp)[i].y = static_cast<float> (point_xyz[1]);
+ (*cloud_temp)[i].z = static_cast<float> (point_xyz[2]);
}
- cloud_temp->width = static_cast<std::uint32_t> (cloud_temp->points.size ());
+ cloud_temp->width = cloud_temp->size ();
cloud_temp->height = 1;
cloud_temp->is_dense = true;
enable_testing()
+# VS needs -C config to run correctly
if(MSVC)
- # VS needs -C config to run correct
- add_custom_target(tests "${CMAKE_CTEST_COMMAND}" -C $<$<CONFIG:Debug>:Debug>$<$<CONFIG:Release>:Release> -V -T Test VERBATIM)
-else()
- add_custom_target(tests "${CMAKE_CTEST_COMMAND}" -V -T Test VERBATIM)
+ set(PCL_CTEST_ARGUMENTS ${PCL_CTEST_ARGUMENTS} -C $<$<CONFIG:Debug>:Debug>$<$<CONFIG:Release>:Release>)
endif()
+# Allows disabling the GPU tests; used on CI, which has no access to GPU hardware
+if(PCL_DISABLE_GPU_TESTS)
+ set(PCL_CTEST_ARGUMENTS ${PCL_CTEST_ARGUMENTS} -E gpu)
+endif()
+
+add_custom_target(tests "${CMAKE_CTEST_COMMAND}" ${PCL_CTEST_ARGUMENTS} -V -T Test VERBATIM)
+
set_target_properties(tests PROPERTIES FOLDER "Tests")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
add_subdirectory(features)
add_subdirectory(filters)
add_subdirectory(geometry)
+add_subdirectory(gpu)
add_subdirectory(io)
add_subdirectory(kdtree)
add_subdirectory(keypoints)
PCL_ADD_TEST(common_transforms test_transforms FILES test_transforms.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_int test_plane_intersection FILES test_plane_intersection.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_pca test_pca FILES test_pca.cpp LINK_WITH pcl_gtest pcl_common)
-#PCL_ADD_TEST(common_spring test_spring FILES test_spring.cpp LINK_WITH pcl_gtest pcl_common)
+PCL_ADD_TEST(common_spring test_spring FILES test_spring.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_gaussian test_gaussian FILES test_gaussian.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_operators test_operators FILES test_operators.cpp LINK_WITH pcl_gtest pcl_common)
-#PCL_ADD_TEST(common_convolution test_convolution FILES test_convolution.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_eigen test_eigen FILES test_eigen.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_intensity test_intensity FILES test_intensity.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_generator test_generator FILES test_generator.cpp LINK_WITH pcl_gtest pcl_common)
*/
#include <pcl/test/gtest.h>
-#include <iostream>
#include <pcl/range_image/bearing_angle_image.h>
pcl::BearingAngleImage bearing_angle_image;
/////////////////////////////////////////////////////////////////////
TEST (BearingAngleImageTest, GenerateBAImage)
{
- point_cloud.points[0] = pcl::PointXYZ (3.0, 1.5, -2.0);
- point_cloud.points[1] = pcl::PointXYZ (1.0, 3.0, 2.0);
- point_cloud.points[2] = pcl::PointXYZ (2.0, 3.0, 2.0);
+ point_cloud[0] = pcl::PointXYZ (3.0, 1.5, -2.0);
+ point_cloud[1] = pcl::PointXYZ (1.0, 3.0, 2.0);
+ point_cloud[2] = pcl::PointXYZ (2.0, 3.0, 2.0);
- point_cloud.points[3] = pcl::PointXYZ (2.0, 3.0, 1.0);
- point_cloud.points[4] = pcl::PointXYZ (4.0, 2.0, 2.0);
- point_cloud.points[5] = pcl::PointXYZ (-1.5, 3.0, 1.0);
+ point_cloud[3] = pcl::PointXYZ (2.0, 3.0, 1.0);
+ point_cloud[4] = pcl::PointXYZ (4.0, 2.0, 2.0);
+ point_cloud[5] = pcl::PointXYZ (-1.5, 3.0, 1.0);
bearing_angle_image.generateBAImage (point_cloud);
std::uint8_t grays[6];
for (int i = 0; i < 3 * 2; ++i)
{
- grays[i] = (bearing_angle_image.points[i].rgba >> 8) & 0xff;
+ grays[i] = (bearing_angle_image[i].rgba >> 8) & 0xff;
}
EXPECT_EQ (0, grays[0]);
*/
#include <pcl/test/gtest.h>
-#include <pcl/common/common.h>
-#include <pcl/common/distances.h>
-#include <pcl/common/intersections.h>
-#include <pcl/common/io.h>
#include <pcl/common/eigen.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
TEST (PCL, compute3DCentroidFloat)
{
pcl::PointIndices pindices;
- std::vector<int> indices;
+ Indices indices;
PointXYZ point;
PointCloud<PointXYZ> cloud;
Eigen::Vector4f centroid;
TEST (PCL, compute3DCentroidDouble)
{
pcl::PointIndices pindices;
- std::vector<int> indices;
+ Indices indices;
PointXYZ point;
PointCloud<PointXYZ> cloud;
Eigen::Vector4d centroid;
TEST (PCL, compute3DCentroidCloudIterator)
{
pcl::PointIndices pindices;
- std::vector<int> indices;
+ Indices indices;
PointXYZ point;
PointCloud<PointXYZ> cloud;
Eigen::Vector4f centroid_f;
{
PointCloud<PointXYZ> cloud;
PointXYZ point;
- std::vector <int> indices;
+ Indices indices;
Eigen::Vector4f centroid;
Eigen::Matrix3f covariance_matrix;
{
PointCloud<PointXYZ> cloud;
PointXYZ point;
- std::vector <int> indices;
+ Indices indices;
Eigen::Vector4f centroid;
Eigen::Matrix3f covariance_matrix;
{
PointCloud<PointXYZ> cloud;
PointXYZ point;
- std::vector <int> indices;
+ Indices indices;
Eigen::Matrix3f covariance_matrix;
// test empty cloud which is dense
{
PointCloud<PointXYZ> cloud;
PointXYZ point;
- std::vector <int> indices;
+ Indices indices;
Eigen::Matrix3f covariance_matrix;
Eigen::Vector4f centroid;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, computeCentroid)
{
- std::vector<int> indices;
+ Indices indices;
PointXYZI point;
PointCloud<PointXYZI> cloud;
PointXYZINormal centroid;
EXPECT_XYZ_NEAR (cloud_demean[0], PointXYZ (0.034503, 0.010837, 0.013447), 1e-4);
EXPECT_XYZ_NEAR (cloud_demean[cloud_demean.size () - 1], PointXYZ (-0.048849, 0.072507, -0.071702), 1e-4);
- std::vector<int> indices (cloud.size ());
+ Indices indices (cloud.size ());
for (int i = 0; i < static_cast<int> (indices.size ()); ++i) { indices[i] = i; }
// Check standard demean w/ indices
#include <pcl/common/eigen.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
-
-#include <pcl/common/centroid.h>
+#include <pcl/common/point_tests.h> // for isFinite
using namespace pcl;
{
PointXYZ p;
p.x = std::numeric_limits<float>::quiet_NaN ();
- EXPECT_EQ (isFinite (p), false);
+ EXPECT_FALSE (isFinite (p));
Normal n;
n.normal_x = std::numeric_limits<float>::quiet_NaN ();
- EXPECT_EQ (isFinite (n), false);
+ EXPECT_FALSE (isFinite (n));
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cloud.width = 640;
cloud.height = 480;
- EXPECT_EQ (cloud.isOrganized (), true);
+ EXPECT_TRUE (cloud.isOrganized ());
cloud.height = 1;
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
cloud.width = 10;
for (std::uint32_t i = 0; i < cloud.width*cloud.height; ++i)
cloud.height = 480;
cloud.insert (cloud.end (), PointXYZ (1, 1, 1));
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 1);
cloud.insert (cloud.end (), 5, PointXYZ (1, 1, 1));
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 6);
cloud.erase (cloud.end () - 1);
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 5);
cloud.erase (cloud.begin (), cloud.end ());
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 0);
cloud.emplace (cloud.end (), 1, 1, 1);
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 1);
auto& new_point = cloud.emplace_back (1, 1, 1);
- EXPECT_EQ (cloud.isOrganized (), false);
+ EXPECT_FALSE (cloud.isOrganized ());
EXPECT_EQ (cloud.width, 2);
EXPECT_EQ (&new_point, &cloud.back ());
}
yline[0] = 0.493479f; yline[1] = 0.169246f; yline[2] = 1.22677f; yline[3] = 0.5992f; yline[4] = 0.0505085f; yline[5] = 0.405749f;
Eigen::Vector4f pt;
- EXPECT_EQ ((pcl::lineWithLineIntersection (zline, yline, pt)), true);
+ EXPECT_TRUE (pcl::lineWithLineIntersection (zline, yline, pt));
EXPECT_NEAR (pt[0], 0.574544, 1e-3);
EXPECT_NEAR (pt[1], 0.175526, 1e-3);
EXPECT_NEAR (pt[2], 1.27636, 1e-3);
zline << 0.545203f, -0.514419f, 1.31967f, 0.0243372f, 0.597946f, -0.0413579f;
yline << 0.492706f, 0.164196f, 1.23192f, 0.598704f, 0.0442014f, 0.411328f;
- EXPECT_EQ ((pcl::lineWithLineIntersection (zline, yline, pt)), false);
+ EXPECT_FALSE (pcl::lineWithLineIntersection (zline, yline, pt));
//intersection: [ 3.06416e+08 15.2237 3.06416e+08 4.04468e-34 ]
}
rgb_val = std::numeric_limits<float>::quiet_NaN ();
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "x", is_x, x_val));
- EXPECT_EQ (is_x, true);
+ EXPECT_TRUE (is_x);
EXPECT_EQ (x_val, 1.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "y", is_y, y_val));
- EXPECT_EQ (is_y, true);
+ EXPECT_TRUE (is_y);
EXPECT_EQ (y_val, 2.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "z", is_z, z_val));
- EXPECT_EQ (is_z, true);
+ EXPECT_TRUE (is_z);
EXPECT_EQ (z_val, 3.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "rgb", is_rgb, rgb_val));
- EXPECT_EQ (is_rgb, true);
+ EXPECT_TRUE (is_rgb);
std::uint32_t rgb;
std::memcpy (&rgb, &rgb_val, sizeof(rgb_val));
EXPECT_EQ (rgb, 0xff7f40fe); // alpha is 255
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "normal_x", is_normal_x, normal_x_val));
- EXPECT_EQ (is_normal_x, true);
+ EXPECT_TRUE (is_normal_x);
EXPECT_EQ (normal_x_val, 1.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "normal_y", is_normal_y, normal_y_val));
- EXPECT_EQ (is_normal_y, true);
+ EXPECT_TRUE (is_normal_y);
EXPECT_EQ (normal_y_val, 0.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "normal_z", is_normal_z, normal_z_val));
- EXPECT_EQ (is_normal_z, true);
+ EXPECT_TRUE (is_normal_z);
EXPECT_EQ (normal_z_val, 0.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "x", x_val));
EXPECT_EQ (xx_val, -1.0);
bool is_xx = true;
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "xx", is_xx, xx_val));
- EXPECT_EQ (is_xx, false);
+ EXPECT_FALSE (is_xx);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
test::EXPECT_EQ_VECTORS (max_exp_pt, max_pt);
// Specifying indices
- std::vector<int> idx (2);
+ Indices idx (2);
idx[0] = 1; idx[1] = 2;
max_exp_pt = cloud[2].getVector4fMap ();
getMaxDistance (cloud, idx, pivot_pt, max_pt);
#include <pcl/common/eigen.h>
using namespace pcl;
-using namespace std;
namespace
{
#include <pcl/common/io.h>
using namespace pcl;
-using namespace std;
using CloudXYZRGBA = PointCloud<PointXYZRGBA>;
using CloudXYZRGB = PointCloud<PointXYZRGB>;
EXPECT_EQ (cloud_xyz_rgba[i].rgba, cloud_xyz_rgb_normal[i].rgba);
}
- std::vector<int> indices;
+ Indices indices;
indices.push_back (0); indices.push_back (1);
pcl::copyPointCloud (cloud_xyz_rgba, indices, cloud_xyz_rgb_normal);
ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 2);
EXPECT_EQ (cloud_xyz_rgba[i].rgba, cloud_xyz_rgb_normal[i].rgba);
}
- std::vector<int, Eigen::aligned_allocator<int> > indices_aligned;
+ IndicesAllocator< Eigen::aligned_allocator<int> > indices_aligned;
indices_aligned.push_back (1); indices_aligned.push_back (2); indices_aligned.push_back (3);
pcl::copyPointCloud (cloud_xyz_rgba, indices_aligned, cloud_xyz_rgb_normal);
ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
CloudXYZRGB cloud_xyz_rgb;
CloudXYZRGBA cloud_xyz_rgba (5, 1, pt_xyz_rgba);
- std::vector<int> indices;
+ Indices indices;
indices.push_back (2);
indices.push_back (3);
#include <pcl/pcl_tests.h>
#include <pcl/console/parse.h>
-using namespace std;
-
///////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, parse_double)
{
#include <pcl/test/gtest.h>
#include <pcl/pcl_tests.h>
#include <pcl/point_types.h>
-#include <pcl/point_cloud.h>
#include <pcl/point_types_conversion.h>
-#include <iomanip>
-#include <iostream>
using namespace pcl;
using namespace pcl::test;
pcl::PointCloud<pcl::PointXYZRGBNormal> p_xyz_normal, p_xyz_normal_trans;
// Indices, every second point
- std::vector<int> indices;
+ Indices indices;
PCL_MAKE_ALIGNED_OPERATOR_NEW;
};
affine = transformation;
- std::vector<int> indices (1); indices[0] = 0;
+ Indices indices (1); indices[0] = 0;
pcl::transformPointCloud (c, indices, ct, affine);
EXPECT_NEAR (pt.x, ct[0].x, 1e-4);
// operators added by Eigen for C++14 or lower standards
/** \todo Remove for C++17 (or future standards)
*/
+ #ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-local-typedef"
+ #endif
PCL_MAKE_ALIGNED_OPERATOR_NEW
+ #ifdef __clang__
#pragma clang diagnostic pop
+ #endif
};
struct Bar
*/
/** \author Bastian Steder */
-#include <pcl/pcl_macros.h>
#include <iostream>
-#include <sstream>
#include <pcl/test/gtest.h>
#include <pcl/common/vector_average.h>
using namespace pcl;
TEST (PointCloud, sq_brackets_wrapper)
{
for (std::uint32_t i = 0; i < size; ++i)
- EXPECT_EQ_VECTORS (cloud.points[i].getVector3fMap (),
+ EXPECT_EQ_VECTORS (cloud[i].getVector3fMap (),
cloud[i].getVector3fMap ());
}
FILES test_gasd_estimation.cpp
LINK_WITH pcl_gtest pcl_io pcl_features
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_organized_edge_detection test_organized_edge_detection
+ FILES test_organized_edge_detection.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io)
if(BUILD_keypoints)
PCL_ADD_TEST(feature_brisk test_brisk
FILES test_brisk.cpp
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
using namespace pcl;
using namespace pcl::test;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
// isBoundaryPoint (indices)
bool pt = false;
pt = b.isBoundaryPoint (cloud, 0, indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
+ EXPECT_FALSE (pt);
pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) / 3, indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
+ EXPECT_FALSE (pt);
pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) / 2, indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
+ EXPECT_FALSE (pt);
pt = b.isBoundaryPoint (cloud, static_cast<int> (indices.size ()) - 1, indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, true);
+ EXPECT_TRUE (pt);
// isBoundaryPoint (points)
- pt = b.isBoundaryPoint (cloud, cloud.points[0], indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
- pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () / 3], indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
- pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () / 2], indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, false);
- pt = b.isBoundaryPoint (cloud, cloud.points[indices.size () - 1], indices, u, v, float (M_PI) / 2.0);
- EXPECT_EQ (pt, true);
+ pt = b.isBoundaryPoint (cloud, cloud[0], indices, u, v, float (M_PI) / 2.0);
+ EXPECT_FALSE (pt);
+ pt = b.isBoundaryPoint (cloud, cloud[indices.size () / 3], indices, u, v, float (M_PI) / 2.0);
+ EXPECT_FALSE (pt);
+ pt = b.isBoundaryPoint (cloud, cloud[indices.size () / 2], indices, u, v, float (M_PI) / 2.0);
+ EXPECT_FALSE (pt);
+ pt = b.isBoundaryPoint (cloud, cloud[indices.size () - 1], indices, u, v, float (M_PI) / 2.0);
+ EXPECT_TRUE (pt);
// Object
PointCloud<Boundary>::Ptr bps (new PointCloud<Boundary> ());
// estimate
b.compute (*bps);
- EXPECT_EQ (bps->points.size (), indices.size ());
-
- pt = bps->points[0].boundary_point;
- EXPECT_EQ (pt, false);
- pt = bps->points[indices.size () / 3].boundary_point;
- EXPECT_EQ (pt, false);
- pt = bps->points[indices.size () / 2].boundary_point;
- EXPECT_EQ (pt, false);
- pt = bps->points[indices.size () - 1].boundary_point;
- EXPECT_EQ (pt, true);
+ EXPECT_EQ (bps->size (), indices.size ());
+
+ pt = (*bps)[0].boundary_point;
+ EXPECT_FALSE (pt);
+ pt = (*bps)[indices.size () / 3].boundary_point;
+ EXPECT_FALSE (pt);
+ pt = (*bps)[indices.size () / 2].boundary_point;
+ EXPECT_FALSE (pt);
+ pt = (*bps)[indices.size () - 1].boundary_point;
+ EXPECT_TRUE (pt);
}
/* ---[ */
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
#include <pcl/io/pcd_io.h>
#include <pcl/keypoints/brisk_2d.h>
#include <pcl/features/brisk_2d.h>
-#include <set>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using PointT = PointXYZRGBA;
using KeyPointT = PointWithScale;
EXPECT_EQ (feature_cloud->size (), cloud->size () * cloud->size ());
// Now check for a few values in the feature cloud
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f1));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f2));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f3));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f4));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f5));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f6));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f7));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f8));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f9));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f10));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].alpha_m));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f1));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f2));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f3));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f4));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f5));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f6));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f7));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f8));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f9));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f10));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].alpha_m));
- EXPECT_NEAR (feature_cloud->points[2572].f1, 0.0568356, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f2, -0.1988939, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f3, 0.7854938, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f4, 0.0533117, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f5, 0.1875000, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f6, 0.0733944, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f7, 0.4274509, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f8, 0.2380952, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f9, 0.0619469, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f10, 0.4431372, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].alpha_m, -1.847514, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f1, 0.0568356, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f2, -0.1988939, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f3, 0.7854938, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f4, 0.0533117, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f5, 0.1875000, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f6, 0.0733944, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f7, 0.4274509, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f8, 0.2380952, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f9, 0.0619469, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].f10, 0.4431372, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[2572].alpha_m, -1.847514, 1e-4);
}
/* ---[ */
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
// estimate
pc.compute (*pcs);
- EXPECT_EQ (pcs->points.size (), indices.size ());
+ EXPECT_EQ (pcs->size (), indices.size ());
// Adjust for small numerical inconsistencies (due to nn_indices not being sorted)
- EXPECT_NEAR (std::abs (pcs->points[0].principal_curvature[0]), 0.98509, 1e-4);
- EXPECT_NEAR (std::abs (pcs->points[0].principal_curvature[1]), 0.10713, 1e-4);
- EXPECT_NEAR (std::abs (pcs->points[0].principal_curvature[2]), 0.13462, 1e-4);
- EXPECT_NEAR (std::abs (pcs->points[0].pc1), 0.23997458815574646, 1e-4);
- EXPECT_NEAR (std::abs (pcs->points[0].pc2), 0.19400238990783691, 1e-4);
-
- EXPECT_NEAR (pcs->points[2].principal_curvature[0], 0.98079, 1e-4);
- EXPECT_NEAR (pcs->points[2].principal_curvature[1], -0.04019, 1e-4);
- EXPECT_NEAR (pcs->points[2].principal_curvature[2], 0.19086, 1e-4);
- EXPECT_NEAR (pcs->points[2].pc1, 0.27207502722740173, 1e-4);
- EXPECT_NEAR (pcs->points[2].pc2, 0.1946497857570648, 1e-4);
-
- EXPECT_NEAR (pcs->points[indices.size () - 3].principal_curvature[0], 0.86725, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 3].principal_curvature[1], -0.37599, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 3].principal_curvature[2], 0.32636, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 3].pc1, 0.2590007483959198, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 3].pc2, 0.17906941473484039, 1e-4);
-
- EXPECT_NEAR (pcs->points[indices.size () - 1].principal_curvature[0], 0.86725, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 1].principal_curvature[1], -0.375851, 1e-3);
- EXPECT_NEAR (pcs->points[indices.size () - 1].principal_curvature[2], 0.32636, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 1].pc1, 0.25900065898895264, 1e-4);
- EXPECT_NEAR (pcs->points[indices.size () - 1].pc2, 0.17906941473484039, 1e-4);
+ EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[0]), 0.98509, 1e-4);
+ EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[1]), 0.10713, 1e-4);
+ EXPECT_NEAR (std::abs ((*pcs)[0].principal_curvature[2]), 0.13462, 1e-4);
+ EXPECT_NEAR (std::abs ((*pcs)[0].pc1), 0.23997458815574646, 1e-4);
+ EXPECT_NEAR (std::abs ((*pcs)[0].pc2), 0.19400238990783691, 1e-4);
+
+ EXPECT_NEAR ((*pcs)[2].principal_curvature[0], 0.98079, 1e-4);
+ EXPECT_NEAR ((*pcs)[2].principal_curvature[1], -0.04019, 1e-4);
+ EXPECT_NEAR ((*pcs)[2].principal_curvature[2], 0.19086, 1e-4);
+ EXPECT_NEAR ((*pcs)[2].pc1, 0.27207502722740173, 1e-4);
+ EXPECT_NEAR ((*pcs)[2].pc2, 0.1946497857570648, 1e-4);
+
+ EXPECT_NEAR ((*pcs)[indices.size () - 3].principal_curvature[0], 0.86725, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 3].principal_curvature[1], -0.37599, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 3].principal_curvature[2], 0.32636, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 3].pc1, 0.2590007483959198, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 3].pc2, 0.17906941473484039, 1e-4);
+
+ EXPECT_NEAR ((*pcs)[indices.size () - 1].principal_curvature[0], 0.86725, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 1].principal_curvature[1], -0.375851, 1e-3);
+ EXPECT_NEAR ((*pcs)[indices.size () - 1].principal_curvature[2], 0.32636, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 1].pc1, 0.25900065898895264, 1e-4);
+ EXPECT_NEAR ((*pcs)[indices.size () - 1].pc2, 0.17906941473484039, 1e-4);
}
/* ---[ */
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
using CloudPtr = PointCloud<PointXYZ>::Ptr;
// estimate
cvfh.compute (*vfhs);
- EXPECT_EQ (static_cast<int>(vfhs->points.size ()), 1);
+ EXPECT_EQ (static_cast<int>(vfhs->size ()), 1);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// estimate
cvfh.compute (*vfhs);
- EXPECT_EQ (static_cast<int>(vfhs->points.size ()), 2);
+ EXPECT_EQ (static_cast<int>(vfhs->size ()), 2);
}
/* ---[ */
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
{
indices[i] = static_cast<int>(i);
sampled_cloud.reset (new pcl::PointCloud<pcl::PointXYZ> ());
std::vector<int> sampled_indices;
- for (float sa = 0.0f; sa < (float)cloud->points.size (); sa += sampling_incr)
+ for (float sa = 0.0f; sa < (float)cloud->size (); sa += sampling_incr)
sampled_indices.push_back (static_cast<int> (sa));
copyPointCloud (*cloud, sampled_indices, *sampled_cloud);
void
createColorCloud (pcl::PointCloud<pcl::PointXYZRGBA> &colorCloud)
{
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
pcl::PointXYZRGBA p;
- p.getVector3fMap () = cloud->points[i].getVector3fMap ();
+ p.getVector3fMap () = (*cloud)[i].getVector3fMap ();
p.rgba = ( (i % 255) << 16) + ( ( (255 - i) % 255) << 8) + ( (i * 37) % 255);
colorCloud.push_back (p);
2.0202, 0.252525, 0, 0, 0, 0, 0, 0, 2.52525, 0, 0, 0, 0, 0, 0, 0, 0.50505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- EXPECT_EQ (descriptor.points.size (), 1);
- for (std::size_t i = 0; i < std::size_t (descriptor.points[0].descriptorSize ()); ++i)
+ EXPECT_EQ (descriptor.size (), 1);
+ for (std::size_t i = 0; i < std::size_t (descriptor[0].descriptorSize ()); ++i)
{
- EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ EXPECT_NEAR (descriptor[0].histogram[i], ref_values[i], 1e-5);
}
}
0.209958, 0, 0, 0, 0, 0, 0.0603114, 0.412731, 0.0294292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
- EXPECT_EQ (descriptor.points.size (), 1);
- for (std::size_t i = 0; i < std::size_t (descriptor.points[0].descriptorSize ()); ++i)
+ EXPECT_EQ (descriptor.size (), 1);
+ for (std::size_t i = 0; i < std::size_t (descriptor[0].descriptorSize ()); ++i)
{
- EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ EXPECT_NEAR (descriptor[0].histogram[i], ref_values[i], 1e-5);
}
}
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
- EXPECT_EQ (descriptor.points.size (), 1);
- for (std::size_t i = 0; i < std::size_t (descriptor.points[0].descriptorSize ()); ++i)
+ EXPECT_EQ (descriptor.size (), 1);
+ for (std::size_t i = 0; i < std::size_t (descriptor[0].descriptorSize ()); ++i)
{
- EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ EXPECT_NEAR (descriptor[0].histogram[i], ref_values[i], 1e-5);
}
}
0.0640188, 0.0967736, 0.0307571, 0.00109069, 7.53513e-005, 0.000807341, 0.007594, 0.00643415, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
- EXPECT_EQ (descriptor.points.size (), 1);
- for (std::size_t i = 0; i < std::size_t( descriptor.points[0].descriptorSize ()); ++i)
+ EXPECT_EQ (descriptor.size (), 1);
+ for (std::size_t i = 0; i < std::size_t( descriptor[0].descriptorSize ()); ++i)
{
- EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ EXPECT_NEAR (descriptor[0].histogram[i], ref_values[i], 1e-5);
}
}
using namespace pcl;
using namespace pcl::io;
-using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, IntensityGradientEstimation)
cloud_xyzi.points.push_back (p);
}
}
- cloud_xyzi.width = static_cast<std::uint32_t> (cloud_xyzi.points.size ());
+ cloud_xyzi.width = cloud_xyzi.size ();
PointCloud<PointXYZI>::ConstPtr cloud_ptr = cloud_xyzi.makeShared ();
// Estimate surface normals
grad_est.compute (gradient);
// Compare to gradient estimates to actual values
- for (std::size_t i = 0; i < cloud_ptr->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_ptr->size (); ++i)
{
- const PointXYZI &p = cloud_ptr->points[i];
+ const PointXYZI &p = (*cloud_ptr)[i];
// A pointer to the estimated gradient values
- const float * g_est = gradient.points[i].gradient;
+ const float * g_est = gradient[i].gradient;
// Compute the surface normal analytically.
float nx = -0.2f * p.x;
using namespace pcl;
using namespace pcl::io;
-using namespace std;
search::KdTree<PointXYZ>::Ptr tree (new search::KdTree<PointXYZ> ());
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ> ());
n.setRadiusSearch (rad);
n.compute (*normals);
- EXPECT_NEAR (normals->points[103].normal_x, 0.694, 0.1);
- EXPECT_NEAR (normals->points[103].normal_y, -0.562, 0.1);
- EXPECT_NEAR (normals->points[103].normal_z, -0.448, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_x, 0.694, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_y, -0.562, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_z, -0.448, 0.1);
// GRSDEstimation
double rsd_radius = 0.03;
grsd.setRadiusSearch (rsd_radius);
grsd.compute (*grsd_desc);
- EXPECT_EQ (12, grsd_desc->points[0].histogram[2]);
- EXPECT_EQ (104, grsd_desc->points[0].histogram[4]);
- EXPECT_EQ (0, grsd_desc->points[0].histogram[6]);
- EXPECT_EQ (0, grsd_desc->points[0].histogram[8]);
- EXPECT_EQ (0, grsd_desc->points[0].histogram[10]);
- EXPECT_EQ (34, grsd_desc->points[0].histogram[12]);
- EXPECT_EQ (167, grsd_desc->points[0].histogram[14]);
- EXPECT_EQ (68, grsd_desc->points[0].histogram[16]);
- EXPECT_EQ (204, grsd_desc->points[0].histogram[18]);
- EXPECT_EQ (0, grsd_desc->points[0].histogram[20]);
+ EXPECT_EQ (12, (*grsd_desc)[0].histogram[2]);
+ EXPECT_EQ (104, (*grsd_desc)[0].histogram[4]);
+ EXPECT_EQ (0, (*grsd_desc)[0].histogram[6]);
+ EXPECT_EQ (0, (*grsd_desc)[0].histogram[8]);
+ EXPECT_EQ (0, (*grsd_desc)[0].histogram[10]);
+ EXPECT_EQ (34, (*grsd_desc)[0].histogram[12]);
+ EXPECT_EQ (167, (*grsd_desc)[0].histogram[14]);
+ EXPECT_EQ (68, (*grsd_desc)[0].histogram[16]);
+ EXPECT_EQ (204, (*grsd_desc)[0].histogram[18]);
+ EXPECT_EQ (0, (*grsd_desc)[0].histogram[20]);
}
#include <iostream>
using namespace pcl;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
PointCloud<PointXYZ> cloud;
PointCloud<Normal> output;
n.compute (output);
- EXPECT_EQ (output.points.size (), cloud.points.size ());
+ EXPECT_EQ (output.size (), cloud.size ());
EXPECT_EQ (output.width, cloud.width);
EXPECT_EQ (output.height, cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: output)
{
- EXPECT_NEAR (std::abs (output.points[i].normal_x), 0, 1e-2);
- EXPECT_NEAR (std::abs (output.points[i].normal_y), 0, 1e-2);
- EXPECT_NEAR (std::abs (output.points[i].normal_z), 1.0, 1e-2);
+ EXPECT_NEAR (std::abs (point.normal_x), 0, 1e-2);
+ EXPECT_NEAR (std::abs (point.normal_y), 0, 1e-2);
+ EXPECT_NEAR (std::abs (point.normal_z), 1.0, 1e-2);
}
}
ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);
ne.compute (output);
- EXPECT_EQ (output.points.size (), cloud.points.size ());
+ EXPECT_EQ (output.size (), cloud.size ());
EXPECT_EQ (output.width, cloud.width);
EXPECT_EQ (output.height, cloud.height);
ne.setNormalEstimationMethod (ne.AVERAGE_3D_GRADIENT);
ne.compute (output);
- EXPECT_EQ (output.points.size (), cloud.points.size ());
+ EXPECT_EQ (output.size (), cloud.size ());
EXPECT_EQ (output.width, cloud.width);
EXPECT_EQ (output.height, cloud.height);
ne.setNormalEstimationMethod (ne.AVERAGE_DEPTH_CHANGE);
ne.compute (output);
- EXPECT_EQ (output.points.size (), cloud.points.size ());
+ EXPECT_EQ (output.size (), cloud.size ());
EXPECT_EQ (output.width, cloud.width);
EXPECT_EQ (output.height, cloud.height);
ne.setNormalEstimationMethod (ne.SIMPLE_3D_GRADIENT);
ne.compute (output);
- EXPECT_EQ (output.points.size (), cloud.points.size ());
+ EXPECT_EQ (output.size (), cloud.size ());
EXPECT_EQ (output.width, cloud.width);
EXPECT_EQ (output.height, cloud.height);
ne.setNormalEstimationMethod (ne.SIMPLE_3D_GRADIENT);
ne.compute (output);
- EXPECT_EQ (output.points.size (), 0);
+ EXPECT_EQ (output.size (), 0);
EXPECT_EQ (output.width, 0);
EXPECT_EQ (output.height, 0);
}
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
// estimate
mi.compute (*moments);
- EXPECT_EQ (moments->points.size (), indices.size ());
+ EXPECT_EQ (moments->size (), indices.size ());
for (const auto &point : moments->points)
{
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
indices[i] = i;
*/
#include <iostream>
-#include <sstream>
#include <pcl/test/gtest.h>
#include <pcl/features/narf.h>
#include <pcl/common/eigen.h>
+#include <pcl/common/angles.h> // for deg2rad
using namespace pcl;
#include <pcl/test/gtest.h>
#include <pcl/point_cloud.h>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/features/normal_3d.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/integral_image_normal.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
EXPECT_NEAR (curvature, 0.0693136, 1e-4);
// flipNormalTowardsViewpoint (Vector)
- flipNormalTowardsViewpoint (cloud.points[0], 0, 0, 0, plane_parameters);
+ flipNormalTowardsViewpoint (cloud[0], 0, 0, 0, plane_parameters);
EXPECT_NEAR (plane_parameters[0], -0.035592, 1e-4);
EXPECT_NEAR (plane_parameters[1], -0.369596, 1e-4);
EXPECT_NEAR (plane_parameters[2], -0.928511, 1e-4);
EXPECT_NEAR (plane_parameters[3], 0.0799743, 1e-4);
// flipNormalTowardsViewpoint
- flipNormalTowardsViewpoint (cloud.points[0], 0, 0, 0, nx, ny, nz);
+ flipNormalTowardsViewpoint (cloud[0], 0, 0, 0, nx, ny, nz);
EXPECT_NEAR (nx, -0.035592, 1e-4);
EXPECT_NEAR (ny, -0.369596, 1e-4);
EXPECT_NEAR (nz, -0.928511, 1e-4);
// estimate
n.compute (*normals);
- EXPECT_EQ (normals->points.size (), indices.size ());
+ EXPECT_EQ (normals->size (), indices.size ());
for (const auto &point : normals->points)
{
surfaceptr->points.resize (640 * 480);
surfaceptr->width = 640;
surfaceptr->height = 480;
- EXPECT_EQ (surfaceptr->points.size (), surfaceptr->width * surfaceptr->height);
+ EXPECT_EQ (surfaceptr->size (), surfaceptr->width * surfaceptr->height);
n.setSearchSurface (surfaceptr);
tree.reset ();
n.setSearchMethod (tree);
// estimate
n.compute (*normals);
- EXPECT_EQ (normals->points.size (), indices.size ());
+ EXPECT_EQ (normals->size (), indices.size ());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
virtual int nearestKSearch (const PointT &point, int k, std::vector<int> &k_indices,
std::vector<float> &k_sqr_distances ) const
{
- (void)point;
+ pcl::utils::ignore(point);
EXPECT_GE (k_indices.size(), k);
EXPECT_GE (k_sqr_distances.size(), k);
virtual int radiusSearch (const PointT& point, double radius, std::vector<int>& k_indices,
std::vector<float>& k_sqr_distances, unsigned int max_nn = 0 ) const
{
- (void)point;
- (void)radius;
- (void)k_indices;
- (void)k_sqr_distances;
+ pcl::utils::ignore(point, radius, k_indices, k_sqr_distances);
return max_nn;
}
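
pcl::utils::ignore (from <pcl/common/utils.h>, included in the hunks above) is a variadic helper that marks any number of parameters as intentionally unused, replacing chains of (void) casts. A minimal sketch; the callback here is made up purely for illustration:

#include <pcl/common/utils.h> // for pcl::utils::ignore

static void
progressCallback (int percent, void *cookie)
{
  // One call replaces the old `(void) percent; (void) cookie;` idiom
  pcl::utils::ignore (percent, cookie);
}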
// estimate
n.compute (*normals);
- EXPECT_EQ (normals->points.size (), indices.size ());
+ EXPECT_EQ (normals->size (), indices.size ());
for (const auto &point : normals->points)
{
double y = ypos;
double x = xpos;
- cloudptr->points[idx++] = PointXYZ(float(x), float(y), float(z));
+ (*cloudptr)[idx++] = PointXYZ(float(x), float(y), float(z));
}
}
normalsVec.resize(normals->size());
for(std::size_t i = 0; i < normals->size(); ++i )
{
- normalsVec[i].x = normals->points[i].normal_x;
- normalsVec[i].y = normals->points[i].normal_y;
- normalsVec[i].z = normals->points[i].normal_z;
+ normalsVec[i].x = (*normals)[i].normal_x;
+ normalsVec[i].y = (*normals)[i].normal_y;
+ normalsVec[i].z = (*normals)[i].normal_z;
}
for (const auto &point : normals->points)
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
indices[i] = i;
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#include <pcl/features/organized_edge_detection.h>
+#include <pcl/test/gtest.h>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+
+namespace {
+class OrganizedPlaneDetectionTestFixture : public ::testing::Test {
+protected:
+ const int INNER_SQUARE_EDGE_LENGTH = 50;
+ const int OUTER_SQUARE_EDGE_LENGTH = INNER_SQUARE_EDGE_LENGTH * 2;
+ const float SYNTHETIC_CLOUD_BASE_DEPTH = 2.0;
+ const float SYNTHETIC_CLOUD_DEPTH_DISCONTINUITY = .02f;
+ const float SYNTHETIC_CLOUD_RESOLUTION = 0.01f;
+
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_;
+ std::set<pcl::index_t> outer_perimeter_;
+ std::set<pcl::index_t> inner_perimeter_;
+
+ void
+ SetUp() override
+ {
+ cloud_ = generateSyntheticEdgeDetectionCloud();
+ }
+
+private:
+ pcl::PointCloud<pcl::PointXYZ>::Ptr
+ generateSyntheticEdgeDetectionCloud()
+ {
+ auto organized_test_cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>(
+ OUTER_SQUARE_EDGE_LENGTH, OUTER_SQUARE_EDGE_LENGTH);
+
+ // Draw a smaller square in front of a larger square, both centered on the
+ // view axis to generate synthetic occluding and occluded edges based on depth
+ // discontinuity between neighboring pixels. The base depth and resolution are
+ // arbitrary and useful for visualizing the cloud. The discontinuity of the
+ // generated cloud must be greater than the threshold set when running the
+ // organized edge detection algorithm.
+ const auto outer_square_ctr = OUTER_SQUARE_EDGE_LENGTH / 2;
+ const auto inner_square_ctr = INNER_SQUARE_EDGE_LENGTH / 2;
+ const auto left_col = outer_square_ctr - inner_square_ctr;
+ const auto right_col = outer_square_ctr + inner_square_ctr;
+ const auto top_row = outer_square_ctr - inner_square_ctr;
+ const auto bottom_row = outer_square_ctr + inner_square_ctr;
+
+ for (auto row = 0; row < OUTER_SQUARE_EDGE_LENGTH; ++row) {
+ for (auto col = 0; col < OUTER_SQUARE_EDGE_LENGTH; ++col) {
+ const float x = col - outer_square_ctr;
+ const float y = row - inner_square_ctr;
+
+ auto depth = SYNTHETIC_CLOUD_BASE_DEPTH;
+
+        // If the pixel falls inside the smaller box, set its depth appropriately
+ if (col >= left_col && col < right_col) {
+ if (row >= top_row && row < bottom_row) {
+
+ depth = SYNTHETIC_CLOUD_BASE_DEPTH - SYNTHETIC_CLOUD_DEPTH_DISCONTINUITY;
+
+          // Record indices of the outer perimeter points of the small inner square that
+ // correspond to the occluding edge points
+ if ((col == left_col || col == right_col - 1) ||
+ (row == top_row || row == bottom_row - 1)) {
+ outer_perimeter_.insert(row * organized_test_cloud->width + col);
+ }
+ }
+ }
+
+      // Record indices of the inner perimeter points of the large outer square that
+ // correspond to the occluded edge points
+ if (((row == top_row - 1 || row == bottom_row) &&
+ (col >= left_col - 1 && col <= right_col)) ||
+ ((row >= top_row && row < bottom_row) &&
+ (col == left_col - 1 || col == right_col))) {
+ inner_perimeter_.insert(row * organized_test_cloud->width + col);
+ }
+
+ organized_test_cloud->at(col, row).x = x * SYNTHETIC_CLOUD_RESOLUTION;
+ organized_test_cloud->at(col, row).y = y * SYNTHETIC_CLOUD_RESOLUTION;
+ organized_test_cloud->at(col, row).z = depth;
+ }
+ }
+
+ return organized_test_cloud;
+ }
+};
+} // namespace
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+/*
+This test is designed to ensure that the organized edge detection appropriately
+classifies occluding and occluded edges and to specifically detect the type of
+regression detailed in PR 4275 (https://github.com/PointCloudLibrary/pcl/pull/4275).
+This test works by generating a synthetic cloud of one square slightly in front of
+another square, so that occluding edges and occluded edges are generated. The
+regression introduced in PCL 1.10.1 was a logic bug that caused both occluding and
+occluded edges to be miscategorized as occluding edges. This test should catch
+this and similar bugs.
+*/
+TEST_F(OrganizedPlaneDetectionTestFixture, OccludedAndOccludingEdges)
+{
+ constexpr auto MAX_SEARCH_NEIGHBORS = 8;
+
+ // The depth discontinuity check to determine whether an edge exists is linearly
+ // dependent on the depth of the points in the cloud (not a fixed distance). The
+  // algorithm iterates through each point in the cloud, finds the neighboring
+  // point with the largest discontinuity value, and compares that value against a
+  // threshold multiplied by the actual depth value of the point. Therefore:
+ // abs(SYNTHETIC_CLOUD_DEPTH_DISCONTINUITY) must be greater than
+ // DEPTH_DISCONTINUITY_THRESHOLD * abs(SYNTHETIC_CLOUD_BASE_DEPTH)
+ const auto DEPTH_DISCONTINUITY_THRESHOLD =
+ SYNTHETIC_CLOUD_DEPTH_DISCONTINUITY / (SYNTHETIC_CLOUD_BASE_DEPTH * 1.1f);
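+  // For the fixture constants this evaluates to 0.02 / (2.0 * 1.1) ~= 0.0091, so the
+  // per-point cutoff is at most ~0.0091 * 2.0 ~= 0.018, comfortably below the synthetic
+  // discontinuity of 0.02.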
+
+ auto oed = pcl::OrganizedEdgeBase<pcl::PointXYZ, pcl::Label>();
+ auto labels = pcl::PointCloud<pcl::Label>();
+ auto label_indices = std::vector<pcl::PointIndices>();
+
+ oed.setEdgeType(oed.EDGELABEL_OCCLUDING | oed.EDGELABEL_OCCLUDED);
+ oed.setInputCloud(cloud_);
+ oed.setDepthDisconThreshold(DEPTH_DISCONTINUITY_THRESHOLD);
+ oed.setMaxSearchNeighbors(MAX_SEARCH_NEIGHBORS);
+ oed.compute(labels, label_indices);
+
+ const auto occluding_indices = std::set<pcl::index_t>(
+ label_indices[1].indices.begin(), label_indices[1].indices.end());
+ EXPECT_EQ(occluding_indices, outer_perimeter_);
+
+ const auto occluded_indices = std::set<pcl::index_t>(label_indices[2].indices.begin(),
+ label_indices[2].indices.end());
+ EXPECT_EQ(occluded_indices, inner_perimeter_);
+}
+
+/* ---[ */
+int
+main(int argc, char** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return (RUN_ALL_TESTS());
+}
+/* ]--- */
{
for (int j = 0; j < ndims; ++j)
{
- ASSERT_EQ (output0.points[i].histogram[j], output1.points[i].histogram[j]);
- ASSERT_EQ (output1.points[i].histogram[j], output2.points[i].histogram[j]);
+ ASSERT_EQ (output0[i].histogram[j], output1[i].histogram[j]);
+ ASSERT_EQ (output1[i].histogram[j], output2[i].histogram[j]);
}
}
{
for (int j = 0; j < ndims; ++j)
{
- ASSERT_EQ (output3.points[i].histogram[j], output4.points[i].histogram[j]);
+ ASSERT_EQ (output3[i].histogram[j], output4[i].histogram[j]);
}
}
}
// estimate
pfh.compute (*pfhs);
- EXPECT_EQ (pfhs->points.size (), indices.size ());
+ EXPECT_EQ (pfhs->size (), indices.size ());
for (const auto &point : pfhs->points)
{
EXPECT_NEAR (point.histogram[25], 0.223902 , 1e-4);
EXPECT_NEAR (point.histogram[26], 0.07633 , 1e-4);
}
- //Eigen::Map<Eigen::VectorXf> h (&(pfhs->points[0].histogram[0]), 125);
+ //Eigen::Map<Eigen::VectorXf> h (&((*pfhs)[0].histogram[0]), 125);
//std::cerr << h.head<27> () << std::endl;
// Test results when setIndices and/or setSearchSurface are used
// estimate
fpfh.compute (*fpfhs);
- EXPECT_EQ (fpfhs->points.size (), indices.size ());
-
- EXPECT_NEAR (fpfhs->points[0].histogram[0], 1.58591, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[1], 1.68365, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[2], 6.71 , 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[3], 23.0717, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[4], 33.3844, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[5], 20.4002, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[6], 7.31067, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[7], 1.02635, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[8], 0.48591, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[9], 1.47069, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[10], 2.87061, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[11], 1.78321, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[12], 4.30795, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[13], 7.05514, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[14], 9.37615, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[15], 17.963 , 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[16], 18.2801, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[17], 14.2766, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[18], 10.8542, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[19], 6.07925, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[20], 5.28565, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[21], 4.73887, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[22], 0.56984, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[23], 3.29826, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[24], 5.28156, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[25], 5.26939, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[26], 3.13191, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[27], 1.74453, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[28], 9.41971, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[29], 21.5894, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[30], 24.6302, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[31], 17.7764, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[32], 7.28878, 1e-2);
+ EXPECT_EQ (fpfhs->size (), indices.size ());
+
+ EXPECT_NEAR ((*fpfhs)[0].histogram[0], 1.58591, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[1], 1.68365, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[2], 6.71 , 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[3], 23.0717, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[4], 33.3844, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[5], 20.4002, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[6], 7.31067, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[7], 1.02635, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[8], 0.48591, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[9], 1.47069, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[10], 2.87061, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[11], 1.78321, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[12], 4.30795, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[13], 7.05514, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[14], 9.37615, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[15], 17.963 , 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[16], 18.2801, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[17], 14.2766, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[18], 10.8542, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[19], 6.07925, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[20], 5.28565, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[21], 4.73887, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[22], 0.56984, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[23], 3.29826, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[24], 5.28156, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[25], 5.26939, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[26], 3.13191, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[27], 1.74453, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[28], 9.41971, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[29], 21.5894, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[30], 24.6302, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[31], 17.7764, 1e-2);
+ EXPECT_NEAR ((*fpfhs)[0].histogram[32], 7.28878, 1e-2);
// Test results when setIndices and/or setSearchSurface are used
// estimate
vfh.compute (*vfhs);
- EXPECT_EQ (int (vfhs->points.size ()), 1);
+ EXPECT_EQ (int (vfhs->size ()), 1);
//for (std::size_t d = 0; d < 308; ++d)
- // std::cerr << vfhs.points[0].histogram[d] << std::endl;
+ // std::cerr << vfhs[0].histogram[d] << std::endl;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
p.z = static_cast<float> (z);
cloud->points.push_back (p);
}
- cloud->width = static_cast<std::uint32_t> (cloud->points.size ());
+ cloud->width = cloud->size ();
cloud->height = 1;
pcl::GFPFHEstimation<PointXYZL, PointXYZL, GFPFHSignature16> gfpfh;
const float ref_values[] = { 1877, 6375, 5361, 14393, 6674, 2471, 2248, 2753, 3117, 4585, 14388, 32407, 15122, 3061, 3202, 794 };
- EXPECT_EQ (descriptor.points.size (), 1);
- for (std::size_t i = 0; i < std::size_t (descriptor.points[0].descriptorSize ()); ++i)
+ EXPECT_EQ (descriptor.size (), 1);
+ for (std::size_t i = 0; i < std::size_t (descriptor[0].descriptorSize ()); ++i)
{
- EXPECT_EQ (descriptor.points[0].histogram[i], ref_values[i]);
+ EXPECT_EQ (descriptor[0].histogram[i], ref_values[i]);
}
}
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
ppf_estimation.compute (*feature_cloud);
// Check for size of output
- EXPECT_EQ (feature_cloud->points.size (), indices.size () * cloud.points.size ());
+ EXPECT_EQ (feature_cloud->size (), indices.size () * cloud.size ());
// Now check for a few values in the feature cloud
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f1));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f2));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f3));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].f4));
- EXPECT_TRUE (std::isnan (feature_cloud->points[0].alpha_m));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f1));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f2));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f3));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].f4));
+ EXPECT_TRUE (std::isnan ((*feature_cloud)[0].alpha_m));
- EXPECT_NEAR (feature_cloud->points[15127].f1, -2.51637, 1e-4);
- EXPECT_NEAR (feature_cloud->points[15127].f2, -0.00365916, 1e-4);
- EXPECT_NEAR (feature_cloud->points[15127].f3, -0.521141, 1e-4);
- EXPECT_NEAR (feature_cloud->points[15127].f4, 0.0106809, 1e-4);
- EXPECT_NEAR (feature_cloud->points[15127].alpha_m, -0.255664, 1e-4);
- EXPECT_NEAR (feature_cloud->points[30254].f1, 0.185142, 1e-4);
- EXPECT_NEAR (feature_cloud->points[30254].f2, 0.0425001, 1e-4);
- EXPECT_NEAR (feature_cloud->points[30254].f3, -0.191276, 1e-4);
- EXPECT_NEAR (feature_cloud->points[30254].f4, 0.0138508, 1e-4);
- EXPECT_NEAR (feature_cloud->points[30254].alpha_m, 2.42955, 1e-4);
- EXPECT_NEAR (feature_cloud->points[45381].f1, -1.96263, 1e-4);
- EXPECT_NEAR (feature_cloud->points[45381].f2, -0.431919, 1e-4);
- EXPECT_NEAR (feature_cloud->points[45381].f3, 0.868716, 1e-4);
- EXPECT_NEAR (feature_cloud->points[45381].f4, 0.140129, 1e-4);
- EXPECT_NEAR (feature_cloud->points[45381].alpha_m, -1.97276, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[15127].f1, -2.51637, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[15127].f2, -0.00365916, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[15127].f3, -0.521141, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[15127].f4, 0.0106809, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[15127].alpha_m, -0.255664, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[30254].f1, 0.185142, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[30254].f2, 0.0425001, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[30254].f3, -0.191276, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[30254].f4, 0.0138508, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[30254].alpha_m, 2.42955, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[45381].f1, -1.96263, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[45381].f2, -0.431919, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[45381].f3, 0.868716, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[45381].f4, 0.140129, 1e-4);
+ EXPECT_NEAR ((*feature_cloud)[45381].alpha_m, -1.97276, 1e-4);
}
/* ---[ */
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
indices[i] = i;
#include <pcl/features/normal_based_signature.h>
using namespace pcl;
-using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, FeaturePtr)
#include <pcl/features/rift.h>
using namespace pcl;
-using namespace std;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, RIFTEstimation)
cloud_xyzi.points.push_back (p);
}
}
- cloud_xyzi.width = static_cast<std::uint32_t> (cloud_xyzi.points.size ());
+ cloud_xyzi.width = cloud_xyzi.size ();
// Generate the intensity gradient data
PointCloud<IntensityGradient> gradient;
gradient.height = 1;
- gradient.width = static_cast<std::uint32_t> (cloud_xyzi.points.size ());
+ gradient.width = cloud_xyzi.size ();
gradient.is_dense = true;
gradient.points.resize (gradient.width);
- for (std::size_t i = 0; i < cloud_xyzi.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_xyzi.size (); ++i)
{
- const PointXYZI &p = cloud_xyzi.points[i];
+ const PointXYZI &p = cloud_xyzi[i];
// Compute the surface normal analytically.
float nx = p.x;
float gy = (-ny * nx) * tmpx + (1 - ny * ny) * tmpy + (-ny * nz) * tmpz;
float gz = (-nz * nx) * tmpx + (-nz * ny) * tmpy + (1 - nz * nz) * tmpz;
- gradient.points[i].gradient[0] = gx;
- gradient.points[i].gradient[1] = gy;
- gradient.points[i].gradient[2] = gz;
+ gradient[i].gradient[0] = gx;
+ gradient[i].gradient[1] = gy;
+ gradient[i].gradient[2] = gz;
}
// Compute the RIFT features
rift_est.compute (rift_output);
// Compare to independently verified values
- const RIFTDescriptor &rift = rift_output.points[220];
+ const RIFTDescriptor &rift = rift_output[220];
float correct_rift_feature_values[32];
unsigned major, minor, patch;
pcl::PointCloud<pcl::Histogram <135> >::Ptr histograms (new pcl::PointCloud <pcl::Histogram <135> > ());
feature_estimator.compute (*histograms);
- EXPECT_NE (0, histograms->points.size ());
+ EXPECT_NE (0, histograms->size ());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
std::vector <pcl::Vertices> empty_triangles;
feature_estimator.setTriangles (empty_triangles);
feature_estimator.compute (*histograms);
- EXPECT_EQ (0, histograms->points.size ());
+ EXPECT_EQ (0, histograms->size ());
}
/* ---[ */
using namespace pcl;
using namespace pcl::io;
-using namespace std;
search::KdTree<PointXYZ>::Ptr tree (new search::KdTree<PointXYZ> ());
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ> ());
n.setRadiusSearch (rad);
n.compute (*normals);
- EXPECT_NEAR (normals->points[103].normal_x, 0.694, 0.1);
- EXPECT_NEAR (normals->points[103].normal_y, -0.562, 0.1);
- EXPECT_NEAR (normals->points[103].normal_z, -0.448, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_x, 0.694, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_y, -0.562, 0.1);
+ EXPECT_NEAR ((*normals)[103].normal_z, -0.448, 0.1);
// RSDEstimation
double max_plane_radius = 0.1;
#include <pcl/test/gtest.h>
#include <pcl/point_cloud.h>
-#include <pcl/features/normal_3d_omp.h>
+#include <pcl/features/normal_3d.h> // for NormalEstimation
#include <pcl/io/pcd_io.h>
#include <pcl/features/shot.h>
#include <pcl/features/shot_omp.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
{
ASSERT_EQ (d0.size (), d1.size ());
for (std::size_t i = 0; i < d1.size (); ++i)
- for (std::size_t j = 0; j < d0.points[i].descriptor.size(); ++j)
- ASSERT_EQ (d0.points[i].descriptor[j], d1.points[i].descriptor[j]);
+ for (std::size_t j = 0; j < d0[i].descriptor.size(); ++j)
+ ASSERT_EQ (d0[i].descriptor[j], d1[i].descriptor[j]);
}
///////////////////////////////////////////////////////////////////////////////////
ASSERT_EQ (d0.size (), d1.size ());
for (std::size_t i = 0; i < d1.size (); ++i)
for (std::size_t j = 0; j < 352; ++j)
- ASSERT_EQ (d0.points[i].descriptor[j], d1.points[i].descriptor[j]);
+ ASSERT_EQ (d0[i].descriptor[j], d1[i].descriptor[j]);
}
///////////////////////////////////////////////////////////////////////////////////
ASSERT_EQ (d0.size (), d1.size ());
for (std::size_t i = 0; i < d1.size (); ++i)
for (std::size_t j = 0; j < 1344; ++j)
- ASSERT_EQ (d0.points[i].descriptor[j], d1.points[i].descriptor[j]);
+ ASSERT_EQ (d0[i].descriptor[j], d1[i].descriptor[j]);
}
///////////////////////////////////////////////////////////////////////////////////
ASSERT_EQ (d0.size (), d1.size ());
for (std::size_t i = 0; i < d1.size (); ++i)
for (std::size_t j = 0; j < 1980; ++j)
- ASSERT_EQ (d0.points[i].descriptor[j], d1.points[i].descriptor[j]);
+ ASSERT_EQ (d0[i].descriptor[j], d1[i].descriptor[j]);
}
///////////////////////////////////////////////////////////////////////////////////
ASSERT_EQ (d0.size (), d1.size ());
for (std::size_t i = 0; i < d1.size (); ++i)
for (std::size_t j = 0; j < 1960; ++j)
- ASSERT_EQ (d0.points[i].descriptor[j], d1.points[i].descriptor[j]);
+ ASSERT_EQ (d0[i].descriptor[j], d1[i].descriptor[j]);
}
///////////////////////////////////////////////////////////////////////////////////
// Check frames
pcl::PointCloud<pcl::ReferenceFrame>::ConstPtr f = est.getInputReferenceFrames ();
pcl::PointCloud<pcl::ReferenceFrame>::ConstPtr f2 = est2.getInputReferenceFrames ();
- ASSERT_EQ (frames->points.size (), f->points.size ());
- ASSERT_EQ (f2->points.size (), f->points.size ());
- for (int i = 0; i < static_cast<int> (frames->points.size ()); ++i)
+ ASSERT_EQ (frames->size (), f->size ());
+ ASSERT_EQ (f2->size (), f->size ());
+ for (int i = 0; i < static_cast<int> (frames->size ()); ++i)
{
for (unsigned j = 0; j < 9; ++j)
- ASSERT_EQ (frames->points[i].rf[j], f->points[i].rf[j]);
+ ASSERT_EQ ((*frames)[i].rf[j], (*f)[i].rf[j]);
for (unsigned j = 0; j < 9; ++j)
- ASSERT_EQ (frames->points[i].rf[j], f2->points[i].rf[j]);
+ ASSERT_EQ ((*frames)[i].rf[j], (*f2)[i].rf[j]);
}
// The two cases above should produce equivalent results
n.setRadiusSearch (20 * mr);
n.compute (*normals);
- EXPECT_NEAR (normals->points[103].normal_x, 0.36683175, 1e-4);
- EXPECT_NEAR (normals->points[103].normal_y, -0.44696972, 1e-4);
- EXPECT_NEAR (normals->points[103].normal_z, -0.81587529, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_x, -0.71414840, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_y, -0.06002361, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_z, -0.69741613, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_x, 0.36683175, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_y, -0.44696972, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_z, -0.81587529, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_x, -0.71414840, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_y, -0.06002361, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_z, -0.69741613, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_x, -0.45109111, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_y, -0.19499126, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_z, -0.87091631, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_x, -0.45109111, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_y, -0.19499126, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_z, -0.87091631, 1e-4);
/*
SHOTEstimation<PointXYZ, Normal, SHOT> shot;
// estimate
shot.compute (*shots);
- EXPECT_EQ (shots->points.size (), indices.size ());
-
- EXPECT_NEAR (shots->points[103].descriptor[9 ], 0.0072018504, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[10], 0.0023103887, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[11], 0.0024724449, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[19], 0.0031367359, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[20], 0.17439659, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[21], 0.070665278, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[42], 0.013304681, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[53], 0.0073520984, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[54], 0.013584172, 1e-4);
- EXPECT_NEAR (shots->points[103].descriptor[55], 0.0050609680, 1e-4);
+ EXPECT_EQ (shots->size (), indices.size ());
+
+ EXPECT_NEAR ((*shots)[103].descriptor[9 ], 0.0072018504, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[10], 0.0023103887, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[11], 0.0024724449, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[19], 0.0031367359, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[20], 0.17439659, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[21], 0.070665278, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[42], 0.013304681, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[53], 0.0073520984, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[54], 0.013584172, 1e-4);
+ EXPECT_NEAR ((*shots)[103].descriptor[55], 0.0050609680, 1e-4);
*/
// SHOT352
// estimate
shot352.compute (*shots352);
- EXPECT_EQ (shots352->points.size (), indices.size ());
+ EXPECT_EQ (shots352->size (), indices.size ());
- EXPECT_NEAR (shots352->points[103].descriptor[9 ], 0.0072018504, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[10], 0.0023103887, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[11], 0.0024724449, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[19], 0.0031367359, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[20], 0.17439659, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[21], 0.06542316, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[42], 0.013304681, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[53], 0.0073520984, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[54], 0.013584172, 1e-4);
- EXPECT_NEAR (shots352->points[103].descriptor[55], 0.0050609680, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[9 ], 0.0072018504, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[10], 0.0023103887, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[11], 0.0024724449, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[19], 0.0031367359, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[20], 0.17439659, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[21], 0.06542316, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[42], 0.013304681, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[53], 0.0073520984, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[54], 0.013584172, 1e-4);
+ EXPECT_NEAR ((*shots352)[103].descriptor[55], 0.0050609680, 1e-4);
// Test results when setIndices and/or setSearchSurface are used
// estimate
shot.compute (*shots);
- EXPECT_EQ (shots->points.size (), indices.size ());
-
- EXPECT_NEAR (shots->points[103].descriptor[18], 0.0077019366, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[19], 0.0024708188, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[21], 0.0079652183, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[38], 0.0067090928, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[39], 0.17498907, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[40], 0.078413926, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[81], 0.014228539, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[103], 0.022390056, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[105], 0.0058866320, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[123], 0.019105887, 1e-5);
+ EXPECT_EQ (shots->size (), indices.size ());
+
+ EXPECT_NEAR ((*shots)[103].descriptor[18], 0.0077019366, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[19], 0.0024708188, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[21], 0.0079652183, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[38], 0.0067090928, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[39], 0.17498907, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[40], 0.078413926, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[81], 0.014228539, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[103], 0.022390056, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[105], 0.0058866320, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[123], 0.019105887, 1e-5);
// Test results when setIndices and/or setSearchSurface are used
pcl::IndicesPtr test_indices (new pcl::Indices (0));
// Create fake point cloud with colors
PointCloud<PointXYZRGBA> cloudWithColors;
- for (int i = 0; i < static_cast<int> (cloud.points.size ()); ++i)
+ for (int i = 0; i < static_cast<int> (cloud.size ()); ++i)
{
PointXYZRGBA p;
- p.x = cloud.points[i].x;
- p.y = cloud.points[i].y;
- p.z = cloud.points[i].z;
+ p.x = cloud[i].x;
+ p.y = cloud[i].y;
+ p.z = cloud[i].z;
p.rgba = ( (i%255) << 16 ) + ( ( (255 - i ) %255) << 8) + ( ( i*37 ) %255);
cloudWithColors.push_back(p);
// estimate
shot.compute (*shots);
- EXPECT_EQ (shots->points.size (), indices.size ());
-
- EXPECT_NEAR (shots->points[103].descriptor[10], 0.0020453099, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[11], 0.0021887729, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[21], 0.062557608, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[42], 0.011778189, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[53], 0.0065085669, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[54], 0.012025614, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[55], 0.0044803056, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[64], 0.064429596, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[65], 0.046486385, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[86], 0.011518310, 1e-5);
-
- EXPECT_NEAR (shots->points[103].descriptor[357], 0.0020453099, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[360], 0.0027993850, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[386], 0.045115642, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[387], 0.059068538, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[389], 0.0047547864, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[453], 0.0051176427, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[481], 0.0053625242, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[482], 0.012025614, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[511], 0.0057367259, 1e-5);
- EXPECT_NEAR (shots->points[103].descriptor[512], 0.048357654, 1e-5);
+ EXPECT_EQ (shots->size (), indices.size ());
+
+ EXPECT_NEAR ((*shots)[103].descriptor[10], 0.0020453099, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[11], 0.0021887729, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[21], 0.062557608, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[42], 0.011778189, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[53], 0.0065085669, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[54], 0.012025614, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[55], 0.0044803056, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[64], 0.064429596, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[65], 0.046486385, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[86], 0.011518310, 1e-5);
+
+ EXPECT_NEAR ((*shots)[103].descriptor[357], 0.0020453099, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[360], 0.0027993850, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[386], 0.045115642, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[387], 0.059068538, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[389], 0.0047547864, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[453], 0.0051176427, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[481], 0.0053625242, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[482], 0.012025614, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[511], 0.0057367259, 1e-5);
+ EXPECT_NEAR ((*shots)[103].descriptor[512], 0.048357654, 1e-5);
*/
// SHOT1344
// estimate
shot1344.compute (*shots1344);
- EXPECT_EQ (shots1344->points.size (), indices.size ());
-
- EXPECT_NEAR (shots1344->points[103].descriptor[10], 0.0020453099, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[11], 0.0021887729, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[21], 0.0579300672, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[42], 0.011778189, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[53], 0.0065085669, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[54], 0.012025614, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[55], 0.0044803056, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[64], 0.064453065, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[65], 0.046504568, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[86], 0.011518310, 1e-5);
-
- EXPECT_NEAR (shots1344->points[103].descriptor[357], 0.0020453099, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[360], 0.0027993850, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[386], 0.0451327376, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[387], 0.0544394031, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[389], 0.0047547864, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[453], 0.0051176427, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[481], 0.0053625242, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[482], 0.012025614, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[511], 0.0057367259, 1e-5);
- EXPECT_NEAR (shots1344->points[103].descriptor[512], 0.048375979, 1e-5);
+ EXPECT_EQ (shots1344->size (), indices.size ());
+
+ EXPECT_NEAR ((*shots1344)[103].descriptor[10], 0.0020453099, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[11], 0.0021887729, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[21], 0.0579300672, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[42], 0.011778189, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[53], 0.0065085669, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[54], 0.012025614, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[55], 0.0044803056, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[64], 0.064453065, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[65], 0.046504568, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[86], 0.011518310, 1e-5);
+
+ EXPECT_NEAR ((*shots1344)[103].descriptor[357], 0.0020453099, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[360], 0.0027993850, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[386], 0.0451327376, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[387], 0.0544394031, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[389], 0.0047547864, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[453], 0.0051176427, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[481], 0.0053625242, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[482], 0.012025614, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[511], 0.0057367259, 1e-5);
+ EXPECT_NEAR ((*shots1344)[103].descriptor[512], 0.048375979, 1e-5);
// Test results when setIndices and/or setSearchSurface are used
pcl::IndicesPtr test_indices (new pcl::Indices (0));
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
using namespace pcl;
using namespace pcl::test;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using KdTreePtr = search::KdTree<PointXYZ>::Ptr;
n.setRadiusSearch (20 * mr);
n.compute (*normals);
- EXPECT_NEAR (normals->points[103].normal_x, 0.36683175, 1e-4);
- EXPECT_NEAR (normals->points[103].normal_y, -0.44696972, 1e-4);
- EXPECT_NEAR (normals->points[103].normal_z, -0.81587529, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_x, -0.71414840, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_y, -0.06002361, 1e-4);
- EXPECT_NEAR (normals->points[200].normal_z, -0.69741613, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_x, 0.36683175, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_y, -0.44696972, 1e-4);
+ EXPECT_NEAR ((*normals)[103].normal_z, -0.81587529, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_x, -0.71414840, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_y, -0.06002361, 1e-4);
+ EXPECT_NEAR ((*normals)[200].normal_z, -0.69741613, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_x, -0.45109111, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_y, -0.19499126, 1e-4);
- EXPECT_NEAR (normals->points[140].normal_z, -0.87091631, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_x, -0.45109111, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_y, -0.19499126, 1e-4);
+ EXPECT_NEAR ((*normals)[140].normal_z, -0.87091631, 1e-4);
using SpinImage = Histogram<153>;
SpinImageEstimation<PointXYZ, Normal, SpinImage> spin_est(8, 0.5, 16);
// estimate
spin_est.compute (*spin_images);
- EXPECT_EQ (spin_images->points.size (), indices.size ());
-
- EXPECT_NEAR (spin_images->points[100].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[24], 0.00233226, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[48], 8.48662e-005, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[60], 0.0266387, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[96], 0.0414662, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[144], 0.0128513, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[24], 0.00932424, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[60], 0.0145733, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[96], 0.00034457, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[144], 0.0121195, 1e-4);
+ EXPECT_EQ (spin_images->size (), indices.size ());
+
+ EXPECT_NEAR ((*spin_images)[100].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[24], 0.00233226, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[48], 8.48662e-005, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[60], 0.0266387, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[96], 0.0414662, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[144], 0.0128513, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[24], 0.00932424, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[60], 0.0145733, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[96], 0.00034457, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[144], 0.0121195, 1e-4);
// radial SI, angular spin-images
spin_est.setAngularDomain ();
// estimate
spin_est.compute (*spin_images);
- EXPECT_EQ (spin_images->points.size (), indices.size ());
-
- EXPECT_NEAR (spin_images->points[100].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[24], 0.132139, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[48], 0.908814, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[60], 0.63875, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[96], 0.550392, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[144], 0.257136, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[24], 0.230605, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[60], 0.764872, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[96], 1.02824, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[144], 0.293567, 1e-4);
+ EXPECT_EQ (spin_images->size (), indices.size ());
+
+ EXPECT_NEAR ((*spin_images)[100].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[24], 0.132139, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[48], 0.908814, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[60], 0.63875, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[96], 0.550392, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[144], 0.257136, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[24], 0.230605, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[60], 0.764872, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[96], 1.02824, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[144], 0.293567, 1e-4);
// rectangular SI
spin_est.setRadialStructure (false);
// estimate
spin_est.compute (*spin_images);
- EXPECT_EQ (spin_images->points.size (), indices.size ());
-
- EXPECT_NEAR (spin_images->points[100].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[24], 0.000889345, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[60], 0.0489534, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[96], 0.0747141, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[132], 0.0173423, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[144], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[24], 0.0267132, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[60], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[96], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[108], 0.0209709, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[144], 0.029372, 1e-4);
+ EXPECT_EQ (spin_images->size (), indices.size ());
+
+ EXPECT_NEAR ((*spin_images)[100].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[24], 0.000889345, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[60], 0.0489534, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[96], 0.0747141, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[132], 0.0173423, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[144], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[24], 0.0267132, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[60], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[96], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[108], 0.0209709, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[144], 0.029372, 1e-4);
// rectangular SI, angular spin-images
spin_est.setAngularDomain ();
// estimate
spin_est.compute (*spin_images);
- EXPECT_EQ (spin_images->points.size (), indices.size ());
-
- EXPECT_NEAR (spin_images->points[100].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[24], 0.132139, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[60], 0.38800787925720215, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[96], 0.468881, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[108], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[132], 0.67901438474655151, 1e-4);
- EXPECT_NEAR (spin_images->points[100].histogram[144], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[0], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[12], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[24], 0.143845, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[36], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[48], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[60], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[72], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[84], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[96], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[108], 0.706084, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[120], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[132], 0, 1e-4);
- EXPECT_NEAR (spin_images->points[300].histogram[144], 0.272542, 1e-4);
+ EXPECT_EQ (spin_images->size (), indices.size ());
+
+ EXPECT_NEAR ((*spin_images)[100].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[24], 0.132139, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[60], 0.38800787925720215, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[96], 0.468881, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[108], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[132], 0.67901438474655151, 1e-4);
+ EXPECT_NEAR ((*spin_images)[100].histogram[144], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[0], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[12], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[24], 0.143845, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[36], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[48], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[60], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[72], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[84], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[96], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[108], 0.706084, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[120], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[132], 0, 1e-4);
+ EXPECT_NEAR ((*spin_images)[300].histogram[144], 0.272542, 1e-4);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cloud_xyzi.points.push_back (p);
}
}
- cloud_xyzi.width = static_cast<std::uint32_t> (cloud_xyzi.points.size ());
+ cloud_xyzi.width = cloud_xyzi.size ();
// Compute the intensity-domain spin features
using IntensitySpin = Histogram<20>;
ispin_est.compute (ispin_output);
// Compare to independently verified values
- const IntensitySpin &ispin = ispin_output.points[220];
+ const IntensitySpin &ispin = ispin_output[220];
const float correct_ispin_feature_values[20] = {2.4387f, 9.4737f, 21.3232f, 28.3025f, 22.5639f, 13.2426f, 35.7026f, 60.0755f,
66.9240f, 50.4225f, 42.7086f, 83.5818f, 105.4513f, 97.8454f, 67.3801f,
75.7127f, 119.4726f, 120.9649f, 93.4829f, 55.4045f};
return (-1);
}
- indices.resize (cloud.points.size ());
+ indices.resize (cloud.size ());
for (std::size_t i = 0; i < indices.size (); ++i)
indices[i] = static_cast<int> (i);
FILES test_morphological.cpp
LINK_WITH pcl_gtest pcl_common pcl_filters)
+PCL_ADD_TEST(filters_functor test_filters_functor
+ FILES test_functor_filter.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_filters)
+
PCL_ADD_TEST(filters_local_maximum test_filters_local_maximum
FILES test_local_maximum.cpp
LINK_WITH pcl_gtest pcl_common pcl_filters pcl_search pcl_octree)
FILES test_uniform_sampling.cpp
LINK_WITH pcl_gtest pcl_common pcl_filters)
+PCL_ADD_TEST(filters_convolution test_convolution
+ FILES test_convolution.cpp
+ LINK_WITH pcl_gtest pcl_filters)
+
if(BUILD_io)
PCL_ADD_TEST(filters_bilateral test_filters_bilateral
FILES test_bilateral.cpp
fbf_omp.filter (*cloud_filtered_omp);
PCL_INFO ("[FastBilateralFilterOMP] filtering took %f ms\n", tt.toc ());
- EXPECT_EQ (cloud_filtered_omp->points.size (), cloud_filtered->points.size ());
+ EXPECT_EQ (cloud_filtered_omp->size (), cloud_filtered->size ());
for (std::size_t j = 0; j < cloud_filtered_omp->size (); ++j)
{
if (std::isnan (cloud_filtered_omp->at (j).x))
#include <pcl/common/eigen.h>
using namespace pcl;
-using namespace std;
using namespace Eigen;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <pcl/pcl_tests.h>
#include <pcl/filters/convolution.h>
#include <pcl/point_types.h>
+#include <cmath>
+#include <array>
using namespace pcl;
+using namespace pcl::common;
+using namespace pcl::filters;
using namespace pcl::test;
-PointCloud<PointXYZI>::Ptr input (new PointCloud<PointXYZI> ());
-Eigen::ArrayXf filter(7);
+std::array<RGB, 5> colormap {
+ RGB(74, 19, 139),
+ RGB(29, 135, 228),
+ RGB(139, 194, 74),
+ RGB(255, 235, 59),
+ RGB(255, 0, 0),
+};
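+// The five stops form a dark purple -> blue -> green -> yellow -> red ramp;
+// interpolate_color() below maps a scalar in [lower_bound, upper_bound] onto this ramp.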
-TEST (Convolution, convolveRows)
+RGB interpolate_color(float lower_bound, float upper_bound, float value)
{
- using namespace pcl::common;
- using namespace pcl::filters;
+ if (value <= lower_bound) return colormap[0];
+ if (value >= upper_bound) return colormap.back();
+ float step_size = (upper_bound - lower_bound) / static_cast<float>(colormap.size() - 1);
+ std::size_t lower_index = static_cast<std::size_t>((value - lower_bound) / step_size);
+ value -= (lower_bound + static_cast<float>(lower_index) * step_size);
+ if (value == 0) return colormap[lower_index];
+ auto interpolate = [](std::uint8_t lower, std::uint8_t upper, float step_size, float value) {
+ return (lower == upper) ? lower : static_cast<std::uint8_t>(static_cast<float>(lower) + ((static_cast<float>(upper) - static_cast<float>(lower)) / step_size) * value);
+ };
+ return RGB(
+ interpolate(colormap[lower_index].r, colormap[lower_index + 1].r, step_size, value),
+ interpolate(colormap[lower_index].g, colormap[lower_index + 1].g, step_size, value),
+ interpolate(colormap[lower_index].b, colormap[lower_index + 1].b, step_size, value)
+ );
+}
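+// Example: interpolate_color(-1.6f, 1.6f, 0.0f) gives step_size = 0.8 and lower_index = 2
+// with a zero remainder, so it returns colormap[2], i.e. RGB(139, 194, 74).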
+
+TEST (Convolution, convolveRowsXYZI)
+{
+ // input
+ Eigen::ArrayXf filter(7);
+ filter << 0.00443305, 0.0540056, 0.242036, 0.39905, 0.242036, 0.0540056, 0.00443305;
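+  // The seven taps sum to ~1.0 and appear to be a normalised Gaussian (sigma ~= 1)
+  // truncated at +/-3 samples, so each interior output pixel is a weighted average
+  // of the 7 neighbouring pixels in its row.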
+ auto input = pcl::make_shared<PointCloud<PointXYZI>>();
+ input->width = 64;
+ input->height = 48;
+ input->resize (input->width * input->height);
+ for (std::uint32_t r = 0; r < input->height; r++)
+ {
+ float y = 40.0f + 40.0f / 65.0f * static_cast<float>(r);
+ float z = (r % 2 == 0) ? 1.0f : -1.0f;
+ for(std::uint32_t c = 0; c < input->width; c++)
+ {
+ float x = 65.0f - 30.0f / 65.0f * static_cast<float>(c);
+ z += (c % 2 == 0) ? 1.0f : -1.0f;
+ (*input) (c,r).intensity = x + y + z;
+ }
+ }
+
+ // filter
+ auto output = pcl::make_shared<PointCloud<PointXYZI>>();
pcl::filters::Convolution<PointXYZI, PointXYZI> convolve;
- PointCloud<PointXYZI>::Ptr output;
- output.reset (new PointCloud<PointXYZI> ());
+ convolve.setInputCloud(input);
+ convolve.setKernel(filter);
+ convolve.convolveRows(*output);
+
+  // expected output rows (first and last row)
+  // note: the 'ignore' border policy sets a border of width = filter_size / 2 to NaN for xyz and 0 for the intensity values
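+  // With the 7-tap kernel above, filter_size / 2 == 3, so columns 0-2 and 61-63 of each
+  // checked row are expected to remain at intensity 0 (the first and last three pairs below).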
+ std::array<float, 128> output_intensity {
+ // row 0 row 47
+ 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f,
+ 105.10825348f, 132.03129578f,
+ 104.66084290f, 131.58390808f,
+ 104.18517303f, 131.10823059f,
+ 103.73776245f, 130.66082764f,
+ 103.26208496f, 130.18515015f,
+ 102.81467438f, 129.73773193f,
+ 102.33901978f, 129.26208496f,
+ 101.89160919f, 128.81466675f,
+ 101.41593933f, 128.33900452f,
+ 100.96853638f, 127.89158630f,
+ 100.49286652f, 127.41592407f,
+ 100.04545593f, 126.96851349f,
+ 99.56979370f, 126.49285126f,
+ 99.12238312f, 126.04543304f,
+ 98.64672089f, 125.56977844f,
+ 98.19929504f, 125.12236786f,
+ 97.72364044f, 124.64669800f,
+ 97.27622223f, 124.19929504f,
+ 96.80056000f, 123.72362518f,
+ 96.35314941f, 123.27621460f,
+ 95.87748718f, 122.80054474f,
+ 95.43006897f, 122.35313416f,
+ 94.95440674f, 121.87747192f,
+ 94.50699615f, 121.43005371f,
+ 94.03132629f, 120.95440674f,
+ 93.58392334f, 120.50697327f,
+ 93.10826111f, 120.03132629f,
+ 92.66084290f, 119.58390808f,
+ 92.18517303f, 119.10823822f,
+ 91.73777008f, 118.66082764f,
+ 91.26210785f, 118.18516541f,
+ 90.81468964f, 117.73775482f,
+ 90.33903503f, 117.26208496f,
+ 89.89160156f, 116.81467438f,
+ 89.41595459f, 116.33901215f,
+ 88.96853638f, 115.89160919f,
+ 88.49288177f, 115.41593170f,
+ 88.04545593f, 114.96851349f,
+ 87.56979370f, 114.49285889f,
+ 87.12238312f, 114.04544830f,
+ 86.64672089f, 113.56979370f,
+ 86.19931030f, 113.12236023f,
+ 85.72364044f, 112.64670563f,
+ 85.27622986f, 112.19928741f,
+ 84.80056763f, 111.72362518f,
+ 84.35315704f, 111.27622223f,
+ 83.87749481f, 110.80056000f,
+ 83.43008423f, 110.35313416f,
+ 82.95442200f, 109.87748718f,
+ 82.50699615f, 109.43006134f,
+ 82.03134155f, 108.95440674f,
+ 81.58392334f, 108.50699615f,
+ 81.10826874f, 108.03132629f,
+ 80.66085052f, 107.58391571f,
+ 80.18519592f, 107.10824585f,
+ 79.73777008f, 106.66084290f,
+ 79.26210785f, 106.18517303f,
+ 78.81469727f, 105.73775482f,
+ 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f,
+ 0.00000000f, 0.00000000f,
+ };
+
+ // check result
+ for (std::uint32_t i = 0; i < output->width ; ++i)
+ {
+ EXPECT_FLOAT_EQ ((*output) (i, 0).intensity, output_intensity[i * 2 + 0]);
+ EXPECT_FLOAT_EQ ((*output) (i, 47).intensity, output_intensity[i * 2 + 1]);
+ }
+}
+
+TEST (Convolution, convolveRowsRGB)
+{
+ // input
+ Eigen::ArrayXf filter(7);
+ filter << 0.00443305, 0.0540056, 0.242036, 0.39905, 0.242036, 0.0540056, 0.00443305;
+ auto input = pcl::make_shared<PointCloud<RGB>>();
+ input->width = 64;
+ input->height = 48;
+ input->resize(input->width * input->height);
+ for (std::uint32_t r = 0; r < input->height; r++)
+ for (std::uint32_t c = 0; c < input->width; c++)
+ {
+ float x1 = -2.0f + (4.0f / (float)input->width) * (float)c;
+ float y1 = -2.0f + (4.0f / (float)input->height) * (float)r;
+ float x2 = -M_PI + (2.0f * M_PI / (float)input->width) * (float)c;
+ float y2 = -2.0f + (4.0f / (float)input->height) * (float)r;
+      float z = x1 * std::exp(-(x1 * x1 + y1 * y1)) * 2.5f + std::sin(x2) * std::sin(y2);
+ (*input) (c, r) = interpolate_color(-1.6f, 1.6f, z);
+ }
+
+ // filter
+ auto output = pcl::make_shared<PointCloud<RGB>>();
+ pcl::filters::Convolution<RGB, RGB> convolve;
convolve.setInputCloud (input);
convolve.setKernel (filter);
convolve.convolveRows (*output);
- // first output row
- Eigen::ArrayXf output_0 (64);
- output_0[0] = 0; output_0[1] = 0; output_0[2] = 0; output_0[3] = 94.1702; output_0[4] = 93.3588; output_0[5] = 92.8969; output_0[6] = 92.8653; output_0[7] = 92.484; output_0[8] = 92.0451; output_0[9] = 91.6455; output_0[10] = 90.9415; output_0[11] = 90.0539; output_0[12] = 89.3048; output_0[13] = 88.7623; output_0[14] = 88.3004; output_0[15] = 88.0044; output_0[16] = 87.7579; output_0[17] = 87.5424; output_0[18] = 87.4441; output_0[19] = 87.0711; output_0[20] = 86.1525; output_0[21] = 84.9014; output_0[22] = 84.1213; output_0[23] = 83.7668; output_0[24] = 83.5469; output_0[25] = 83.5159; output_0[26] = 83.538; output_0[27] = 83.641; output_0[28] = 83.3946; output_0[29] = 82.7889; output_0[30] = 82.3278; output_0[31] = 82.0983; output_0[32] = 82.2605; output_0[33] = 82.376; output_0[34] = 82.1481; output_0[35] = 81.5513; output_0[36] = 81.1708; output_0[37] = 81.3049; output_0[38] = 81.6366; output_0[39] = 81.5826; output_0[40] = 80.9955; output_0[41] = 80.3633; output_0[42] = 80.1213; output_0[43] = 80.3048; output_0[44] = 80.695; output_0[45] = 80.8831; output_0[46] = 80.6995; output_0[47] = 80.3633; output_0[48] = 80.435; output_0[49] = 81.278; output_0[50] = 83.3201; output_0[51] = 86.7523; output_0[52] = 88.1634; output_0[53] = 83.7249; output_0[54] = 77.6666; output_0[55] = 75.8489; output_0[56] = 77.8468; output_0[57] = 79.8108; output_0[58] = 80.4707; output_0[59] = 80.9244; output_0[60] = 81.8518; output_0[61] = 0; output_0[62] = 0; output_0[63] = 0;
+ // expected output (first and last row, interleaved)
+ // note: the "ignore" borders policy sets the border of width filter_size/2 to NaN for the
+ // xyz fields and to 0 for the rgb values
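+ // (if different behaviour is needed, the policy can presumably be changed via
+ // Convolution::setBordersPolicy, e.g. to BORDERS_POLICY_MIRROR or BORDERS_POLICY_DUPLICATE;
+ // the zero border entries below are specific to the "ignore" policy used here)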
+ std::array<RGB, 128> output_results {
+ // row 0 row 47
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(175, 206, 68), RGB(100, 173, 126),
+ RGB(187, 210, 67), RGB(88, 166, 143),
+ RGB(199, 214, 65), RGB(77, 160, 159),
+ RGB(210, 218, 64), RGB(66, 154, 175),
+ RGB(220, 222, 63), RGB(55, 149, 189),
+ RGB(230, 225, 61), RGB(46, 144, 202),
+ RGB(238, 228, 60), RGB(38, 139, 214),
+ RGB(246, 231, 59), RGB(32, 134, 221),
+ RGB(251, 230, 58), RGB(31, 128, 221),
+ RGB(254, 225, 56), RGB(33, 122, 218),
+ RGB(254, 218, 54), RGB(34, 117, 214),
+ RGB(254, 213, 53), RGB(36, 114, 212),
+ RGB(254, 209, 52), RGB(37, 112, 210),
+ RGB(254, 208, 52), RGB(37, 111, 209),
+ RGB(254, 210, 52), RGB(37, 111, 210),
+ RGB(254, 214, 53), RGB(36, 113, 211),
+ RGB(254, 220, 55), RGB(35, 116, 213),
+ RGB(253, 226, 56), RGB(33, 121, 217),
+ RGB(250, 230, 58), RGB(32, 126, 220),
+ RGB(244, 230, 59), RGB(32, 132, 221),
+ RGB(237, 228, 60), RGB(36, 137, 216),
+ RGB(228, 225, 62), RGB(44, 142, 205),
+ RGB(219, 222, 63), RGB(53, 148, 192),
+ RGB(209, 218, 64), RGB(64, 153, 177),
+ RGB(198, 214, 65), RGB(75, 159, 161),
+ RGB(187, 210, 67), RGB(87, 166, 145),
+ RGB(175, 206, 68), RGB(99, 172, 127),
+ RGB(163, 202, 70), RGB(112, 179, 110),
+ RGB(151, 197, 72), RGB(125, 186, 93),
+ RGB(139, 192, 79), RGB(138, 192, 79),
+ RGB(127, 187, 91), RGB(152, 198, 73),
+ RGB(115, 181, 106), RGB(165, 203, 70),
+ RGB(103, 175, 122), RGB(179, 207, 68),
+ RGB(92, 169, 138), RGB(192, 212, 66),
+ RGB(81, 163, 153), RGB(204, 216, 64),
+ RGB(71, 157, 167), RGB(216, 220, 63),
+ RGB(61, 152, 180), RGB(227, 224, 61),
+ RGB(52, 147, 193), RGB(238, 228, 60),
+ RGB(44, 143, 204), RGB(246, 229, 59),
+ RGB(37, 139, 214), RGB(252, 226, 57),
+ RGB(33, 135, 220), RGB(254, 218, 54),
+ RGB(31, 131, 223), RGB(254, 207, 51),
+ RGB(31, 127, 221), RGB(254, 198, 49),
+ RGB(32, 124, 219), RGB(254, 192, 48),
+ RGB(33, 122, 218), RGB(254, 188, 47),
+ RGB(33, 121, 217), RGB(254, 187, 46),
+ RGB(33, 122, 218), RGB(254, 189, 47),
+ RGB(32, 123, 219), RGB(254, 194, 48),
+ RGB(31, 126, 221), RGB(254, 201, 50),
+ RGB(31, 130, 223), RGB(254, 210, 52),
+ RGB(32, 134, 221), RGB(254, 220, 55),
+ RGB(36, 138, 215), RGB(251, 228, 57),
+ RGB(43, 142, 206), RGB(244, 229, 59),
+ RGB(51, 146, 195), RGB(235, 227, 61),
+ RGB(60, 151, 182), RGB(225, 224, 62),
+ RGB(70, 156, 169), RGB(214, 220, 63),
+ RGB(80, 162, 154), RGB(203, 216, 65),
+ RGB(91, 168, 139), RGB(191, 212, 66),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ };
- // last output row
- Eigen::ArrayXf output_47 (64);
- output_47[0] = 0; output_47[1] = 0; output_47[2] = 0; output_47[3] = 121.887; output_47[4] = 121.395; output_47[5] = 120.547; output_47[6] = 119.866; output_47[7] = 119.493; output_47[8] = 119.153; output_47[9] = 118.484; output_47[10] = 117.753; output_47[11] = 117.009; output_47[12] = 116.35; output_47[13] = 115.941; output_47[14] = 115.349; output_47[15] = 114.35; output_47[16] = 113.588; output_47[17] = 112.99; output_47[18] = 111.645; output_47[19] = 110.009; output_47[20] = 109.13; output_47[21] = 108.771; output_47[22] = 108.592; output_47[23] = 108.641; output_47[24] = 108.345; output_47[25] = 107.592; output_47[26] = 107.063; output_47[27] = 106.767; output_47[28] = 106.592; output_47[29] = 106.645; output_47[30] = 106.399; output_47[31] = 105.843; output_47[32] = 105.57; output_47[33] = 105.502; output_47[34] = 105.557; output_47[35] = 105.672; output_47[36] = 105.56; output_47[37] = 104.897; output_47[38] = 104.642; output_47[39] = 104.708; output_47[40] = 104.054; output_47[41] = 103.556; output_47[42] = 103.982; output_47[43] = 104.43; output_47[44] = 104.682; output_47[45] = 104.596; output_47[46] = 104.104; output_47[47] = 103.852; output_47[48] = 103.964; output_47[49] = 103.91; output_47[50] = 103.556; output_47[51] = 102.614; output_47[52] = 102.126; output_47[53] = 102.762; output_47[54] = 103.466; output_47[55] = 103.273; output_47[56] = 102.292; output_47[57] = 101.363; output_47[58] = 100.821; output_47[59] = 100.538; output_47[60] = 100.332; output_47[61] = 0; output_47[62] = 0; output_47[63] = 0;
-
- std::uint32_t j = 0;
- for (std::uint32_t i = 0; i < output->width ; ++i)
- EXPECT_NEAR ((*output) (i,j).intensity, output_0[i], 1e-3);
- j = 47;
+ // check result: output_results interleaves the expected colors, even indices holding row 0
+ // and odd indices holding row 47
for (std::uint32_t i = 0; i < output->width ; ++i)
- EXPECT_NEAR ((*output) (i,j).intensity, output_47[i], 1e-3);
+ {
+ EXPECT_EQ ((*output) (i, 0).r, output_results[i * 2 + 0].r);
+ EXPECT_EQ ((*output) (i, 0).g, output_results[i * 2 + 0].g);
+ EXPECT_EQ ((*output) (i, 0).b, output_results[i * 2 + 0].b);
+ EXPECT_EQ ((*output) (i, 47).r, output_results[i * 2 + 1].r);
+ EXPECT_EQ ((*output) (i, 47).g, output_results[i * 2 + 1].g);
+ EXPECT_EQ ((*output) (i, 47).b, output_results[i * 2 + 1].b);
+ }
}
-int
-main (int argc, char** argv)
+TEST (Convolution, convolveRowsXYZRGB)
{
+ // input
+ Eigen::ArrayXf filter(7);
filter << 0.00443305, 0.0540056, 0.242036, 0.39905, 0.242036, 0.0540056, 0.00443305;
+ auto input = pcl::make_shared<PointCloud<PointXYZRGB>>();
input->width = 64;
input->height = 48;
- input->resize (64 * 48);
- // To avoid linking to pcl_io, set a point cloud 64x48
- (*input) (0,0).intensity = 97;
- (*input) (1,0).intensity = 96;
- (*input) (2,0).intensity = 94;
- (*input) (3,0).intensity = 95;
- (*input) (4,0).intensity = 93;
- (*input) (5,0).intensity = 92;
- (*input) (6,0).intensity = 94;
- (*input) (7,0).intensity = 92;
- (*input) (8,0).intensity = 92;
- (*input) (9,0).intensity = 92;
- (*input) (10,0).intensity = 91;
- (*input) (11,0).intensity = 90;
- (*input) (12,0).intensity = 89;
- (*input) (13,0).intensity = 89;
- (*input) (14,0).intensity = 88;
- (*input) (15,0).intensity = 88;
- (*input) (16,0).intensity = 88;
- (*input) (17,0).intensity = 87;
- (*input) (18,0).intensity = 88;
- (*input) (19,0).intensity = 87;
- (*input) (20,0).intensity = 87;
- (*input) (21,0).intensity = 84;
- (*input) (22,0).intensity = 84;
- (*input) (23,0).intensity = 84;
- (*input) (24,0).intensity = 83;
- (*input) (25,0).intensity = 84;
- (*input) (26,0).intensity = 83;
- (*input) (27,0).intensity = 84;
- (*input) (28,0).intensity = 84;
- (*input) (29,0).intensity = 82;
- (*input) (30,0).intensity = 83;
- (*input) (31,0).intensity = 81;
- (*input) (32,0).intensity = 83;
- (*input) (33,0).intensity = 82;
- (*input) (34,0).intensity = 83;
- (*input) (35,0).intensity = 81;
- (*input) (36,0).intensity = 81;
- (*input) (37,0).intensity = 81;
- (*input) (38,0).intensity = 82;
- (*input) (39,0).intensity = 82;
- (*input) (40,0).intensity = 81;
- (*input) (41,0).intensity = 80;
- (*input) (42,0).intensity = 80;
- (*input) (43,0).intensity = 80;
- (*input) (44,0).intensity = 81;
- (*input) (45,0).intensity = 81;
- (*input) (46,0).intensity = 81;
- (*input) (47,0).intensity = 80;
- (*input) (48,0).intensity = 80;
- (*input) (49,0).intensity = 81;
- (*input) (50,0).intensity = 82;
- (*input) (51,0).intensity = 86;
- (*input) (52,0).intensity = 95;
- (*input) (53,0).intensity = 84;
- (*input) (54,0).intensity = 74;
- (*input) (55,0).intensity = 73;
- (*input) (56,0).intensity = 79;
- (*input) (57,0).intensity = 81;
- (*input) (58,0).intensity = 80;
- (*input) (59,0).intensity = 81;
- (*input) (60,0).intensity = 81;
- (*input) (61,0).intensity = 84;
- (*input) (62,0).intensity = 84;
- (*input) (63,0).intensity = 85;
- (*input) (0,1).intensity = 97;
- (*input) (1,1).intensity = 96;
- (*input) (2,1).intensity = 95;
- (*input) (3,1).intensity = 93;
- (*input) (4,1).intensity = 94;
- (*input) (5,1).intensity = 92;
- (*input) (6,1).intensity = 93;
- (*input) (7,1).intensity = 92;
- (*input) (8,1).intensity = 92;
- (*input) (9,1).intensity = 91;
- (*input) (10,1).intensity = 91;
- (*input) (11,1).intensity = 89;
- (*input) (12,1).intensity = 88;
- (*input) (13,1).intensity = 88;
- (*input) (14,1).intensity = 88;
- (*input) (15,1).intensity = 88;
- (*input) (16,1).intensity = 86;
- (*input) (17,1).intensity = 86;
- (*input) (18,1).intensity = 89;
- (*input) (19,1).intensity = 87;
- (*input) (20,1).intensity = 88;
- (*input) (21,1).intensity = 85;
- (*input) (22,1).intensity = 85;
- (*input) (23,1).intensity = 82;
- (*input) (24,1).intensity = 83;
- (*input) (25,1).intensity = 84;
- (*input) (26,1).intensity = 82;
- (*input) (27,1).intensity = 83;
- (*input) (28,1).intensity = 82;
- (*input) (29,1).intensity = 83;
- (*input) (30,1).intensity = 80;
- (*input) (31,1).intensity = 79;
- (*input) (32,1).intensity = 81;
- (*input) (33,1).intensity = 81;
- (*input) (34,1).intensity = 82;
- (*input) (35,1).intensity = 79;
- (*input) (36,1).intensity = 79;
- (*input) (37,1).intensity = 80;
- (*input) (38,1).intensity = 79;
- (*input) (39,1).intensity = 81;
- (*input) (40,1).intensity = 78;
- (*input) (41,1).intensity = 79;
- (*input) (42,1).intensity = 77;
- (*input) (43,1).intensity = 77;
- (*input) (44,1).intensity = 80;
- (*input) (45,1).intensity = 78;
- (*input) (46,1).intensity = 79;
- (*input) (47,1).intensity = 78;
- (*input) (48,1).intensity = 77;
- (*input) (49,1).intensity = 78;
- (*input) (50,1).intensity = 79;
- (*input) (51,1).intensity = 81;
- (*input) (52,1).intensity = 89;
- (*input) (53,1).intensity = 92;
- (*input) (54,1).intensity = 88;
- (*input) (55,1).intensity = 80;
- (*input) (56,1).intensity = 77;
- (*input) (57,1).intensity = 77;
- (*input) (58,1).intensity = 78;
- (*input) (59,1).intensity = 78;
- (*input) (60,1).intensity = 77;
- (*input) (61,1).intensity = 79;
- (*input) (62,1).intensity = 77;
- (*input) (63,1).intensity = 79;
- (*input) (0,2).intensity = 96;
- (*input) (1,2).intensity = 96;
- (*input) (2,2).intensity = 94;
- (*input) (3,2).intensity = 94;
- (*input) (4,2).intensity = 92;
- (*input) (5,2).intensity = 92;
- (*input) (6,2).intensity = 94;
- (*input) (7,2).intensity = 92;
- (*input) (8,2).intensity = 90;
- (*input) (9,2).intensity = 88;
- (*input) (10,2).intensity = 89;
- (*input) (11,2).intensity = 88;
- (*input) (12,2).intensity = 87;
- (*input) (13,2).intensity = 85;
- (*input) (14,2).intensity = 86;
- (*input) (15,2).intensity = 85;
- (*input) (16,2).intensity = 83;
- (*input) (17,2).intensity = 83;
- (*input) (18,2).intensity = 87;
- (*input) (19,2).intensity = 85;
- (*input) (20,2).intensity = 86;
- (*input) (21,2).intensity = 82;
- (*input) (22,2).intensity = 83;
- (*input) (23,2).intensity = 82;
- (*input) (24,2).intensity = 81;
- (*input) (25,2).intensity = 82;
- (*input) (26,2).intensity = 82;
- (*input) (27,2).intensity = 82;
- (*input) (28,2).intensity = 82;
- (*input) (29,2).intensity = 82;
- (*input) (30,2).intensity = 81;
- (*input) (31,2).intensity = 80;
- (*input) (32,2).intensity = 81;
- (*input) (33,2).intensity = 79;
- (*input) (34,2).intensity = 81;
- (*input) (35,2).intensity = 79;
- (*input) (36,2).intensity = 81;
- (*input) (37,2).intensity = 83;
- (*input) (38,2).intensity = 79;
- (*input) (39,2).intensity = 78;
- (*input) (40,2).intensity = 81;
- (*input) (41,2).intensity = 83;
- (*input) (42,2).intensity = 81;
- (*input) (43,2).intensity = 80;
- (*input) (44,2).intensity = 81;
- (*input) (45,2).intensity = 80;
- (*input) (46,2).intensity = 78;
- (*input) (47,2).intensity = 79;
- (*input) (48,2).intensity = 79;
- (*input) (49,2).intensity = 80;
- (*input) (50,2).intensity = 80;
- (*input) (51,2).intensity = 81;
- (*input) (52,2).intensity = 82;
- (*input) (53,2).intensity = 83;
- (*input) (54,2).intensity = 84;
- (*input) (55,2).intensity = 81;
- (*input) (56,2).intensity = 79;
- (*input) (57,2).intensity = 78;
- (*input) (58,2).intensity = 77;
- (*input) (59,2).intensity = 77;
- (*input) (60,2).intensity = 76;
- (*input) (61,2).intensity = 74;
- (*input) (62,2).intensity = 77;
- (*input) (63,2).intensity = 79;
- (*input) (0,3).intensity = 93;
- (*input) (1,3).intensity = 93;
- (*input) (2,3).intensity = 94;
- (*input) (3,3).intensity = 92;
- (*input) (4,3).intensity = 92;
- (*input) (5,3).intensity = 92;
- (*input) (6,3).intensity = 93;
- (*input) (7,3).intensity = 93;
- (*input) (8,3).intensity = 92;
- (*input) (9,3).intensity = 90;
- (*input) (10,3).intensity = 92;
- (*input) (11,3).intensity = 91;
- (*input) (12,3).intensity = 90;
- (*input) (13,3).intensity = 89;
- (*input) (14,3).intensity = 90;
- (*input) (15,3).intensity = 90;
- (*input) (16,3).intensity = 89;
- (*input) (17,3).intensity = 90;
- (*input) (18,3).intensity = 91;
- (*input) (19,3).intensity = 91;
- (*input) (20,3).intensity = 93;
- (*input) (21,3).intensity = 89;
- (*input) (22,3).intensity = 91;
- (*input) (23,3).intensity = 91;
- (*input) (24,3).intensity = 90;
- (*input) (25,3).intensity = 91;
- (*input) (26,3).intensity = 89;
- (*input) (27,3).intensity = 88;
- (*input) (28,3).intensity = 91;
- (*input) (29,3).intensity = 90;
- (*input) (30,3).intensity = 90;
- (*input) (31,3).intensity = 91;
- (*input) (32,3).intensity = 89;
- (*input) (33,3).intensity = 89;
- (*input) (34,3).intensity = 87;
- (*input) (35,3).intensity = 88;
- (*input) (36,3).intensity = 90;
- (*input) (37,3).intensity = 91;
- (*input) (38,3).intensity = 90;
- (*input) (39,3).intensity = 87;
- (*input) (40,3).intensity = 92;
- (*input) (41,3).intensity = 91;
- (*input) (42,3).intensity = 91;
- (*input) (43,3).intensity = 91;
- (*input) (44,3).intensity = 91;
- (*input) (45,3).intensity = 90;
- (*input) (46,3).intensity = 89;
- (*input) (47,3).intensity = 87;
- (*input) (48,3).intensity = 90;
- (*input) (49,3).intensity = 90;
- (*input) (50,3).intensity = 90;
- (*input) (51,3).intensity = 88;
- (*input) (52,3).intensity = 89;
- (*input) (53,3).intensity = 87;
- (*input) (54,3).intensity = 88;
- (*input) (55,3).intensity = 87;
- (*input) (56,3).intensity = 88;
- (*input) (57,3).intensity = 85;
- (*input) (58,3).intensity = 86;
- (*input) (59,3).intensity = 85;
- (*input) (60,3).intensity = 84;
- (*input) (61,3).intensity = 82;
- (*input) (62,3).intensity = 79;
- (*input) (63,3).intensity = 68;
- (*input) (0,4).intensity = 99;
- (*input) (1,4).intensity = 96;
- (*input) (2,4).intensity = 98;
- (*input) (3,4).intensity = 98;
- (*input) (4,4).intensity = 96;
- (*input) (5,4).intensity = 98;
- (*input) (6,4).intensity = 98;
- (*input) (7,4).intensity = 98;
- (*input) (8,4).intensity = 96;
- (*input) (9,4).intensity = 96;
- (*input) (10,4).intensity = 98;
- (*input) (11,4).intensity = 99;
- (*input) (12,4).intensity = 99;
- (*input) (13,4).intensity = 97;
- (*input) (14,4).intensity = 97;
- (*input) (15,4).intensity = 98;
- (*input) (16,4).intensity = 98;
- (*input) (17,4).intensity = 99;
- (*input) (18,4).intensity = 97;
- (*input) (19,4).intensity = 95;
- (*input) (20,4).intensity = 98;
- (*input) (21,4).intensity = 96;
- (*input) (22,4).intensity = 98;
- (*input) (23,4).intensity = 98;
- (*input) (24,4).intensity = 97;
- (*input) (25,4).intensity = 97;
- (*input) (26,4).intensity = 99;
- (*input) (27,4).intensity = 97;
- (*input) (28,4).intensity = 96;
- (*input) (29,4).intensity = 96;
- (*input) (30,4).intensity = 96;
- (*input) (31,4).intensity = 97;
- (*input) (32,4).intensity = 96;
- (*input) (33,4).intensity = 96;
- (*input) (34,4).intensity = 96;
- (*input) (35,4).intensity = 97;
- (*input) (36,4).intensity = 97;
- (*input) (37,4).intensity = 96;
- (*input) (38,4).intensity = 96;
- (*input) (39,4).intensity = 94;
- (*input) (40,4).intensity = 96;
- (*input) (41,4).intensity = 95;
- (*input) (42,4).intensity = 97;
- (*input) (43,4).intensity = 97;
- (*input) (44,4).intensity = 97;
- (*input) (45,4).intensity = 96;
- (*input) (46,4).intensity = 94;
- (*input) (47,4).intensity = 94;
- (*input) (48,4).intensity = 96;
- (*input) (49,4).intensity = 96;
- (*input) (50,4).intensity = 97;
- (*input) (51,4).intensity = 93;
- (*input) (52,4).intensity = 95;
- (*input) (53,4).intensity = 92;
- (*input) (54,4).intensity = 92;
- (*input) (55,4).intensity = 94;
- (*input) (56,4).intensity = 93;
- (*input) (57,4).intensity = 93;
- (*input) (58,4).intensity = 92;
- (*input) (59,4).intensity = 92;
- (*input) (60,4).intensity = 90;
- (*input) (61,4).intensity = 86;
- (*input) (62,4).intensity = 70;
- (*input) (63,4).intensity = 49;
- (*input) (0,5).intensity = 95;
- (*input) (1,5).intensity = 93;
- (*input) (2,5).intensity = 93;
- (*input) (3,5).intensity = 94;
- (*input) (4,5).intensity = 93;
- (*input) (5,5).intensity = 94;
- (*input) (6,5).intensity = 93;
- (*input) (7,5).intensity = 92;
- (*input) (8,5).intensity = 93;
- (*input) (9,5).intensity = 93;
- (*input) (10,5).intensity = 90;
- (*input) (11,5).intensity = 92;
- (*input) (12,5).intensity = 89;
- (*input) (13,5).intensity = 90;
- (*input) (14,5).intensity = 89;
- (*input) (15,5).intensity = 89;
- (*input) (16,5).intensity = 90;
- (*input) (17,5).intensity = 90;
- (*input) (18,5).intensity = 88;
- (*input) (19,5).intensity = 86;
- (*input) (20,5).intensity = 87;
- (*input) (21,5).intensity = 89;
- (*input) (22,5).intensity = 88;
- (*input) (23,5).intensity = 89;
- (*input) (24,5).intensity = 88;
- (*input) (25,5).intensity = 90;
- (*input) (26,5).intensity = 91;
- (*input) (27,5).intensity = 89;
- (*input) (28,5).intensity = 87;
- (*input) (29,5).intensity = 87;
- (*input) (30,5).intensity = 89;
- (*input) (31,5).intensity = 87;
- (*input) (32,5).intensity = 89;
- (*input) (33,5).intensity = 89;
- (*input) (34,5).intensity = 88;
- (*input) (35,5).intensity = 92;
- (*input) (36,5).intensity = 92;
- (*input) (37,5).intensity = 90;
- (*input) (38,5).intensity = 91;
- (*input) (39,5).intensity = 90;
- (*input) (40,5).intensity = 91;
- (*input) (41,5).intensity = 87;
- (*input) (42,5).intensity = 91;
- (*input) (43,5).intensity = 91;
- (*input) (44,5).intensity = 91;
- (*input) (45,5).intensity = 91;
- (*input) (46,5).intensity = 90;
- (*input) (47,5).intensity = 92;
- (*input) (48,5).intensity = 91;
- (*input) (49,5).intensity = 91;
- (*input) (50,5).intensity = 91;
- (*input) (51,5).intensity = 90;
- (*input) (52,5).intensity = 90;
- (*input) (53,5).intensity = 88;
- (*input) (54,5).intensity = 86;
- (*input) (55,5).intensity = 91;
- (*input) (56,5).intensity = 89;
- (*input) (57,5).intensity = 89;
- (*input) (58,5).intensity = 90;
- (*input) (59,5).intensity = 88;
- (*input) (60,5).intensity = 86;
- (*input) (61,5).intensity = 80;
- (*input) (62,5).intensity = 64;
- (*input) (63,5).intensity = 46;
- (*input) (0,6).intensity = 93;
- (*input) (1,6).intensity = 90;
- (*input) (2,6).intensity = 91;
- (*input) (3,6).intensity = 91;
- (*input) (4,6).intensity = 93;
- (*input) (5,6).intensity = 92;
- (*input) (6,6).intensity = 91;
- (*input) (7,6).intensity = 90;
- (*input) (8,6).intensity = 90;
- (*input) (9,6).intensity = 90;
- (*input) (10,6).intensity = 88;
- (*input) (11,6).intensity = 90;
- (*input) (12,6).intensity = 89;
- (*input) (13,6).intensity = 89;
- (*input) (14,6).intensity = 85;
- (*input) (15,6).intensity = 87;
- (*input) (16,6).intensity = 89;
- (*input) (17,6).intensity = 87;
- (*input) (18,6).intensity = 88;
- (*input) (19,6).intensity = 86;
- (*input) (20,6).intensity = 85;
- (*input) (21,6).intensity = 87;
- (*input) (22,6).intensity = 87;
- (*input) (23,6).intensity = 87;
- (*input) (24,6).intensity = 86;
- (*input) (25,6).intensity = 87;
- (*input) (26,6).intensity = 87;
- (*input) (27,6).intensity = 88;
- (*input) (28,6).intensity = 85;
- (*input) (29,6).intensity = 86;
- (*input) (30,6).intensity = 85;
- (*input) (31,6).intensity = 83;
- (*input) (32,6).intensity = 86;
- (*input) (33,6).intensity = 87;
- (*input) (34,6).intensity = 85;
- (*input) (35,6).intensity = 86;
- (*input) (36,6).intensity = 87;
- (*input) (37,6).intensity = 85;
- (*input) (38,6).intensity = 86;
- (*input) (39,6).intensity = 85;
- (*input) (40,6).intensity = 85;
- (*input) (41,6).intensity = 84;
- (*input) (42,6).intensity = 85;
- (*input) (43,6).intensity = 85;
- (*input) (44,6).intensity = 84;
- (*input) (45,6).intensity = 84;
- (*input) (46,6).intensity = 85;
- (*input) (47,6).intensity = 84;
- (*input) (48,6).intensity = 82;
- (*input) (49,6).intensity = 84;
- (*input) (50,6).intensity = 85;
- (*input) (51,6).intensity = 83;
- (*input) (52,6).intensity = 84;
- (*input) (53,6).intensity = 83;
- (*input) (54,6).intensity = 82;
- (*input) (55,6).intensity = 85;
- (*input) (56,6).intensity = 83;
- (*input) (57,6).intensity = 82;
- (*input) (58,6).intensity = 82;
- (*input) (59,6).intensity = 83;
- (*input) (60,6).intensity = 80;
- (*input) (61,6).intensity = 77;
- (*input) (62,6).intensity = 62;
- (*input) (63,6).intensity = 52;
- (*input) (0,7).intensity = 91;
- (*input) (1,7).intensity = 88;
- (*input) (2,7).intensity = 86;
- (*input) (3,7).intensity = 87;
- (*input) (4,7).intensity = 90;
- (*input) (5,7).intensity = 87;
- (*input) (6,7).intensity = 86;
- (*input) (7,7).intensity = 84;
- (*input) (8,7).intensity = 82;
- (*input) (9,7).intensity = 82;
- (*input) (10,7).intensity = 79;
- (*input) (11,7).intensity = 80;
- (*input) (12,7).intensity = 79;
- (*input) (13,7).intensity = 81;
- (*input) (14,7).intensity = 75;
- (*input) (15,7).intensity = 75;
- (*input) (16,7).intensity = 77;
- (*input) (17,7).intensity = 75;
- (*input) (18,7).intensity = 77;
- (*input) (19,7).intensity = 77;
- (*input) (20,7).intensity = 74;
- (*input) (21,7).intensity = 74;
- (*input) (22,7).intensity = 75;
- (*input) (23,7).intensity = 75;
- (*input) (24,7).intensity = 75;
- (*input) (25,7).intensity = 75;
- (*input) (26,7).intensity = 74;
- (*input) (27,7).intensity = 75;
- (*input) (28,7).intensity = 76;
- (*input) (29,7).intensity = 76;
- (*input) (30,7).intensity = 76;
- (*input) (31,7).intensity = 76;
- (*input) (32,7).intensity = 76;
- (*input) (33,7).intensity = 74;
- (*input) (34,7).intensity = 75;
- (*input) (35,7).intensity = 76;
- (*input) (36,7).intensity = 78;
- (*input) (37,7).intensity = 77;
- (*input) (38,7).intensity = 76;
- (*input) (39,7).intensity = 74;
- (*input) (40,7).intensity = 76;
- (*input) (41,7).intensity = 76;
- (*input) (42,7).intensity = 76;
- (*input) (43,7).intensity = 75;
- (*input) (44,7).intensity = 76;
- (*input) (45,7).intensity = 74;
- (*input) (46,7).intensity = 75;
- (*input) (47,7).intensity = 75;
- (*input) (48,7).intensity = 73;
- (*input) (49,7).intensity = 75;
- (*input) (50,7).intensity = 74;
- (*input) (51,7).intensity = 75;
- (*input) (52,7).intensity = 75;
- (*input) (53,7).intensity = 73;
- (*input) (54,7).intensity = 74;
- (*input) (55,7).intensity = 74;
- (*input) (56,7).intensity = 74;
- (*input) (57,7).intensity = 73;
- (*input) (58,7).intensity = 73;
- (*input) (59,7).intensity = 74;
- (*input) (60,7).intensity = 73;
- (*input) (61,7).intensity = 73;
- (*input) (62,7).intensity = 74;
- (*input) (63,7).intensity = 67;
- (*input) (0,8).intensity = 86;
- (*input) (1,8).intensity = 87;
- (*input) (2,8).intensity = 85;
- (*input) (3,8).intensity = 84;
- (*input) (4,8).intensity = 87;
- (*input) (5,8).intensity = 87;
- (*input) (6,8).intensity = 86;
- (*input) (7,8).intensity = 83;
- (*input) (8,8).intensity = 83;
- (*input) (9,8).intensity = 79;
- (*input) (10,8).intensity = 82;
- (*input) (11,8).intensity = 82;
- (*input) (12,8).intensity = 84;
- (*input) (13,8).intensity = 83;
- (*input) (14,8).intensity = 78;
- (*input) (15,8).intensity = 79;
- (*input) (16,8).intensity = 80;
- (*input) (17,8).intensity = 82;
- (*input) (18,8).intensity = 83;
- (*input) (19,8).intensity = 84;
- (*input) (20,8).intensity = 84;
- (*input) (21,8).intensity = 80;
- (*input) (22,8).intensity = 84;
- (*input) (23,8).intensity = 84;
- (*input) (24,8).intensity = 83;
- (*input) (25,8).intensity = 82;
- (*input) (26,8).intensity = 80;
- (*input) (27,8).intensity = 82;
- (*input) (28,8).intensity = 84;
- (*input) (29,8).intensity = 83;
- (*input) (30,8).intensity = 83;
- (*input) (31,8).intensity = 84;
- (*input) (32,8).intensity = 83;
- (*input) (33,8).intensity = 83;
- (*input) (34,8).intensity = 83;
- (*input) (35,8).intensity = 80;
- (*input) (36,8).intensity = 81;
- (*input) (37,8).intensity = 83;
- (*input) (38,8).intensity = 81;
- (*input) (39,8).intensity = 81;
- (*input) (40,8).intensity = 82;
- (*input) (41,8).intensity = 81;
- (*input) (42,8).intensity = 82;
- (*input) (43,8).intensity = 81;
- (*input) (44,8).intensity = 81;
- (*input) (45,8).intensity = 80;
- (*input) (46,8).intensity = 81;
- (*input) (47,8).intensity = 80;
- (*input) (48,8).intensity = 80;
- (*input) (49,8).intensity = 79;
- (*input) (50,8).intensity = 80;
- (*input) (51,8).intensity = 77;
- (*input) (52,8).intensity = 79;
- (*input) (53,8).intensity = 80;
- (*input) (54,8).intensity = 79;
- (*input) (55,8).intensity = 79;
- (*input) (56,8).intensity = 78;
- (*input) (57,8).intensity = 76;
- (*input) (58,8).intensity = 76;
- (*input) (59,8).intensity = 77;
- (*input) (60,8).intensity = 75;
- (*input) (61,8).intensity = 76;
- (*input) (62,8).intensity = 75;
- (*input) (63,8).intensity = 58;
- (*input) (0,9).intensity = 93;
- (*input) (1,9).intensity = 93;
- (*input) (2,9).intensity = 92;
- (*input) (3,9).intensity = 91;
- (*input) (4,9).intensity = 92;
- (*input) (5,9).intensity = 92;
- (*input) (6,9).intensity = 89;
- (*input) (7,9).intensity = 87;
- (*input) (8,9).intensity = 88;
- (*input) (9,9).intensity = 83;
- (*input) (10,9).intensity = 85;
- (*input) (11,9).intensity = 86;
- (*input) (12,9).intensity = 90;
- (*input) (13,9).intensity = 89;
- (*input) (14,9).intensity = 84;
- (*input) (15,9).intensity = 87;
- (*input) (16,9).intensity = 85;
- (*input) (17,9).intensity = 87;
- (*input) (18,9).intensity = 89;
- (*input) (19,9).intensity = 89;
- (*input) (20,9).intensity = 89;
- (*input) (21,9).intensity = 86;
- (*input) (22,9).intensity = 89;
- (*input) (23,9).intensity = 90;
- (*input) (24,9).intensity = 87;
- (*input) (25,9).intensity = 89;
- (*input) (26,9).intensity = 85;
- (*input) (27,9).intensity = 87;
- (*input) (28,9).intensity = 87;
- (*input) (29,9).intensity = 86;
- (*input) (30,9).intensity = 87;
- (*input) (31,9).intensity = 88;
- (*input) (32,9).intensity = 86;
- (*input) (33,9).intensity = 86;
- (*input) (34,9).intensity = 87;
- (*input) (35,9).intensity = 87;
- (*input) (36,9).intensity = 86;
- (*input) (37,9).intensity = 85;
- (*input) (38,9).intensity = 84;
- (*input) (39,9).intensity = 84;
- (*input) (40,9).intensity = 83;
- (*input) (41,9).intensity = 84;
- (*input) (42,9).intensity = 85;
- (*input) (43,9).intensity = 85;
- (*input) (44,9).intensity = 84;
- (*input) (45,9).intensity = 82;
- (*input) (46,9).intensity = 80;
- (*input) (47,9).intensity = 82;
- (*input) (48,9).intensity = 82;
- (*input) (49,9).intensity = 81;
- (*input) (50,9).intensity = 82;
- (*input) (51,9).intensity = 78;
- (*input) (52,9).intensity = 78;
- (*input) (53,9).intensity = 82;
- (*input) (54,9).intensity = 81;
- (*input) (55,9).intensity = 79;
- (*input) (56,9).intensity = 79;
- (*input) (57,9).intensity = 76;
- (*input) (58,9).intensity = 76;
- (*input) (59,9).intensity = 77;
- (*input) (60,9).intensity = 76;
- (*input) (61,9).intensity = 66;
- (*input) (62,9).intensity = 48;
- (*input) (63,9).intensity = 33;
- (*input) (0,10).intensity = 96;
- (*input) (1,10).intensity = 97;
- (*input) (2,10).intensity = 94;
- (*input) (3,10).intensity = 95;
- (*input) (4,10).intensity = 93;
- (*input) (5,10).intensity = 92;
- (*input) (6,10).intensity = 89;
- (*input) (7,10).intensity = 88;
- (*input) (8,10).intensity = 90;
- (*input) (9,10).intensity = 85;
- (*input) (10,10).intensity = 87;
- (*input) (11,10).intensity = 88;
- (*input) (12,10).intensity = 90;
- (*input) (13,10).intensity = 89;
- (*input) (14,10).intensity = 88;
- (*input) (15,10).intensity = 88;
- (*input) (16,10).intensity = 88;
- (*input) (17,10).intensity = 89;
- (*input) (18,10).intensity = 88;
- (*input) (19,10).intensity = 88;
- (*input) (20,10).intensity = 88;
- (*input) (21,10).intensity = 87;
- (*input) (22,10).intensity = 88;
- (*input) (23,10).intensity = 87;
- (*input) (24,10).intensity = 88;
- (*input) (25,10).intensity = 88;
- (*input) (26,10).intensity = 85;
- (*input) (27,10).intensity = 88;
- (*input) (28,10).intensity = 86;
- (*input) (29,10).intensity = 85;
- (*input) (30,10).intensity = 87;
- (*input) (31,10).intensity = 86;
- (*input) (32,10).intensity = 83;
- (*input) (33,10).intensity = 84;
- (*input) (34,10).intensity = 86;
- (*input) (35,10).intensity = 85;
- (*input) (36,10).intensity = 85;
- (*input) (37,10).intensity = 84;
- (*input) (38,10).intensity = 82;
- (*input) (39,10).intensity = 83;
- (*input) (40,10).intensity = 83;
- (*input) (41,10).intensity = 83;
- (*input) (42,10).intensity = 83;
- (*input) (43,10).intensity = 83;
- (*input) (44,10).intensity = 84;
- (*input) (45,10).intensity = 81;
- (*input) (46,10).intensity = 80;
- (*input) (47,10).intensity = 81;
- (*input) (48,10).intensity = 82;
- (*input) (49,10).intensity = 81;
- (*input) (50,10).intensity = 81;
- (*input) (51,10).intensity = 79;
- (*input) (52,10).intensity = 79;
- (*input) (53,10).intensity = 81;
- (*input) (54,10).intensity = 80;
- (*input) (55,10).intensity = 80;
- (*input) (56,10).intensity = 79;
- (*input) (57,10).intensity = 75;
- (*input) (58,10).intensity = 75;
- (*input) (59,10).intensity = 77;
- (*input) (60,10).intensity = 74;
- (*input) (61,10).intensity = 50;
- (*input) (62,10).intensity = 31;
- (*input) (63,10).intensity = 19;
- (*input) (0,11).intensity = 97;
- (*input) (1,11).intensity = 97;
- (*input) (2,11).intensity = 97;
- (*input) (3,11).intensity = 96;
- (*input) (4,11).intensity = 94;
- (*input) (5,11).intensity = 91;
- (*input) (6,11).intensity = 90;
- (*input) (7,11).intensity = 90;
- (*input) (8,11).intensity = 92;
- (*input) (9,11).intensity = 89;
- (*input) (10,11).intensity = 90;
- (*input) (11,11).intensity = 89;
- (*input) (12,11).intensity = 90;
- (*input) (13,11).intensity = 91;
- (*input) (14,11).intensity = 91;
- (*input) (15,11).intensity = 92;
- (*input) (16,11).intensity = 90;
- (*input) (17,11).intensity = 89;
- (*input) (18,11).intensity = 89;
- (*input) (19,11).intensity = 89;
- (*input) (20,11).intensity = 89;
- (*input) (21,11).intensity = 89;
- (*input) (22,11).intensity = 89;
- (*input) (23,11).intensity = 87;
- (*input) (24,11).intensity = 88;
- (*input) (25,11).intensity = 87;
- (*input) (26,11).intensity = 87;
- (*input) (27,11).intensity = 88;
- (*input) (28,11).intensity = 87;
- (*input) (29,11).intensity = 85;
- (*input) (30,11).intensity = 87;
- (*input) (31,11).intensity = 85;
- (*input) (32,11).intensity = 84;
- (*input) (33,11).intensity = 83;
- (*input) (34,11).intensity = 85;
- (*input) (35,11).intensity = 85;
- (*input) (36,11).intensity = 85;
- (*input) (37,11).intensity = 83;
- (*input) (38,11).intensity = 82;
- (*input) (39,11).intensity = 83;
- (*input) (40,11).intensity = 82;
- (*input) (41,11).intensity = 83;
- (*input) (42,11).intensity = 84;
- (*input) (43,11).intensity = 83;
- (*input) (44,11).intensity = 83;
- (*input) (45,11).intensity = 81;
- (*input) (46,11).intensity = 80;
- (*input) (47,11).intensity = 80;
- (*input) (48,11).intensity = 81;
- (*input) (49,11).intensity = 82;
- (*input) (50,11).intensity = 80;
- (*input) (51,11).intensity = 79;
- (*input) (52,11).intensity = 79;
- (*input) (53,11).intensity = 80;
- (*input) (54,11).intensity = 80;
- (*input) (55,11).intensity = 79;
- (*input) (56,11).intensity = 79;
- (*input) (57,11).intensity = 75;
- (*input) (58,11).intensity = 75;
- (*input) (59,11).intensity = 79;
- (*input) (60,11).intensity = 76;
- (*input) (61,11).intensity = 43;
- (*input) (62,11).intensity = 32;
- (*input) (63,11).intensity = 24;
- (*input) (0,12).intensity = 98;
- (*input) (1,12).intensity = 98;
- (*input) (2,12).intensity = 98;
- (*input) (3,12).intensity = 97;
- (*input) (4,12).intensity = 96;
- (*input) (5,12).intensity = 93;
- (*input) (6,12).intensity = 94;
- (*input) (7,12).intensity = 94;
- (*input) (8,12).intensity = 93;
- (*input) (9,12).intensity = 92;
- (*input) (10,12).intensity = 92;
- (*input) (11,12).intensity = 91;
- (*input) (12,12).intensity = 91;
- (*input) (13,12).intensity = 92;
- (*input) (14,12).intensity = 93;
- (*input) (15,12).intensity = 91;
- (*input) (16,12).intensity = 92;
- (*input) (17,12).intensity = 92;
- (*input) (18,12).intensity = 90;
- (*input) (19,12).intensity = 90;
- (*input) (20,12).intensity = 90;
- (*input) (21,12).intensity = 90;
- (*input) (22,12).intensity = 89;
- (*input) (23,12).intensity = 87;
- (*input) (24,12).intensity = 89;
- (*input) (25,12).intensity = 88;
- (*input) (26,12).intensity = 88;
- (*input) (27,12).intensity = 89;
- (*input) (28,12).intensity = 88;
- (*input) (29,12).intensity = 86;
- (*input) (30,12).intensity = 86;
- (*input) (31,12).intensity = 87;
- (*input) (32,12).intensity = 85;
- (*input) (33,12).intensity = 84;
- (*input) (34,12).intensity = 85;
- (*input) (35,12).intensity = 84;
- (*input) (36,12).intensity = 85;
- (*input) (37,12).intensity = 83;
- (*input) (38,12).intensity = 83;
- (*input) (39,12).intensity = 82;
- (*input) (40,12).intensity = 82;
- (*input) (41,12).intensity = 83;
- (*input) (42,12).intensity = 83;
- (*input) (43,12).intensity = 82;
- (*input) (44,12).intensity = 84;
- (*input) (45,12).intensity = 81;
- (*input) (46,12).intensity = 81;
- (*input) (47,12).intensity = 80;
- (*input) (48,12).intensity = 81;
- (*input) (49,12).intensity = 81;
- (*input) (50,12).intensity = 81;
- (*input) (51,12).intensity = 80;
- (*input) (52,12).intensity = 80;
- (*input) (53,12).intensity = 81;
- (*input) (54,12).intensity = 80;
- (*input) (55,12).intensity = 80;
- (*input) (56,12).intensity = 81;
- (*input) (57,12).intensity = 78;
- (*input) (58,12).intensity = 80;
- (*input) (59,12).intensity = 83;
- (*input) (60,12).intensity = 68;
- (*input) (61,12).intensity = 48;
- (*input) (62,12).intensity = 49;
- (*input) (63,12).intensity = 51;
- (*input) (0,13).intensity = 102;
- (*input) (1,13).intensity = 98;
- (*input) (2,13).intensity = 98;
- (*input) (3,13).intensity = 97;
- (*input) (4,13).intensity = 97;
- (*input) (5,13).intensity = 93;
- (*input) (6,13).intensity = 96;
- (*input) (7,13).intensity = 97;
- (*input) (8,13).intensity = 94;
- (*input) (9,13).intensity = 94;
- (*input) (10,13).intensity = 95;
- (*input) (11,13).intensity = 93;
- (*input) (12,13).intensity = 92;
- (*input) (13,13).intensity = 94;
- (*input) (14,13).intensity = 95;
- (*input) (15,13).intensity = 90;
- (*input) (16,13).intensity = 93;
- (*input) (17,13).intensity = 93;
- (*input) (18,13).intensity = 91;
- (*input) (19,13).intensity = 90;
- (*input) (20,13).intensity = 90;
- (*input) (21,13).intensity = 90;
- (*input) (22,13).intensity = 89;
- (*input) (23,13).intensity = 88;
- (*input) (24,13).intensity = 90;
- (*input) (25,13).intensity = 88;
- (*input) (26,13).intensity = 90;
- (*input) (27,13).intensity = 89;
- (*input) (28,13).intensity = 89;
- (*input) (29,13).intensity = 87;
- (*input) (30,13).intensity = 86;
- (*input) (31,13).intensity = 87;
- (*input) (32,13).intensity = 87;
- (*input) (33,13).intensity = 85;
- (*input) (34,13).intensity = 85;
- (*input) (35,13).intensity = 84;
- (*input) (36,13).intensity = 85;
- (*input) (37,13).intensity = 84;
- (*input) (38,13).intensity = 85;
- (*input) (39,13).intensity = 82;
- (*input) (40,13).intensity = 82;
- (*input) (41,13).intensity = 83;
- (*input) (42,13).intensity = 83;
- (*input) (43,13).intensity = 82;
- (*input) (44,13).intensity = 84;
- (*input) (45,13).intensity = 83;
- (*input) (46,13).intensity = 82;
- (*input) (47,13).intensity = 80;
- (*input) (48,13).intensity = 81;
- (*input) (49,13).intensity = 81;
- (*input) (50,13).intensity = 81;
- (*input) (51,13).intensity = 81;
- (*input) (52,13).intensity = 81;
- (*input) (53,13).intensity = 82;
- (*input) (54,13).intensity = 80;
- (*input) (55,13).intensity = 81;
- (*input) (56,13).intensity = 81;
- (*input) (57,13).intensity = 81;
- (*input) (58,13).intensity = 86;
- (*input) (59,13).intensity = 81;
- (*input) (60,13).intensity = 53;
- (*input) (61,13).intensity = 57;
- (*input) (62,13).intensity = 115;
- (*input) (63,13).intensity = 116;
- (*input) (0,14).intensity = 101;
- (*input) (1,14).intensity = 99;
- (*input) (2,14).intensity = 98;
- (*input) (3,14).intensity = 99;
- (*input) (4,14).intensity = 98;
- (*input) (5,14).intensity = 98;
- (*input) (6,14).intensity = 99;
- (*input) (7,14).intensity = 98;
- (*input) (8,14).intensity = 95;
- (*input) (9,14).intensity = 95;
- (*input) (10,14).intensity = 95;
- (*input) (11,14).intensity = 94;
- (*input) (12,14).intensity = 93;
- (*input) (13,14).intensity = 93;
- (*input) (14,14).intensity = 94;
- (*input) (15,14).intensity = 91;
- (*input) (16,14).intensity = 92;
- (*input) (17,14).intensity = 94;
- (*input) (18,14).intensity = 92;
- (*input) (19,14).intensity = 89;
- (*input) (20,14).intensity = 90;
- (*input) (21,14).intensity = 92;
- (*input) (22,14).intensity = 91;
- (*input) (23,14).intensity = 91;
- (*input) (24,14).intensity = 91;
- (*input) (25,14).intensity = 90;
- (*input) (26,14).intensity = 90;
- (*input) (27,14).intensity = 90;
- (*input) (28,14).intensity = 89;
- (*input) (29,14).intensity = 88;
- (*input) (30,14).intensity = 88;
- (*input) (31,14).intensity = 89;
- (*input) (32,14).intensity = 88;
- (*input) (33,14).intensity = 88;
- (*input) (34,14).intensity = 88;
- (*input) (35,14).intensity = 85;
- (*input) (36,14).intensity = 87;
- (*input) (37,14).intensity = 86;
- (*input) (38,14).intensity = 86;
- (*input) (39,14).intensity = 84;
- (*input) (40,14).intensity = 83;
- (*input) (41,14).intensity = 83;
- (*input) (42,14).intensity = 84;
- (*input) (43,14).intensity = 83;
- (*input) (44,14).intensity = 86;
- (*input) (45,14).intensity = 85;
- (*input) (46,14).intensity = 83;
- (*input) (47,14).intensity = 82;
- (*input) (48,14).intensity = 83;
- (*input) (49,14).intensity = 81;
- (*input) (50,14).intensity = 84;
- (*input) (51,14).intensity = 82;
- (*input) (52,14).intensity = 81;
- (*input) (53,14).intensity = 83;
- (*input) (54,14).intensity = 82;
- (*input) (55,14).intensity = 83;
- (*input) (56,14).intensity = 83;
- (*input) (57,14).intensity = 84;
- (*input) (58,14).intensity = 86;
- (*input) (59,14).intensity = 75;
- (*input) (60,14).intensity = 34;
- (*input) (61,14).intensity = 50;
- (*input) (62,14).intensity = 126;
- (*input) (63,14).intensity = 143;
- (*input) (0,15).intensity = 102;
- (*input) (1,15).intensity = 100;
- (*input) (2,15).intensity = 99;
- (*input) (3,15).intensity = 100;
- (*input) (4,15).intensity = 99;
- (*input) (5,15).intensity = 99;
- (*input) (6,15).intensity = 100;
- (*input) (7,15).intensity = 98;
- (*input) (8,15).intensity = 98;
- (*input) (9,15).intensity = 97;
- (*input) (10,15).intensity = 95;
- (*input) (11,15).intensity = 96;
- (*input) (12,15).intensity = 94;
- (*input) (13,15).intensity = 96;
- (*input) (14,15).intensity = 95;
- (*input) (15,15).intensity = 93;
- (*input) (16,15).intensity = 93;
- (*input) (17,15).intensity = 93;
- (*input) (18,15).intensity = 93;
- (*input) (19,15).intensity = 90;
- (*input) (20,15).intensity = 92;
- (*input) (21,15).intensity = 92;
- (*input) (22,15).intensity = 93;
- (*input) (23,15).intensity = 93;
- (*input) (24,15).intensity = 92;
- (*input) (25,15).intensity = 90;
- (*input) (26,15).intensity = 91;
- (*input) (27,15).intensity = 91;
- (*input) (28,15).intensity = 89;
- (*input) (29,15).intensity = 89;
- (*input) (30,15).intensity = 90;
- (*input) (31,15).intensity = 89;
- (*input) (32,15).intensity = 89;
- (*input) (33,15).intensity = 89;
- (*input) (34,15).intensity = 89;
- (*input) (35,15).intensity = 86;
- (*input) (36,15).intensity = 88;
- (*input) (37,15).intensity = 87;
- (*input) (38,15).intensity = 88;
- (*input) (39,15).intensity = 86;
- (*input) (40,15).intensity = 85;
- (*input) (41,15).intensity = 84;
- (*input) (42,15).intensity = 85;
- (*input) (43,15).intensity = 83;
- (*input) (44,15).intensity = 87;
- (*input) (45,15).intensity = 86;
- (*input) (46,15).intensity = 84;
- (*input) (47,15).intensity = 84;
- (*input) (48,15).intensity = 85;
- (*input) (49,15).intensity = 83;
- (*input) (50,15).intensity = 85;
- (*input) (51,15).intensity = 84;
- (*input) (52,15).intensity = 83;
- (*input) (53,15).intensity = 85;
- (*input) (54,15).intensity = 83;
- (*input) (55,15).intensity = 85;
- (*input) (56,15).intensity = 84;
- (*input) (57,15).intensity = 86;
- (*input) (58,15).intensity = 88;
- (*input) (59,15).intensity = 68;
- (*input) (60,15).intensity = 33;
- (*input) (61,15).intensity = 51;
- (*input) (62,15).intensity = 119;
- (*input) (63,15).intensity = 144;
- (*input) (0,16).intensity = 102;
- (*input) (1,16).intensity = 101;
- (*input) (2,16).intensity = 100;
- (*input) (3,16).intensity = 101;
- (*input) (4,16).intensity = 100;
- (*input) (5,16).intensity = 100;
- (*input) (6,16).intensity = 99;
- (*input) (7,16).intensity = 98;
- (*input) (8,16).intensity = 98;
- (*input) (9,16).intensity = 97;
- (*input) (10,16).intensity = 96;
- (*input) (11,16).intensity = 96;
- (*input) (12,16).intensity = 95;
- (*input) (13,16).intensity = 96;
- (*input) (14,16).intensity = 94;
- (*input) (15,16).intensity = 94;
- (*input) (16,16).intensity = 93;
- (*input) (17,16).intensity = 94;
- (*input) (18,16).intensity = 93;
- (*input) (19,16).intensity = 91;
- (*input) (20,16).intensity = 92;
- (*input) (21,16).intensity = 92;
- (*input) (22,16).intensity = 95;
- (*input) (23,16).intensity = 93;
- (*input) (24,16).intensity = 92;
- (*input) (25,16).intensity = 91;
- (*input) (26,16).intensity = 92;
- (*input) (27,16).intensity = 91;
- (*input) (28,16).intensity = 91;
- (*input) (29,16).intensity = 91;
- (*input) (30,16).intensity = 92;
- (*input) (31,16).intensity = 90;
- (*input) (32,16).intensity = 89;
- (*input) (33,16).intensity = 90;
- (*input) (34,16).intensity = 89;
- (*input) (35,16).intensity = 88;
- (*input) (36,16).intensity = 90;
- (*input) (37,16).intensity = 88;
- (*input) (38,16).intensity = 88;
- (*input) (39,16).intensity = 88;
- (*input) (40,16).intensity = 87;
- (*input) (41,16).intensity = 85;
- (*input) (42,16).intensity = 88;
- (*input) (43,16).intensity = 85;
- (*input) (44,16).intensity = 87;
- (*input) (45,16).intensity = 87;
- (*input) (46,16).intensity = 86;
- (*input) (47,16).intensity = 86;
- (*input) (48,16).intensity = 87;
- (*input) (49,16).intensity = 85;
- (*input) (50,16).intensity = 86;
- (*input) (51,16).intensity = 86;
- (*input) (52,16).intensity = 84;
- (*input) (53,16).intensity = 86;
- (*input) (54,16).intensity = 86;
- (*input) (55,16).intensity = 85;
- (*input) (56,16).intensity = 86;
- (*input) (57,16).intensity = 86;
- (*input) (58,16).intensity = 89;
- (*input) (59,16).intensity = 72;
- (*input) (60,16).intensity = 47;
- (*input) (61,16).intensity = 48;
- (*input) (62,16).intensity = 76;
- (*input) (63,16).intensity = 120;
- (*input) (0,17).intensity = 102;
- (*input) (1,17).intensity = 102;
- (*input) (2,17).intensity = 101;
- (*input) (3,17).intensity = 100;
- (*input) (4,17).intensity = 101;
- (*input) (5,17).intensity = 100;
- (*input) (6,17).intensity = 99;
- (*input) (7,17).intensity = 99;
- (*input) (8,17).intensity = 99;
- (*input) (9,17).intensity = 98;
- (*input) (10,17).intensity = 96;
- (*input) (11,17).intensity = 97;
- (*input) (12,17).intensity = 97;
- (*input) (13,17).intensity = 98;
- (*input) (14,17).intensity = 97;
- (*input) (15,17).intensity = 96;
- (*input) (16,17).intensity = 95;
- (*input) (17,17).intensity = 94;
- (*input) (18,17).intensity = 94;
- (*input) (19,17).intensity = 93;
- (*input) (20,17).intensity = 95;
- (*input) (21,17).intensity = 94;
- (*input) (22,17).intensity = 94;
- (*input) (23,17).intensity = 94;
- (*input) (24,17).intensity = 92;
- (*input) (25,17).intensity = 92;
- (*input) (26,17).intensity = 92;
- (*input) (27,17).intensity = 92;
- (*input) (28,17).intensity = 91;
- (*input) (29,17).intensity = 92;
- (*input) (30,17).intensity = 94;
- (*input) (31,17).intensity = 90;
- (*input) (32,17).intensity = 90;
- (*input) (33,17).intensity = 90;
- (*input) (34,17).intensity = 89;
- (*input) (35,17).intensity = 89;
- (*input) (36,17).intensity = 90;
- (*input) (37,17).intensity = 89;
- (*input) (38,17).intensity = 89;
- (*input) (39,17).intensity = 89;
- (*input) (40,17).intensity = 89;
- (*input) (41,17).intensity = 87;
- (*input) (42,17).intensity = 88;
- (*input) (43,17).intensity = 86;
- (*input) (44,17).intensity = 88;
- (*input) (45,17).intensity = 88;
- (*input) (46,17).intensity = 88;
- (*input) (47,17).intensity = 88;
- (*input) (48,17).intensity = 90;
- (*input) (49,17).intensity = 87;
- (*input) (50,17).intensity = 88;
- (*input) (51,17).intensity = 88;
- (*input) (52,17).intensity = 88;
- (*input) (53,17).intensity = 87;
- (*input) (54,17).intensity = 88;
- (*input) (55,17).intensity = 87;
- (*input) (56,17).intensity = 89;
- (*input) (57,17).intensity = 89;
- (*input) (58,17).intensity = 89;
- (*input) (59,17).intensity = 84;
- (*input) (60,17).intensity = 65;
- (*input) (61,17).intensity = 56;
- (*input) (62,17).intensity = 64;
- (*input) (63,17).intensity = 98;
- (*input) (0,18).intensity = 102;
- (*input) (1,18).intensity = 102;
- (*input) (2,18).intensity = 103;
- (*input) (3,18).intensity = 101;
- (*input) (4,18).intensity = 103;
- (*input) (5,18).intensity = 102;
- (*input) (6,18).intensity = 99;
- (*input) (7,18).intensity = 100;
- (*input) (8,18).intensity = 100;
- (*input) (9,18).intensity = 99;
- (*input) (10,18).intensity = 98;
- (*input) (11,18).intensity = 97;
- (*input) (12,18).intensity = 98;
- (*input) (13,18).intensity = 97;
- (*input) (14,18).intensity = 98;
- (*input) (15,18).intensity = 97;
- (*input) (16,18).intensity = 96;
- (*input) (17,18).intensity = 96;
- (*input) (18,18).intensity = 96;
- (*input) (19,18).intensity = 94;
- (*input) (20,18).intensity = 96;
- (*input) (21,18).intensity = 96;
- (*input) (22,18).intensity = 95;
- (*input) (23,18).intensity = 96;
- (*input) (24,18).intensity = 94;
- (*input) (25,18).intensity = 92;
- (*input) (26,18).intensity = 94;
- (*input) (27,18).intensity = 92;
- (*input) (28,18).intensity = 93;
- (*input) (29,18).intensity = 93;
- (*input) (30,18).intensity = 94;
- (*input) (31,18).intensity = 92;
- (*input) (32,18).intensity = 91;
- (*input) (33,18).intensity = 91;
- (*input) (34,18).intensity = 91;
- (*input) (35,18).intensity = 90;
- (*input) (36,18).intensity = 92;
- (*input) (37,18).intensity = 91;
- (*input) (38,18).intensity = 90;
- (*input) (39,18).intensity = 91;
- (*input) (40,18).intensity = 90;
- (*input) (41,18).intensity = 90;
- (*input) (42,18).intensity = 89;
- (*input) (43,18).intensity = 89;
- (*input) (44,18).intensity = 89;
- (*input) (45,18).intensity = 90;
- (*input) (46,18).intensity = 88;
- (*input) (47,18).intensity = 89;
- (*input) (48,18).intensity = 90;
- (*input) (49,18).intensity = 89;
- (*input) (50,18).intensity = 89;
- (*input) (51,18).intensity = 90;
- (*input) (52,18).intensity = 88;
- (*input) (53,18).intensity = 87;
- (*input) (54,18).intensity = 89;
- (*input) (55,18).intensity = 88;
- (*input) (56,18).intensity = 89;
- (*input) (57,18).intensity = 89;
- (*input) (58,18).intensity = 90;
- (*input) (59,18).intensity = 95;
- (*input) (60,18).intensity = 87;
- (*input) (61,18).intensity = 85;
- (*input) (62,18).intensity = 99;
- (*input) (63,18).intensity = 101;
- (*input) (0,19).intensity = 103;
- (*input) (1,19).intensity = 103;
- (*input) (2,19).intensity = 102;
- (*input) (3,19).intensity = 101;
- (*input) (4,19).intensity = 102;
- (*input) (5,19).intensity = 102;
- (*input) (6,19).intensity = 100;
- (*input) (7,19).intensity = 100;
- (*input) (8,19).intensity = 99;
- (*input) (9,19).intensity = 101;
- (*input) (10,19).intensity = 99;
- (*input) (11,19).intensity = 98;
- (*input) (12,19).intensity = 99;
- (*input) (13,19).intensity = 98;
- (*input) (14,19).intensity = 98;
- (*input) (15,19).intensity = 97;
- (*input) (16,19).intensity = 97;
- (*input) (17,19).intensity = 96;
- (*input) (18,19).intensity = 96;
- (*input) (19,19).intensity = 95;
- (*input) (20,19).intensity = 96;
- (*input) (21,19).intensity = 98;
- (*input) (22,19).intensity = 95;
- (*input) (23,19).intensity = 97;
- (*input) (24,19).intensity = 94;
- (*input) (25,19).intensity = 94;
- (*input) (26,19).intensity = 94;
- (*input) (27,19).intensity = 93;
- (*input) (28,19).intensity = 93;
- (*input) (29,19).intensity = 93;
- (*input) (30,19).intensity = 96;
- (*input) (31,19).intensity = 93;
- (*input) (32,19).intensity = 94;
- (*input) (33,19).intensity = 92;
- (*input) (34,19).intensity = 93;
- (*input) (35,19).intensity = 92;
- (*input) (36,19).intensity = 92;
- (*input) (37,19).intensity = 92;
- (*input) (38,19).intensity = 93;
- (*input) (39,19).intensity = 91;
- (*input) (40,19).intensity = 91;
- (*input) (41,19).intensity = 92;
- (*input) (42,19).intensity = 89;
- (*input) (43,19).intensity = 90;
- (*input) (44,19).intensity = 90;
- (*input) (45,19).intensity = 92;
- (*input) (46,19).intensity = 91;
- (*input) (47,19).intensity = 90;
- (*input) (48,19).intensity = 91;
- (*input) (49,19).intensity = 89;
- (*input) (50,19).intensity = 91;
- (*input) (51,19).intensity = 89;
- (*input) (52,19).intensity = 89;
- (*input) (53,19).intensity = 89;
- (*input) (54,19).intensity = 90;
- (*input) (55,19).intensity = 89;
- (*input) (56,19).intensity = 90;
- (*input) (57,19).intensity = 91;
- (*input) (58,19).intensity = 90;
- (*input) (59,19).intensity = 96;
- (*input) (60,19).intensity = 94;
- (*input) (61,19).intensity = 107;
- (*input) (62,19).intensity = 127;
- (*input) (63,19).intensity = 94;
- (*input) (0,20).intensity = 103;
- (*input) (1,20).intensity = 103;
- (*input) (2,20).intensity = 103;
- (*input) (3,20).intensity = 103;
- (*input) (4,20).intensity = 103;
- (*input) (5,20).intensity = 103;
- (*input) (6,20).intensity = 102;
- (*input) (7,20).intensity = 101;
- (*input) (8,20).intensity = 100;
- (*input) (9,20).intensity = 101;
- (*input) (10,20).intensity = 99;
- (*input) (11,20).intensity = 99;
- (*input) (12,20).intensity = 99;
- (*input) (13,20).intensity = 98;
- (*input) (14,20).intensity = 98;
- (*input) (15,20).intensity = 98;
- (*input) (16,20).intensity = 97;
- (*input) (17,20).intensity = 97;
- (*input) (18,20).intensity = 96;
- (*input) (19,20).intensity = 95;
- (*input) (20,20).intensity = 96;
- (*input) (21,20).intensity = 98;
- (*input) (22,20).intensity = 97;
- (*input) (23,20).intensity = 98;
- (*input) (24,20).intensity = 97;
- (*input) (25,20).intensity = 95;
- (*input) (26,20).intensity = 96;
- (*input) (27,20).intensity = 94;
- (*input) (28,20).intensity = 95;
- (*input) (29,20).intensity = 95;
- (*input) (30,20).intensity = 95;
- (*input) (31,20).intensity = 94;
- (*input) (32,20).intensity = 94;
- (*input) (33,20).intensity = 93;
- (*input) (34,20).intensity = 94;
- (*input) (35,20).intensity = 93;
- (*input) (36,20).intensity = 94;
- (*input) (37,20).intensity = 92;
- (*input) (38,20).intensity = 93;
- (*input) (39,20).intensity = 93;
- (*input) (40,20).intensity = 92;
- (*input) (41,20).intensity = 92;
- (*input) (42,20).intensity = 91;
- (*input) (43,20).intensity = 92;
- (*input) (44,20).intensity = 92;
- (*input) (45,20).intensity = 91;
- (*input) (46,20).intensity = 92;
- (*input) (47,20).intensity = 91;
- (*input) (48,20).intensity = 90;
- (*input) (49,20).intensity = 90;
- (*input) (50,20).intensity = 91;
- (*input) (51,20).intensity = 90;
- (*input) (52,20).intensity = 88;
- (*input) (53,20).intensity = 89;
- (*input) (54,20).intensity = 90;
- (*input) (55,20).intensity = 90;
- (*input) (56,20).intensity = 89;
- (*input) (57,20).intensity = 90;
- (*input) (58,20).intensity = 90;
- (*input) (59,20).intensity = 93;
- (*input) (60,20).intensity = 101;
- (*input) (61,20).intensity = 115;
- (*input) (62,20).intensity = 121;
- (*input) (63,20).intensity = 81;
- (*input) (0,21).intensity = 104;
- (*input) (1,21).intensity = 105;
- (*input) (2,21).intensity = 104;
- (*input) (3,21).intensity = 104;
- (*input) (4,21).intensity = 103;
- (*input) (5,21).intensity = 102;
- (*input) (6,21).intensity = 103;
- (*input) (7,21).intensity = 102;
- (*input) (8,21).intensity = 101;
- (*input) (9,21).intensity = 101;
- (*input) (10,21).intensity = 100;
- (*input) (11,21).intensity = 100;
- (*input) (12,21).intensity = 99;
- (*input) (13,21).intensity = 99;
- (*input) (14,21).intensity = 98;
- (*input) (15,21).intensity = 98;
- (*input) (16,21).intensity = 98;
- (*input) (17,21).intensity = 97;
- (*input) (18,21).intensity = 97;
- (*input) (19,21).intensity = 96;
- (*input) (20,21).intensity = 95;
- (*input) (21,21).intensity = 97;
- (*input) (22,21).intensity = 97;
- (*input) (23,21).intensity = 97;
- (*input) (24,21).intensity = 96;
- (*input) (25,21).intensity = 96;
- (*input) (26,21).intensity = 96;
- (*input) (27,21).intensity = 96;
- (*input) (28,21).intensity = 96;
- (*input) (29,21).intensity = 96;
- (*input) (30,21).intensity = 95;
- (*input) (31,21).intensity = 95;
- (*input) (32,21).intensity = 96;
- (*input) (33,21).intensity = 94;
- (*input) (34,21).intensity = 94;
- (*input) (35,21).intensity = 93;
- (*input) (36,21).intensity = 94;
- (*input) (37,21).intensity = 92;
- (*input) (38,21).intensity = 94;
- (*input) (39,21).intensity = 94;
- (*input) (40,21).intensity = 93;
- (*input) (41,21).intensity = 92;
- (*input) (42,21).intensity = 93;
- (*input) (43,21).intensity = 93;
- (*input) (44,21).intensity = 92;
- (*input) (45,21).intensity = 92;
- (*input) (46,21).intensity = 93;
- (*input) (47,21).intensity = 91;
- (*input) (48,21).intensity = 90;
- (*input) (49,21).intensity = 91;
- (*input) (50,21).intensity = 91;
- (*input) (51,21).intensity = 89;
- (*input) (52,21).intensity = 90;
- (*input) (53,21).intensity = 92;
- (*input) (54,21).intensity = 90;
- (*input) (55,21).intensity = 91;
- (*input) (56,21).intensity = 89;
- (*input) (57,21).intensity = 91;
- (*input) (58,21).intensity = 91;
- (*input) (59,21).intensity = 92;
- (*input) (60,21).intensity = 100;
- (*input) (61,21).intensity = 110;
- (*input) (62,21).intensity = 99;
- (*input) (63,21).intensity = 82;
- (*input) (0,22).intensity = 105;
- (*input) (1,22).intensity = 104;
- (*input) (2,22).intensity = 105;
- (*input) (3,22).intensity = 105;
- (*input) (4,22).intensity = 103;
- (*input) (5,22).intensity = 104;
- (*input) (6,22).intensity = 103;
- (*input) (7,22).intensity = 102;
- (*input) (8,22).intensity = 102;
- (*input) (9,22).intensity = 101;
- (*input) (10,22).intensity = 102;
- (*input) (11,22).intensity = 100;
- (*input) (12,22).intensity = 99;
- (*input) (13,22).intensity = 99;
- (*input) (14,22).intensity = 99;
- (*input) (15,22).intensity = 99;
- (*input) (16,22).intensity = 98;
- (*input) (17,22).intensity = 98;
- (*input) (18,22).intensity = 98;
- (*input) (19,22).intensity = 98;
- (*input) (20,22).intensity = 96;
- (*input) (21,22).intensity = 96;
- (*input) (22,22).intensity = 98;
- (*input) (23,22).intensity = 98;
- (*input) (24,22).intensity = 97;
- (*input) (25,22).intensity = 97;
- (*input) (26,22).intensity = 96;
- (*input) (27,22).intensity = 97;
- (*input) (28,22).intensity = 97;
- (*input) (29,22).intensity = 97;
- (*input) (30,22).intensity = 97;
- (*input) (31,22).intensity = 96;
- (*input) (32,22).intensity = 95;
- (*input) (33,22).intensity = 94;
- (*input) (34,22).intensity = 94;
- (*input) (35,22).intensity = 95;
- (*input) (36,22).intensity = 96;
- (*input) (37,22).intensity = 93;
- (*input) (38,22).intensity = 94;
- (*input) (39,22).intensity = 94;
- (*input) (40,22).intensity = 94;
- (*input) (41,22).intensity = 92;
- (*input) (42,22).intensity = 93;
- (*input) (43,22).intensity = 93;
- (*input) (44,22).intensity = 93;
- (*input) (45,22).intensity = 91;
- (*input) (46,22).intensity = 93;
- (*input) (47,22).intensity = 90;
- (*input) (48,22).intensity = 90;
- (*input) (49,22).intensity = 92;
- (*input) (50,22).intensity = 92;
- (*input) (51,22).intensity = 91;
- (*input) (52,22).intensity = 92;
- (*input) (53,22).intensity = 90;
- (*input) (54,22).intensity = 90;
- (*input) (55,22).intensity = 92;
- (*input) (56,22).intensity = 90;
- (*input) (57,22).intensity = 90;
- (*input) (58,22).intensity = 92;
- (*input) (59,22).intensity = 90;
- (*input) (60,22).intensity = 94;
- (*input) (61,22).intensity = 89;
- (*input) (62,22).intensity = 87;
- (*input) (63,22).intensity = 89;
- (*input) (0,23).intensity = 106;
- (*input) (1,23).intensity = 105;
- (*input) (2,23).intensity = 107;
- (*input) (3,23).intensity = 105;
- (*input) (4,23).intensity = 104;
- (*input) (5,23).intensity = 102;
- (*input) (6,23).intensity = 103;
- (*input) (7,23).intensity = 104;
- (*input) (8,23).intensity = 103;
- (*input) (9,23).intensity = 102;
- (*input) (10,23).intensity = 102;
- (*input) (11,23).intensity = 100;
- (*input) (12,23).intensity = 100;
- (*input) (13,23).intensity = 101;
- (*input) (14,23).intensity = 99;
- (*input) (15,23).intensity = 100;
- (*input) (16,23).intensity = 97;
- (*input) (17,23).intensity = 98;
- (*input) (18,23).intensity = 98;
- (*input) (19,23).intensity = 99;
- (*input) (20,23).intensity = 97;
- (*input) (21,23).intensity = 97;
- (*input) (22,23).intensity = 98;
- (*input) (23,23).intensity = 99;
- (*input) (24,23).intensity = 97;
- (*input) (25,23).intensity = 97;
- (*input) (26,23).intensity = 97;
- (*input) (27,23).intensity = 97;
- (*input) (28,23).intensity = 98;
- (*input) (29,23).intensity = 97;
- (*input) (30,23).intensity = 97;
- (*input) (31,23).intensity = 96;
- (*input) (32,23).intensity = 96;
- (*input) (33,23).intensity = 96;
- (*input) (34,23).intensity = 95;
- (*input) (35,23).intensity = 95;
- (*input) (36,23).intensity = 96;
- (*input) (37,23).intensity = 94;
- (*input) (38,23).intensity = 94;
- (*input) (39,23).intensity = 94;
- (*input) (40,23).intensity = 94;
- (*input) (41,23).intensity = 94;
- (*input) (42,23).intensity = 93;
- (*input) (43,23).intensity = 94;
- (*input) (44,23).intensity = 94;
- (*input) (45,23).intensity = 92;
- (*input) (46,23).intensity = 93;
- (*input) (47,23).intensity = 92;
- (*input) (48,23).intensity = 92;
- (*input) (49,23).intensity = 94;
- (*input) (50,23).intensity = 92;
- (*input) (51,23).intensity = 93;
- (*input) (52,23).intensity = 94;
- (*input) (53,23).intensity = 92;
- (*input) (54,23).intensity = 91;
- (*input) (55,23).intensity = 93;
- (*input) (56,23).intensity = 91;
- (*input) (57,23).intensity = 90;
- (*input) (58,23).intensity = 93;
- (*input) (59,23).intensity = 92;
- (*input) (60,23).intensity = 93;
- (*input) (61,23).intensity = 87;
- (*input) (62,23).intensity = 91;
- (*input) (63,23).intensity = 93;
- (*input) (0,24).intensity = 107;
- (*input) (1,24).intensity = 105;
- (*input) (2,24).intensity = 106;
- (*input) (3,24).intensity = 105;
- (*input) (4,24).intensity = 104;
- (*input) (5,24).intensity = 105;
- (*input) (6,24).intensity = 103;
- (*input) (7,24).intensity = 102;
- (*input) (8,24).intensity = 103;
- (*input) (9,24).intensity = 102;
- (*input) (10,24).intensity = 103;
- (*input) (11,24).intensity = 100;
- (*input) (12,24).intensity = 101;
- (*input) (13,24).intensity = 100;
- (*input) (14,24).intensity = 100;
- (*input) (15,24).intensity = 100;
- (*input) (16,24).intensity = 99;
- (*input) (17,24).intensity = 98;
- (*input) (18,24).intensity = 99;
- (*input) (19,24).intensity = 100;
- (*input) (20,24).intensity = 99;
- (*input) (21,24).intensity = 98;
- (*input) (22,24).intensity = 99;
- (*input) (23,24).intensity = 99;
- (*input) (24,24).intensity = 98;
- (*input) (25,24).intensity = 98;
- (*input) (26,24).intensity = 98;
- (*input) (27,24).intensity = 97;
- (*input) (28,24).intensity = 98;
- (*input) (29,24).intensity = 97;
- (*input) (30,24).intensity = 99;
- (*input) (31,24).intensity = 97;
- (*input) (32,24).intensity = 96;
- (*input) (33,24).intensity = 96;
- (*input) (34,24).intensity = 96;
- (*input) (35,24).intensity = 95;
- (*input) (36,24).intensity = 96;
- (*input) (37,24).intensity = 96;
- (*input) (38,24).intensity = 97;
- (*input) (39,24).intensity = 96;
- (*input) (40,24).intensity = 94;
- (*input) (41,24).intensity = 96;
- (*input) (42,24).intensity = 94;
- (*input) (43,24).intensity = 94;
- (*input) (44,24).intensity = 96;
- (*input) (45,24).intensity = 93;
- (*input) (46,24).intensity = 94;
- (*input) (47,24).intensity = 91;
- (*input) (48,24).intensity = 93;
- (*input) (49,24).intensity = 95;
- (*input) (50,24).intensity = 94;
- (*input) (51,24).intensity = 93;
- (*input) (52,24).intensity = 95;
- (*input) (53,24).intensity = 91;
- (*input) (54,24).intensity = 92;
- (*input) (55,24).intensity = 93;
- (*input) (56,24).intensity = 93;
- (*input) (57,24).intensity = 91;
- (*input) (58,24).intensity = 92;
- (*input) (59,24).intensity = 91;
- (*input) (60,24).intensity = 92;
- (*input) (61,24).intensity = 90;
- (*input) (62,24).intensity = 92;
- (*input) (63,24).intensity = 93;
- (*input) (0,25).intensity = 108;
- (*input) (1,25).intensity = 106;
- (*input) (2,25).intensity = 105;
- (*input) (3,25).intensity = 106;
- (*input) (4,25).intensity = 105;
- (*input) (5,25).intensity = 103;
- (*input) (6,25).intensity = 104;
- (*input) (7,25).intensity = 102;
- (*input) (8,25).intensity = 104;
- (*input) (9,25).intensity = 103;
- (*input) (10,25).intensity = 103;
- (*input) (11,25).intensity = 100;
- (*input) (12,25).intensity = 101;
- (*input) (13,25).intensity = 101;
- (*input) (14,25).intensity = 101;
- (*input) (15,25).intensity = 101;
- (*input) (16,25).intensity = 99;
- (*input) (17,25).intensity = 99;
- (*input) (18,25).intensity = 100;
- (*input) (19,25).intensity = 99;
- (*input) (20,25).intensity = 100;
- (*input) (21,25).intensity = 99;
- (*input) (22,25).intensity = 99;
- (*input) (23,25).intensity = 100;
- (*input) (24,25).intensity = 99;
- (*input) (25,25).intensity = 99;
- (*input) (26,25).intensity = 99;
- (*input) (27,25).intensity = 98;
- (*input) (28,25).intensity = 98;
- (*input) (29,25).intensity = 98;
- (*input) (30,25).intensity = 98;
- (*input) (31,25).intensity = 96;
- (*input) (32,25).intensity = 98;
- (*input) (33,25).intensity = 96;
- (*input) (34,25).intensity = 96;
- (*input) (35,25).intensity = 96;
- (*input) (36,25).intensity = 96;
- (*input) (37,25).intensity = 96;
- (*input) (38,25).intensity = 96;
- (*input) (39,25).intensity = 96;
- (*input) (40,25).intensity = 96;
- (*input) (41,25).intensity = 96;
- (*input) (42,25).intensity = 96;
- (*input) (43,25).intensity = 96;
- (*input) (44,25).intensity = 96;
- (*input) (45,25).intensity = 94;
- (*input) (46,25).intensity = 96;
- (*input) (47,25).intensity = 94;
- (*input) (48,25).intensity = 94;
- (*input) (49,25).intensity = 94;
- (*input) (50,25).intensity = 93;
- (*input) (51,25).intensity = 96;
- (*input) (52,25).intensity = 95;
- (*input) (53,25).intensity = 92;
- (*input) (54,25).intensity = 93;
- (*input) (55,25).intensity = 93;
- (*input) (56,25).intensity = 94;
- (*input) (57,25).intensity = 91;
- (*input) (58,25).intensity = 92;
- (*input) (59,25).intensity = 93;
- (*input) (60,25).intensity = 92;
- (*input) (61,25).intensity = 92;
- (*input) (62,25).intensity = 91;
- (*input) (63,25).intensity = 94;
- (*input) (0,26).intensity = 110;
- (*input) (1,26).intensity = 106;
- (*input) (2,26).intensity = 105;
- (*input) (3,26).intensity = 106;
- (*input) (4,26).intensity = 105;
- (*input) (5,26).intensity = 105;
- (*input) (6,26).intensity = 105;
- (*input) (7,26).intensity = 101;
- (*input) (8,26).intensity = 104;
- (*input) (9,26).intensity = 103;
- (*input) (10,26).intensity = 102;
- (*input) (11,26).intensity = 99;
- (*input) (12,26).intensity = 101;
- (*input) (13,26).intensity = 100;
- (*input) (14,26).intensity = 101;
- (*input) (15,26).intensity = 101;
- (*input) (16,26).intensity = 101;
- (*input) (17,26).intensity = 101;
- (*input) (18,26).intensity = 100;
- (*input) (19,26).intensity = 99;
- (*input) (20,26).intensity = 101;
- (*input) (21,26).intensity = 98;
- (*input) (22,26).intensity = 99;
- (*input) (23,26).intensity = 101;
- (*input) (24,26).intensity = 99;
- (*input) (25,26).intensity = 100;
- (*input) (26,26).intensity = 99;
- (*input) (27,26).intensity = 98;
- (*input) (28,26).intensity = 98;
- (*input) (29,26).intensity = 99;
- (*input) (30,26).intensity = 99;
- (*input) (31,26).intensity = 98;
- (*input) (32,26).intensity = 98;
- (*input) (33,26).intensity = 97;
- (*input) (34,26).intensity = 96;
- (*input) (35,26).intensity = 97;
- (*input) (36,26).intensity = 97;
- (*input) (37,26).intensity = 97;
- (*input) (38,26).intensity = 97;
- (*input) (39,26).intensity = 98;
- (*input) (40,26).intensity = 96;
- (*input) (41,26).intensity = 97;
- (*input) (42,26).intensity = 96;
- (*input) (43,26).intensity = 96;
- (*input) (44,26).intensity = 96;
- (*input) (45,26).intensity = 93;
- (*input) (46,26).intensity = 96;
- (*input) (47,26).intensity = 94;
- (*input) (48,26).intensity = 96;
- (*input) (49,26).intensity = 94;
- (*input) (50,26).intensity = 94;
- (*input) (51,26).intensity = 94;
- (*input) (52,26).intensity = 96;
- (*input) (53,26).intensity = 93;
- (*input) (54,26).intensity = 94;
- (*input) (55,26).intensity = 93;
- (*input) (56,26).intensity = 96;
- (*input) (57,26).intensity = 93;
- (*input) (58,26).intensity = 91;
- (*input) (59,26).intensity = 93;
- (*input) (60,26).intensity = 93;
- (*input) (61,26).intensity = 92;
- (*input) (62,26).intensity = 92;
- (*input) (63,26).intensity = 93;
- (*input) (0,27).intensity = 110;
- (*input) (1,27).intensity = 107;
- (*input) (2,27).intensity = 106;
- (*input) (3,27).intensity = 106;
- (*input) (4,27).intensity = 106;
- (*input) (5,27).intensity = 105;
- (*input) (6,27).intensity = 106;
- (*input) (7,27).intensity = 103;
- (*input) (8,27).intensity = 105;
- (*input) (9,27).intensity = 103;
- (*input) (10,27).intensity = 103;
- (*input) (11,27).intensity = 102;
- (*input) (12,27).intensity = 102;
- (*input) (13,27).intensity = 101;
- (*input) (14,27).intensity = 101;
- (*input) (15,27).intensity = 102;
- (*input) (16,27).intensity = 100;
- (*input) (17,27).intensity = 101;
- (*input) (18,27).intensity = 100;
- (*input) (19,27).intensity = 100;
- (*input) (20,27).intensity = 101;
- (*input) (21,27).intensity = 99;
- (*input) (22,27).intensity = 100;
- (*input) (23,27).intensity = 100;
- (*input) (24,27).intensity = 99;
- (*input) (25,27).intensity = 100;
- (*input) (26,27).intensity = 100;
- (*input) (27,27).intensity = 99;
- (*input) (28,27).intensity = 98;
- (*input) (29,27).intensity = 100;
- (*input) (30,27).intensity = 99;
- (*input) (31,27).intensity = 98;
- (*input) (32,27).intensity = 98;
- (*input) (33,27).intensity = 97;
- (*input) (34,27).intensity = 97;
- (*input) (35,27).intensity = 98;
- (*input) (36,27).intensity = 96;
- (*input) (37,27).intensity = 98;
- (*input) (38,27).intensity = 97;
- (*input) (39,27).intensity = 97;
- (*input) (40,27).intensity = 97;
- (*input) (41,27).intensity = 98;
- (*input) (42,27).intensity = 97;
- (*input) (43,27).intensity = 96;
- (*input) (44,27).intensity = 96;
- (*input) (45,27).intensity = 95;
- (*input) (46,27).intensity = 97;
- (*input) (47,27).intensity = 96;
- (*input) (48,27).intensity = 96;
- (*input) (49,27).intensity = 93;
- (*input) (50,27).intensity = 94;
- (*input) (51,27).intensity = 94;
- (*input) (52,27).intensity = 96;
- (*input) (53,27).intensity = 94;
- (*input) (54,27).intensity = 95;
- (*input) (55,27).intensity = 94;
- (*input) (56,27).intensity = 96;
- (*input) (57,27).intensity = 94;
- (*input) (58,27).intensity = 93;
- (*input) (59,27).intensity = 94;
- (*input) (60,27).intensity = 93;
- (*input) (61,27).intensity = 94;
- (*input) (62,27).intensity = 92;
- (*input) (63,27).intensity = 92;
- (*input) (0,28).intensity = 111;
- (*input) (1,28).intensity = 108;
- (*input) (2,28).intensity = 109;
- (*input) (3,28).intensity = 107;
- (*input) (4,28).intensity = 106;
- (*input) (5,28).intensity = 106;
- (*input) (6,28).intensity = 106;
- (*input) (7,28).intensity = 105;
- (*input) (8,28).intensity = 104;
- (*input) (9,28).intensity = 103;
- (*input) (10,28).intensity = 104;
- (*input) (11,28).intensity = 101;
- (*input) (12,28).intensity = 103;
- (*input) (13,28).intensity = 102;
- (*input) (14,28).intensity = 100;
- (*input) (15,28).intensity = 102;
- (*input) (16,28).intensity = 100;
- (*input) (17,28).intensity = 101;
- (*input) (18,28).intensity = 99;
- (*input) (19,28).intensity = 101;
- (*input) (20,28).intensity = 101;
- (*input) (21,28).intensity = 99;
- (*input) (22,28).intensity = 101;
- (*input) (23,28).intensity = 100;
- (*input) (24,28).intensity = 99;
- (*input) (25,28).intensity = 101;
- (*input) (26,28).intensity = 100;
- (*input) (27,28).intensity = 99;
- (*input) (28,28).intensity = 99;
- (*input) (29,28).intensity = 99;
- (*input) (30,28).intensity = 99;
- (*input) (31,28).intensity = 99;
- (*input) (32,28).intensity = 99;
- (*input) (33,28).intensity = 98;
- (*input) (34,28).intensity = 99;
- (*input) (35,28).intensity = 98;
- (*input) (36,28).intensity = 97;
- (*input) (37,28).intensity = 98;
- (*input) (38,28).intensity = 97;
- (*input) (39,28).intensity = 98;
- (*input) (40,28).intensity = 98;
- (*input) (41,28).intensity = 98;
- (*input) (42,28).intensity = 98;
- (*input) (43,28).intensity = 96;
- (*input) (44,28).intensity = 97;
- (*input) (45,28).intensity = 96;
- (*input) (46,28).intensity = 97;
- (*input) (47,28).intensity = 96;
- (*input) (48,28).intensity = 96;
- (*input) (49,28).intensity = 95;
- (*input) (50,28).intensity = 95;
- (*input) (51,28).intensity = 95;
- (*input) (52,28).intensity = 95;
- (*input) (53,28).intensity = 94;
- (*input) (54,28).intensity = 95;
- (*input) (55,28).intensity = 94;
- (*input) (56,28).intensity = 96;
- (*input) (57,28).intensity = 96;
- (*input) (58,28).intensity = 93;
- (*input) (59,28).intensity = 93;
- (*input) (60,28).intensity = 94;
- (*input) (61,28).intensity = 93;
- (*input) (62,28).intensity = 94;
- (*input) (63,28).intensity = 92;
- (*input) (0,29).intensity = 114;
- (*input) (1,29).intensity = 110;
- (*input) (2,29).intensity = 110;
- (*input) (3,29).intensity = 108;
- (*input) (4,29).intensity = 108;
- (*input) (5,29).intensity = 107;
- (*input) (6,29).intensity = 106;
- (*input) (7,29).intensity = 106;
- (*input) (8,29).intensity = 104;
- (*input) (9,29).intensity = 105;
- (*input) (10,29).intensity = 106;
- (*input) (11,29).intensity = 103;
- (*input) (12,29).intensity = 103;
- (*input) (13,29).intensity = 102;
- (*input) (14,29).intensity = 101;
- (*input) (15,29).intensity = 103;
- (*input) (16,29).intensity = 101;
- (*input) (17,29).intensity = 101;
- (*input) (18,29).intensity = 101;
- (*input) (19,29).intensity = 102;
- (*input) (20,29).intensity = 101;
- (*input) (21,29).intensity = 99;
- (*input) (22,29).intensity = 101;
- (*input) (23,29).intensity = 99;
- (*input) (24,29).intensity = 101;
- (*input) (25,29).intensity = 100;
- (*input) (26,29).intensity = 101;
- (*input) (27,29).intensity = 101;
- (*input) (28,29).intensity = 101;
- (*input) (29,29).intensity = 100;
- (*input) (30,29).intensity = 101;
- (*input) (31,29).intensity = 99;
- (*input) (32,29).intensity = 99;
- (*input) (33,29).intensity = 99;
- (*input) (34,29).intensity = 100;
- (*input) (35,29).intensity = 98;
- (*input) (36,29).intensity = 99;
- (*input) (37,29).intensity = 99;
- (*input) (38,29).intensity = 97;
- (*input) (39,29).intensity = 98;
- (*input) (40,29).intensity = 99;
- (*input) (41,29).intensity = 99;
- (*input) (42,29).intensity = 98;
- (*input) (43,29).intensity = 97;
- (*input) (44,29).intensity = 97;
- (*input) (45,29).intensity = 98;
- (*input) (46,29).intensity = 98;
- (*input) (47,29).intensity = 98;
- (*input) (48,29).intensity = 97;
- (*input) (49,29).intensity = 96;
- (*input) (50,29).intensity = 97;
- (*input) (51,29).intensity = 97;
- (*input) (52,29).intensity = 96;
- (*input) (53,29).intensity = 96;
- (*input) (54,29).intensity = 96;
- (*input) (55,29).intensity = 94;
- (*input) (56,29).intensity = 96;
- (*input) (57,29).intensity = 96;
- (*input) (58,29).intensity = 95;
- (*input) (59,29).intensity = 94;
- (*input) (60,29).intensity = 94;
- (*input) (61,29).intensity = 95;
- (*input) (62,29).intensity = 94;
- (*input) (63,29).intensity = 92;
- (*input) (0,30).intensity = 115;
- (*input) (1,30).intensity = 112;
- (*input) (2,30).intensity = 111;
- (*input) (3,30).intensity = 109;
- (*input) (4,30).intensity = 109;
- (*input) (5,30).intensity = 107;
- (*input) (6,30).intensity = 106;
- (*input) (7,30).intensity = 106;
- (*input) (8,30).intensity = 105;
- (*input) (9,30).intensity = 106;
- (*input) (10,30).intensity = 106;
- (*input) (11,30).intensity = 103;
- (*input) (12,30).intensity = 103;
- (*input) (13,30).intensity = 104;
- (*input) (14,30).intensity = 103;
- (*input) (15,30).intensity = 103;
- (*input) (16,30).intensity = 101;
- (*input) (17,30).intensity = 102;
- (*input) (18,30).intensity = 101;
- (*input) (19,30).intensity = 102;
- (*input) (20,30).intensity = 101;
- (*input) (21,30).intensity = 100;
- (*input) (22,30).intensity = 102;
- (*input) (23,30).intensity = 99;
- (*input) (24,30).intensity = 101;
- (*input) (25,30).intensity = 101;
- (*input) (26,30).intensity = 101;
- (*input) (27,30).intensity = 101;
- (*input) (28,30).intensity = 100;
- (*input) (29,30).intensity = 99;
- (*input) (30,30).intensity = 101;
- (*input) (31,30).intensity = 101;
- (*input) (32,30).intensity = 100;
- (*input) (33,30).intensity = 99;
- (*input) (34,30).intensity = 101;
- (*input) (35,30).intensity = 99;
- (*input) (36,30).intensity = 99;
- (*input) (37,30).intensity = 99;
- (*input) (38,30).intensity = 97;
- (*input) (39,30).intensity = 98;
- (*input) (40,30).intensity = 97;
- (*input) (41,30).intensity = 98;
- (*input) (42,30).intensity = 99;
- (*input) (43,30).intensity = 98;
- (*input) (44,30).intensity = 98;
- (*input) (45,30).intensity = 98;
- (*input) (46,30).intensity = 98;
- (*input) (47,30).intensity = 98;
- (*input) (48,30).intensity = 98;
- (*input) (49,30).intensity = 97;
- (*input) (50,30).intensity = 97;
- (*input) (51,30).intensity = 96;
- (*input) (52,30).intensity = 95;
- (*input) (53,30).intensity = 96;
- (*input) (54,30).intensity = 96;
- (*input) (55,30).intensity = 95;
- (*input) (56,30).intensity = 96;
- (*input) (57,30).intensity = 97;
- (*input) (58,30).intensity = 95;
- (*input) (59,30).intensity = 94;
- (*input) (60,30).intensity = 96;
- (*input) (61,30).intensity = 94;
- (*input) (62,30).intensity = 94;
- (*input) (63,30).intensity = 94;
- (*input) (0,31).intensity = 115;
- (*input) (1,31).intensity = 114;
- (*input) (2,31).intensity = 111;
- (*input) (3,31).intensity = 111;
- (*input) (4,31).intensity = 111;
- (*input) (5,31).intensity = 109;
- (*input) (6,31).intensity = 107;
- (*input) (7,31).intensity = 106;
- (*input) (8,31).intensity = 106;
- (*input) (9,31).intensity = 105;
- (*input) (10,31).intensity = 106;
- (*input) (11,31).intensity = 105;
- (*input) (12,31).intensity = 103;
- (*input) (13,31).intensity = 104;
- (*input) (14,31).intensity = 102;
- (*input) (15,31).intensity = 103;
- (*input) (16,31).intensity = 101;
- (*input) (17,31).intensity = 101;
- (*input) (18,31).intensity = 103;
- (*input) (19,31).intensity = 102;
- (*input) (20,31).intensity = 101;
- (*input) (21,31).intensity = 100;
- (*input) (22,31).intensity = 102;
- (*input) (23,31).intensity = 100;
- (*input) (24,31).intensity = 101;
- (*input) (25,31).intensity = 100;
- (*input) (26,31).intensity = 101;
- (*input) (27,31).intensity = 100;
- (*input) (28,31).intensity = 101;
- (*input) (29,31).intensity = 100;
- (*input) (30,31).intensity = 101;
- (*input) (31,31).intensity = 100;
- (*input) (32,31).intensity = 100;
- (*input) (33,31).intensity = 99;
- (*input) (34,31).intensity = 101;
- (*input) (35,31).intensity = 98;
- (*input) (36,31).intensity = 100;
- (*input) (37,31).intensity = 100;
- (*input) (38,31).intensity = 98;
- (*input) (39,31).intensity = 99;
- (*input) (40,31).intensity = 99;
- (*input) (41,31).intensity = 99;
- (*input) (42,31).intensity = 99;
- (*input) (43,31).intensity = 98;
- (*input) (44,31).intensity = 98;
- (*input) (45,31).intensity = 98;
- (*input) (46,31).intensity = 99;
- (*input) (47,31).intensity = 99;
- (*input) (48,31).intensity = 98;
- (*input) (49,31).intensity = 98;
- (*input) (50,31).intensity = 98;
- (*input) (51,31).intensity = 97;
- (*input) (52,31).intensity = 97;
- (*input) (53,31).intensity = 96;
- (*input) (54,31).intensity = 96;
- (*input) (55,31).intensity = 94;
- (*input) (56,31).intensity = 95;
- (*input) (57,31).intensity = 97;
- (*input) (58,31).intensity = 96;
- (*input) (59,31).intensity = 94;
- (*input) (60,31).intensity = 96;
- (*input) (61,31).intensity = 96;
- (*input) (62,31).intensity = 94;
- (*input) (63,31).intensity = 94;
- (*input) (0,32).intensity = 115;
- (*input) (1,32).intensity = 114;
- (*input) (2,32).intensity = 111;
- (*input) (3,32).intensity = 111;
- (*input) (4,32).intensity = 111;
- (*input) (5,32).intensity = 110;
- (*input) (6,32).intensity = 108;
- (*input) (7,32).intensity = 107;
- (*input) (8,32).intensity = 107;
- (*input) (9,32).intensity = 105;
- (*input) (10,32).intensity = 106;
- (*input) (11,32).intensity = 105;
- (*input) (12,32).intensity = 103;
- (*input) (13,32).intensity = 105;
- (*input) (14,32).intensity = 104;
- (*input) (15,32).intensity = 103;
- (*input) (16,32).intensity = 102;
- (*input) (17,32).intensity = 102;
- (*input) (18,32).intensity = 102;
- (*input) (19,32).intensity = 102;
- (*input) (20,32).intensity = 102;
- (*input) (21,32).intensity = 101;
- (*input) (22,32).intensity = 102;
- (*input) (23,32).intensity = 102;
- (*input) (24,32).intensity = 101;
- (*input) (25,32).intensity = 99;
- (*input) (26,32).intensity = 101;
- (*input) (27,32).intensity = 101;
- (*input) (28,32).intensity = 101;
- (*input) (29,32).intensity = 100;
- (*input) (30,32).intensity = 101;
- (*input) (31,32).intensity = 101;
- (*input) (32,32).intensity = 101;
- (*input) (33,32).intensity = 100;
- (*input) (34,32).intensity = 101;
- (*input) (35,32).intensity = 99;
- (*input) (36,32).intensity = 99;
- (*input) (37,32).intensity = 99;
- (*input) (38,32).intensity = 99;
- (*input) (39,32).intensity = 100;
- (*input) (40,32).intensity = 98;
- (*input) (41,32).intensity = 99;
- (*input) (42,32).intensity = 99;
- (*input) (43,32).intensity = 97;
- (*input) (44,32).intensity = 98;
- (*input) (45,32).intensity = 98;
- (*input) (46,32).intensity = 98;
- (*input) (47,32).intensity = 98;
- (*input) (48,32).intensity = 98;
- (*input) (49,32).intensity = 98;
- (*input) (50,32).intensity = 98;
- (*input) (51,32).intensity = 96;
- (*input) (52,32).intensity = 97;
- (*input) (53,32).intensity = 96;
- (*input) (54,32).intensity = 96;
- (*input) (55,32).intensity = 97;
- (*input) (56,32).intensity = 96;
- (*input) (57,32).intensity = 96;
- (*input) (58,32).intensity = 96;
- (*input) (59,32).intensity = 94;
- (*input) (60,32).intensity = 96;
- (*input) (61,32).intensity = 94;
- (*input) (62,32).intensity = 94;
- (*input) (63,32).intensity = 95;
- (*input) (0,33).intensity = 115;
- (*input) (1,33).intensity = 115;
- (*input) (2,33).intensity = 112;
- (*input) (3,33).intensity = 111;
- (*input) (4,33).intensity = 112;
- (*input) (5,33).intensity = 110;
- (*input) (6,33).intensity = 110;
- (*input) (7,33).intensity = 108;
- (*input) (8,33).intensity = 109;
- (*input) (9,33).intensity = 105;
- (*input) (10,33).intensity = 108;
- (*input) (11,33).intensity = 105;
- (*input) (12,33).intensity = 105;
- (*input) (13,33).intensity = 104;
- (*input) (14,33).intensity = 103;
- (*input) (15,33).intensity = 103;
- (*input) (16,33).intensity = 103;
- (*input) (17,33).intensity = 102;
- (*input) (18,33).intensity = 102;
- (*input) (19,33).intensity = 102;
- (*input) (20,33).intensity = 102;
- (*input) (21,33).intensity = 102;
- (*input) (22,33).intensity = 101;
- (*input) (23,33).intensity = 103;
- (*input) (24,33).intensity = 101;
- (*input) (25,33).intensity = 101;
- (*input) (26,33).intensity = 102;
- (*input) (27,33).intensity = 101;
- (*input) (28,33).intensity = 101;
- (*input) (29,33).intensity = 100;
- (*input) (30,33).intensity = 101;
- (*input) (31,33).intensity = 100;
- (*input) (32,33).intensity = 100;
- (*input) (33,33).intensity = 100;
- (*input) (34,33).intensity = 100;
- (*input) (35,33).intensity = 98;
- (*input) (36,33).intensity = 100;
- (*input) (37,33).intensity = 99;
- (*input) (38,33).intensity = 100;
- (*input) (39,33).intensity = 100;
- (*input) (40,33).intensity = 100;
- (*input) (41,33).intensity = 100;
- (*input) (42,33).intensity = 100;
- (*input) (43,33).intensity = 98;
- (*input) (44,33).intensity = 98;
- (*input) (45,33).intensity = 98;
- (*input) (46,33).intensity = 99;
- (*input) (47,33).intensity = 99;
- (*input) (48,33).intensity = 99;
- (*input) (49,33).intensity = 98;
- (*input) (50,33).intensity = 97;
- (*input) (51,33).intensity = 98;
- (*input) (52,33).intensity = 97;
- (*input) (53,33).intensity = 96;
- (*input) (54,33).intensity = 97;
- (*input) (55,33).intensity = 97;
- (*input) (56,33).intensity = 97;
- (*input) (57,33).intensity = 96;
- (*input) (58,33).intensity = 96;
- (*input) (59,33).intensity = 94;
- (*input) (60,33).intensity = 95;
- (*input) (61,33).intensity = 95;
- (*input) (62,33).intensity = 96;
- (*input) (63,33).intensity = 96;
- (*input) (0,34).intensity = 115;
- (*input) (1,34).intensity = 115;
- (*input) (2,34).intensity = 113;
- (*input) (3,34).intensity = 111;
- (*input) (4,34).intensity = 112;
- (*input) (5,34).intensity = 112;
- (*input) (6,34).intensity = 112;
- (*input) (7,34).intensity = 109;
- (*input) (8,34).intensity = 110;
- (*input) (9,34).intensity = 107;
- (*input) (10,34).intensity = 108;
- (*input) (11,34).intensity = 105;
- (*input) (12,34).intensity = 106;
- (*input) (13,34).intensity = 105;
- (*input) (14,34).intensity = 105;
- (*input) (15,34).intensity = 103;
- (*input) (16,34).intensity = 104;
- (*input) (17,34).intensity = 103;
- (*input) (18,34).intensity = 103;
- (*input) (19,34).intensity = 102;
- (*input) (20,34).intensity = 103;
- (*input) (21,34).intensity = 103;
- (*input) (22,34).intensity = 102;
- (*input) (23,34).intensity = 103;
- (*input) (24,34).intensity = 102;
- (*input) (25,34).intensity = 100;
- (*input) (26,34).intensity = 102;
- (*input) (27,34).intensity = 101;
- (*input) (28,34).intensity = 102;
- (*input) (29,34).intensity = 101;
- (*input) (30,34).intensity = 101;
- (*input) (31,34).intensity = 100;
- (*input) (32,34).intensity = 100;
- (*input) (33,34).intensity = 101;
- (*input) (34,34).intensity = 99;
- (*input) (35,34).intensity = 100;
- (*input) (36,34).intensity = 101;
- (*input) (37,34).intensity = 100;
- (*input) (38,34).intensity = 101;
- (*input) (39,34).intensity = 100;
- (*input) (40,34).intensity = 101;
- (*input) (41,34).intensity = 101;
- (*input) (42,34).intensity = 101;
- (*input) (43,34).intensity = 99;
- (*input) (44,34).intensity = 98;
- (*input) (45,34).intensity = 99;
- (*input) (46,34).intensity = 98;
- (*input) (47,34).intensity = 99;
- (*input) (48,34).intensity = 99;
- (*input) (49,34).intensity = 99;
- (*input) (50,34).intensity = 97;
- (*input) (51,34).intensity = 98;
- (*input) (52,34).intensity = 97;
- (*input) (53,34).intensity = 96;
- (*input) (54,34).intensity = 98;
- (*input) (55,34).intensity = 98;
- (*input) (56,34).intensity = 98;
- (*input) (57,34).intensity = 96;
- (*input) (58,34).intensity = 97;
- (*input) (59,34).intensity = 96;
- (*input) (60,34).intensity = 95;
- (*input) (61,34).intensity = 96;
- (*input) (62,34).intensity = 97;
- (*input) (63,34).intensity = 95;
- (*input) (0,35).intensity = 116;
- (*input) (1,35).intensity = 115;
- (*input) (2,35).intensity = 115;
- (*input) (3,35).intensity = 112;
- (*input) (4,35).intensity = 111;
- (*input) (5,35).intensity = 112;
- (*input) (6,35).intensity = 112;
- (*input) (7,35).intensity = 110;
- (*input) (8,35).intensity = 111;
- (*input) (9,35).intensity = 110;
- (*input) (10,35).intensity = 109;
- (*input) (11,35).intensity = 105;
- (*input) (12,35).intensity = 106;
- (*input) (13,35).intensity = 106;
- (*input) (14,35).intensity = 106;
- (*input) (15,35).intensity = 103;
- (*input) (16,35).intensity = 104;
- (*input) (17,35).intensity = 105;
- (*input) (18,35).intensity = 105;
- (*input) (19,35).intensity = 102;
- (*input) (20,35).intensity = 102;
- (*input) (21,35).intensity = 104;
- (*input) (22,35).intensity = 103;
- (*input) (23,35).intensity = 105;
- (*input) (24,35).intensity = 103;
- (*input) (25,35).intensity = 103;
- (*input) (26,35).intensity = 102;
- (*input) (27,35).intensity = 101;
- (*input) (28,35).intensity = 101;
- (*input) (29,35).intensity = 102;
- (*input) (30,35).intensity = 101;
- (*input) (31,35).intensity = 101;
- (*input) (32,35).intensity = 101;
- (*input) (33,35).intensity = 101;
- (*input) (34,35).intensity = 100;
- (*input) (35,35).intensity = 100;
- (*input) (36,35).intensity = 101;
- (*input) (37,35).intensity = 100;
- (*input) (38,35).intensity = 102;
- (*input) (39,35).intensity = 100;
- (*input) (40,35).intensity = 100;
- (*input) (41,35).intensity = 100;
- (*input) (42,35).intensity = 101;
- (*input) (43,35).intensity = 101;
- (*input) (44,35).intensity = 100;
- (*input) (45,35).intensity = 100;
- (*input) (46,35).intensity = 99;
- (*input) (47,35).intensity = 99;
- (*input) (48,35).intensity = 99;
- (*input) (49,35).intensity = 99;
- (*input) (50,35).intensity = 98;
- (*input) (51,35).intensity = 99;
- (*input) (52,35).intensity = 97;
- (*input) (53,35).intensity = 97;
- (*input) (54,35).intensity = 98;
- (*input) (55,35).intensity = 98;
- (*input) (56,35).intensity = 98;
- (*input) (57,35).intensity = 97;
- (*input) (58,35).intensity = 97;
- (*input) (59,35).intensity = 97;
- (*input) (60,35).intensity = 96;
- (*input) (61,35).intensity = 96;
- (*input) (62,35).intensity = 97;
- (*input) (63,35).intensity = 96;
- (*input) (0,36).intensity = 116;
- (*input) (1,36).intensity = 117;
- (*input) (2,36).intensity = 116;
- (*input) (3,36).intensity = 114;
- (*input) (4,36).intensity = 112;
- (*input) (5,36).intensity = 115;
- (*input) (6,36).intensity = 114;
- (*input) (7,36).intensity = 112;
- (*input) (8,36).intensity = 111;
- (*input) (9,36).intensity = 110;
- (*input) (10,36).intensity = 109;
- (*input) (11,36).intensity = 106;
- (*input) (12,36).intensity = 105;
- (*input) (13,36).intensity = 107;
- (*input) (14,36).intensity = 106;
- (*input) (15,36).intensity = 103;
- (*input) (16,36).intensity = 105;
- (*input) (17,36).intensity = 105;
- (*input) (18,36).intensity = 105;
- (*input) (19,36).intensity = 104;
- (*input) (20,36).intensity = 103;
- (*input) (21,36).intensity = 105;
- (*input) (22,36).intensity = 103;
- (*input) (23,36).intensity = 103;
- (*input) (24,36).intensity = 102;
- (*input) (25,36).intensity = 102;
- (*input) (26,36).intensity = 103;
- (*input) (27,36).intensity = 101;
- (*input) (28,36).intensity = 102;
- (*input) (29,36).intensity = 102;
- (*input) (30,36).intensity = 100;
- (*input) (31,36).intensity = 101;
- (*input) (32,36).intensity = 102;
- (*input) (33,36).intensity = 102;
- (*input) (34,36).intensity = 101;
- (*input) (35,36).intensity = 102;
- (*input) (36,36).intensity = 101;
- (*input) (37,36).intensity = 101;
- (*input) (38,36).intensity = 103;
- (*input) (39,36).intensity = 102;
- (*input) (40,36).intensity = 101;
- (*input) (41,36).intensity = 99;
- (*input) (42,36).intensity = 101;
- (*input) (43,36).intensity = 101;
- (*input) (44,36).intensity = 101;
- (*input) (45,36).intensity = 101;
- (*input) (46,36).intensity = 99;
- (*input) (47,36).intensity = 99;
- (*input) (48,36).intensity = 99;
- (*input) (49,36).intensity = 99;
- (*input) (50,36).intensity = 98;
- (*input) (51,36).intensity = 99;
- (*input) (52,36).intensity = 98;
- (*input) (53,36).intensity = 98;
- (*input) (54,36).intensity = 100;
- (*input) (55,36).intensity = 97;
- (*input) (56,36).intensity = 98;
- (*input) (57,36).intensity = 96;
- (*input) (58,36).intensity = 97;
- (*input) (59,36).intensity = 97;
- (*input) (60,36).intensity = 96;
- (*input) (61,36).intensity = 96;
- (*input) (62,36).intensity = 97;
- (*input) (63,36).intensity = 96;
- (*input) (0,37).intensity = 118;
- (*input) (1,37).intensity = 118;
- (*input) (2,37).intensity = 117;
- (*input) (3,37).intensity = 116;
- (*input) (4,37).intensity = 114;
- (*input) (5,37).intensity = 115;
- (*input) (6,37).intensity = 114;
- (*input) (7,37).intensity = 114;
- (*input) (8,37).intensity = 112;
- (*input) (9,37).intensity = 111;
- (*input) (10,37).intensity = 110;
- (*input) (11,37).intensity = 107;
- (*input) (12,37).intensity = 105;
- (*input) (13,37).intensity = 108;
- (*input) (14,37).intensity = 106;
- (*input) (15,37).intensity = 105;
- (*input) (16,37).intensity = 105;
- (*input) (17,37).intensity = 105;
- (*input) (18,37).intensity = 105;
- (*input) (19,37).intensity = 104;
- (*input) (20,37).intensity = 103;
- (*input) (21,37).intensity = 104;
- (*input) (22,37).intensity = 103;
- (*input) (23,37).intensity = 103;
- (*input) (24,37).intensity = 103;
- (*input) (25,37).intensity = 104;
- (*input) (26,37).intensity = 103;
- (*input) (27,37).intensity = 102;
- (*input) (28,37).intensity = 102;
- (*input) (29,37).intensity = 103;
- (*input) (30,37).intensity = 101;
- (*input) (31,37).intensity = 102;
- (*input) (32,37).intensity = 102;
- (*input) (33,37).intensity = 102;
- (*input) (34,37).intensity = 102;
- (*input) (35,37).intensity = 101;
- (*input) (36,37).intensity = 101;
- (*input) (37,37).intensity = 101;
- (*input) (38,37).intensity = 102;
- (*input) (39,37).intensity = 101;
- (*input) (40,37).intensity = 100;
- (*input) (41,37).intensity = 99;
- (*input) (42,37).intensity = 102;
- (*input) (43,37).intensity = 100;
- (*input) (44,37).intensity = 102;
- (*input) (45,37).intensity = 101;
- (*input) (46,37).intensity = 99;
- (*input) (47,37).intensity = 100;
- (*input) (48,37).intensity = 99;
- (*input) (49,37).intensity = 101;
- (*input) (50,37).intensity = 99;
- (*input) (51,37).intensity = 99;
- (*input) (52,37).intensity = 99;
- (*input) (53,37).intensity = 98;
- (*input) (54,37).intensity = 99;
- (*input) (55,37).intensity = 98;
- (*input) (56,37).intensity = 97;
- (*input) (57,37).intensity = 97;
- (*input) (58,37).intensity = 97;
- (*input) (59,37).intensity = 98;
- (*input) (60,37).intensity = 97;
- (*input) (61,37).intensity = 97;
- (*input) (62,37).intensity = 97;
- (*input) (63,37).intensity = 96;
- (*input) (0,38).intensity = 120;
- (*input) (1,38).intensity = 118;
- (*input) (2,38).intensity = 119;
- (*input) (3,38).intensity = 116;
- (*input) (4,38).intensity = 114;
- (*input) (5,38).intensity = 116;
- (*input) (6,38).intensity = 115;
- (*input) (7,38).intensity = 115;
- (*input) (8,38).intensity = 113;
- (*input) (9,38).intensity = 111;
- (*input) (10,38).intensity = 111;
- (*input) (11,38).intensity = 108;
- (*input) (12,38).intensity = 106;
- (*input) (13,38).intensity = 109;
- (*input) (14,38).intensity = 107;
- (*input) (15,38).intensity = 105;
- (*input) (16,38).intensity = 105;
- (*input) (17,38).intensity = 105;
- (*input) (18,38).intensity = 105;
- (*input) (19,38).intensity = 106;
- (*input) (20,38).intensity = 105;
- (*input) (21,38).intensity = 104;
- (*input) (22,38).intensity = 103;
- (*input) (23,38).intensity = 103;
- (*input) (24,38).intensity = 104;
- (*input) (25,38).intensity = 103;
- (*input) (26,38).intensity = 103;
- (*input) (27,38).intensity = 103;
- (*input) (28,38).intensity = 103;
- (*input) (29,38).intensity = 104;
- (*input) (30,38).intensity = 101;
- (*input) (31,38).intensity = 102;
- (*input) (32,38).intensity = 103;
- (*input) (33,38).intensity = 104;
- (*input) (34,38).intensity = 102;
- (*input) (35,38).intensity = 102;
- (*input) (36,38).intensity = 100;
- (*input) (37,38).intensity = 102;
- (*input) (38,38).intensity = 102;
- (*input) (39,38).intensity = 102;
- (*input) (40,38).intensity = 101;
- (*input) (41,38).intensity = 99;
- (*input) (42,38).intensity = 102;
- (*input) (43,38).intensity = 101;
- (*input) (44,38).intensity = 103;
- (*input) (45,38).intensity = 101;
- (*input) (46,38).intensity = 101;
- (*input) (47,38).intensity = 100;
- (*input) (48,38).intensity = 99;
- (*input) (49,38).intensity = 99;
- (*input) (50,38).intensity = 100;
- (*input) (51,38).intensity = 99;
- (*input) (52,38).intensity = 100;
- (*input) (53,38).intensity = 99;
- (*input) (54,38).intensity = 100;
- (*input) (55,38).intensity = 98;
- (*input) (56,38).intensity = 98;
- (*input) (57,38).intensity = 97;
- (*input) (58,38).intensity = 98;
- (*input) (59,38).intensity = 98;
- (*input) (60,38).intensity = 97;
- (*input) (61,38).intensity = 97;
- (*input) (62,38).intensity = 98;
- (*input) (63,38).intensity = 97;
- (*input) (0,39).intensity = 120;
- (*input) (1,39).intensity = 118;
- (*input) (2,39).intensity = 119;
- (*input) (3,39).intensity = 117;
- (*input) (4,39).intensity = 116;
- (*input) (5,39).intensity = 115;
- (*input) (6,39).intensity = 115;
- (*input) (7,39).intensity = 114;
- (*input) (8,39).intensity = 114;
- (*input) (9,39).intensity = 112;
- (*input) (10,39).intensity = 111;
- (*input) (11,39).intensity = 108;
- (*input) (12,39).intensity = 108;
- (*input) (13,39).intensity = 110;
- (*input) (14,39).intensity = 107;
- (*input) (15,39).intensity = 105;
- (*input) (16,39).intensity = 106;
- (*input) (17,39).intensity = 105;
- (*input) (18,39).intensity = 105;
- (*input) (19,39).intensity = 105;
- (*input) (20,39).intensity = 106;
- (*input) (21,39).intensity = 105;
- (*input) (22,39).intensity = 103;
- (*input) (23,39).intensity = 103;
- (*input) (24,39).intensity = 103;
- (*input) (25,39).intensity = 105;
- (*input) (26,39).intensity = 104;
- (*input) (27,39).intensity = 103;
- (*input) (28,39).intensity = 103;
- (*input) (29,39).intensity = 103;
- (*input) (30,39).intensity = 103;
- (*input) (31,39).intensity = 103;
- (*input) (32,39).intensity = 103;
- (*input) (33,39).intensity = 103;
- (*input) (34,39).intensity = 103;
- (*input) (35,39).intensity = 101;
- (*input) (36,39).intensity = 101;
- (*input) (37,39).intensity = 103;
- (*input) (38,39).intensity = 101;
- (*input) (39,39).intensity = 102;
- (*input) (40,39).intensity = 101;
- (*input) (41,39).intensity = 101;
- (*input) (42,39).intensity = 103;
- (*input) (43,39).intensity = 101;
- (*input) (44,39).intensity = 102;
- (*input) (45,39).intensity = 102;
- (*input) (46,39).intensity = 101;
- (*input) (47,39).intensity = 101;
- (*input) (48,39).intensity = 99;
- (*input) (49,39).intensity = 101;
- (*input) (50,39).intensity = 101;
- (*input) (51,39).intensity = 99;
- (*input) (52,39).intensity = 99;
- (*input) (53,39).intensity = 99;
- (*input) (54,39).intensity = 100;
- (*input) (55,39).intensity = 100;
- (*input) (56,39).intensity = 98;
- (*input) (57,39).intensity = 98;
- (*input) (58,39).intensity = 98;
- (*input) (59,39).intensity = 98;
- (*input) (60,39).intensity = 97;
- (*input) (61,39).intensity = 97;
- (*input) (62,39).intensity = 98;
- (*input) (63,39).intensity = 97;
- (*input) (0,40).intensity = 121;
- (*input) (1,40).intensity = 119;
- (*input) (2,40).intensity = 119;
- (*input) (3,40).intensity = 118;
- (*input) (4,40).intensity = 116;
- (*input) (5,40).intensity = 117;
- (*input) (6,40).intensity = 117;
- (*input) (7,40).intensity = 114;
- (*input) (8,40).intensity = 115;
- (*input) (9,40).intensity = 113;
- (*input) (10,40).intensity = 112;
- (*input) (11,40).intensity = 110;
- (*input) (12,40).intensity = 111;
- (*input) (13,40).intensity = 111;
- (*input) (14,40).intensity = 107;
- (*input) (15,40).intensity = 107;
- (*input) (16,40).intensity = 107;
- (*input) (17,40).intensity = 106;
- (*input) (18,40).intensity = 106;
- (*input) (19,40).intensity = 106;
- (*input) (20,40).intensity = 107;
- (*input) (21,40).intensity = 106;
- (*input) (22,40).intensity = 104;
- (*input) (23,40).intensity = 105;
- (*input) (24,40).intensity = 104;
- (*input) (25,40).intensity = 105;
- (*input) (26,40).intensity = 103;
- (*input) (27,40).intensity = 105;
- (*input) (28,40).intensity = 104;
- (*input) (29,40).intensity = 103;
- (*input) (30,40).intensity = 105;
- (*input) (31,40).intensity = 103;
- (*input) (32,40).intensity = 105;
- (*input) (33,40).intensity = 104;
- (*input) (34,40).intensity = 103;
- (*input) (35,40).intensity = 102;
- (*input) (36,40).intensity = 102;
- (*input) (37,40).intensity = 103;
- (*input) (38,40).intensity = 102;
- (*input) (39,40).intensity = 101;
- (*input) (40,40).intensity = 102;
- (*input) (41,40).intensity = 102;
- (*input) (42,40).intensity = 103;
- (*input) (43,40).intensity = 103;
- (*input) (44,40).intensity = 102;
- (*input) (45,40).intensity = 102;
- (*input) (46,40).intensity = 102;
- (*input) (47,40).intensity = 101;
- (*input) (48,40).intensity = 101;
- (*input) (49,40).intensity = 100;
- (*input) (50,40).intensity = 102;
- (*input) (51,40).intensity = 99;
- (*input) (52,40).intensity = 101;
- (*input) (53,40).intensity = 99;
- (*input) (54,40).intensity = 99;
- (*input) (55,40).intensity = 99;
- (*input) (56,40).intensity = 97;
- (*input) (57,40).intensity = 99;
- (*input) (58,40).intensity = 98;
- (*input) (59,40).intensity = 99;
- (*input) (60,40).intensity = 98;
- (*input) (61,40).intensity = 99;
- (*input) (62,40).intensity = 98;
- (*input) (63,40).intensity = 98;
- (*input) (0,41).intensity = 121;
- (*input) (1,41).intensity = 120;
- (*input) (2,41).intensity = 119;
- (*input) (3,41).intensity = 119;
- (*input) (4,41).intensity = 117;
- (*input) (5,41).intensity = 118;
- (*input) (6,41).intensity = 116;
- (*input) (7,41).intensity = 115;
- (*input) (8,41).intensity = 116;
- (*input) (9,41).intensity = 114;
- (*input) (10,41).intensity = 114;
- (*input) (11,41).intensity = 111;
- (*input) (12,41).intensity = 112;
- (*input) (13,41).intensity = 111;
- (*input) (14,41).intensity = 109;
- (*input) (15,41).intensity = 108;
- (*input) (16,41).intensity = 108;
- (*input) (17,41).intensity = 107;
- (*input) (18,41).intensity = 107;
- (*input) (19,41).intensity = 107;
- (*input) (20,41).intensity = 107;
- (*input) (21,41).intensity = 107;
- (*input) (22,41).intensity = 106;
- (*input) (23,41).intensity = 105;
- (*input) (24,41).intensity = 104;
- (*input) (25,41).intensity = 105;
- (*input) (26,41).intensity = 104;
- (*input) (27,41).intensity = 105;
- (*input) (28,41).intensity = 105;
- (*input) (29,41).intensity = 104;
- (*input) (30,41).intensity = 105;
- (*input) (31,41).intensity = 104;
- (*input) (32,41).intensity = 105;
- (*input) (33,41).intensity = 104;
- (*input) (34,41).intensity = 104;
- (*input) (35,41).intensity = 103;
- (*input) (36,41).intensity = 102;
- (*input) (37,41).intensity = 103;
- (*input) (38,41).intensity = 102;
- (*input) (39,41).intensity = 102;
- (*input) (40,41).intensity = 102;
- (*input) (41,41).intensity = 101;
- (*input) (42,41).intensity = 104;
- (*input) (43,41).intensity = 103;
- (*input) (44,41).intensity = 102;
- (*input) (45,41).intensity = 101;
- (*input) (46,41).intensity = 103;
- (*input) (47,41).intensity = 101;
- (*input) (48,41).intensity = 102;
- (*input) (49,41).intensity = 101;
- (*input) (50,41).intensity = 102;
- (*input) (51,41).intensity = 100;
- (*input) (52,41).intensity = 100;
- (*input) (53,41).intensity = 99;
- (*input) (54,41).intensity = 99;
- (*input) (55,41).intensity = 99;
- (*input) (56,41).intensity = 98;
- (*input) (57,41).intensity = 99;
- (*input) (58,41).intensity = 99;
- (*input) (59,41).intensity = 97;
- (*input) (60,41).intensity = 98;
- (*input) (61,41).intensity = 99;
- (*input) (62,41).intensity = 98;
- (*input) (63,41).intensity = 98;
- (*input) (0,42).intensity = 121;
- (*input) (1,42).intensity = 122;
- (*input) (2,42).intensity = 119;
- (*input) (3,42).intensity = 119;
- (*input) (4,42).intensity = 118;
- (*input) (5,42).intensity = 118;
- (*input) (6,42).intensity = 116;
- (*input) (7,42).intensity = 116;
- (*input) (8,42).intensity = 117;
- (*input) (9,42).intensity = 114;
- (*input) (10,42).intensity = 115;
- (*input) (11,42).intensity = 113;
- (*input) (12,42).intensity = 114;
- (*input) (13,42).intensity = 112;
- (*input) (14,42).intensity = 109;
- (*input) (15,42).intensity = 109;
- (*input) (16,42).intensity = 109;
- (*input) (17,42).intensity = 106;
- (*input) (18,42).intensity = 107;
- (*input) (19,42).intensity = 105;
- (*input) (20,42).intensity = 108;
- (*input) (21,42).intensity = 106;
- (*input) (22,42).intensity = 107;
- (*input) (23,42).intensity = 106;
- (*input) (24,42).intensity = 105;
- (*input) (25,42).intensity = 104;
- (*input) (26,42).intensity = 103;
- (*input) (27,42).intensity = 105;
- (*input) (28,42).intensity = 105;
- (*input) (29,42).intensity = 104;
- (*input) (30,42).intensity = 105;
- (*input) (31,42).intensity = 103;
- (*input) (32,42).intensity = 104;
- (*input) (33,42).intensity = 104;
- (*input) (34,42).intensity = 105;
- (*input) (35,42).intensity = 106;
- (*input) (36,42).intensity = 102;
- (*input) (37,42).intensity = 104;
- (*input) (38,42).intensity = 104;
- (*input) (39,42).intensity = 102;
- (*input) (40,42).intensity = 104;
- (*input) (41,42).intensity = 102;
- (*input) (42,42).intensity = 105;
- (*input) (43,42).intensity = 103;
- (*input) (44,42).intensity = 102;
- (*input) (45,42).intensity = 103;
- (*input) (46,42).intensity = 102;
- (*input) (47,42).intensity = 100;
- (*input) (48,42).intensity = 102;
- (*input) (49,42).intensity = 101;
- (*input) (50,42).intensity = 103;
- (*input) (51,42).intensity = 100;
- (*input) (52,42).intensity = 100;
- (*input) (53,42).intensity = 99;
- (*input) (54,42).intensity = 99;
- (*input) (55,42).intensity = 100;
- (*input) (56,42).intensity = 99;
- (*input) (57,42).intensity = 100;
- (*input) (58,42).intensity = 98;
- (*input) (59,42).intensity = 98;
- (*input) (60,42).intensity = 99;
- (*input) (61,42).intensity = 101;
- (*input) (62,42).intensity = 99;
- (*input) (63,42).intensity = 98;
- (*input) (0,43).intensity = 122;
- (*input) (1,43).intensity = 120;
- (*input) (2,43).intensity = 119;
- (*input) (3,43).intensity = 120;
- (*input) (4,43).intensity = 119;
- (*input) (5,43).intensity = 118;
- (*input) (6,43).intensity = 118;
- (*input) (7,43).intensity = 117;
- (*input) (8,43).intensity = 117;
- (*input) (9,43).intensity = 116;
- (*input) (10,43).intensity = 114;
- (*input) (11,43).intensity = 113;
- (*input) (12,43).intensity = 114;
- (*input) (13,43).intensity = 112;
- (*input) (14,43).intensity = 111;
- (*input) (15,43).intensity = 110;
- (*input) (16,43).intensity = 110;
- (*input) (17,43).intensity = 107;
- (*input) (18,43).intensity = 108;
- (*input) (19,43).intensity = 106;
- (*input) (20,43).intensity = 109;
- (*input) (21,43).intensity = 107;
- (*input) (22,43).intensity = 107;
- (*input) (23,43).intensity = 106;
- (*input) (24,43).intensity = 106;
- (*input) (25,43).intensity = 105;
- (*input) (26,43).intensity = 106;
- (*input) (27,43).intensity = 106;
- (*input) (28,43).intensity = 105;
- (*input) (29,43).intensity = 105;
- (*input) (30,43).intensity = 104;
- (*input) (31,43).intensity = 104;
- (*input) (32,43).intensity = 104;
- (*input) (33,43).intensity = 104;
- (*input) (34,43).intensity = 105;
- (*input) (35,43).intensity = 106;
- (*input) (36,43).intensity = 103;
- (*input) (37,43).intensity = 105;
- (*input) (38,43).intensity = 105;
- (*input) (39,43).intensity = 105;
- (*input) (40,43).intensity = 105;
- (*input) (41,43).intensity = 102;
- (*input) (42,43).intensity = 105;
- (*input) (43,43).intensity = 103;
- (*input) (44,43).intensity = 102;
- (*input) (45,43).intensity = 103;
- (*input) (46,43).intensity = 103;
- (*input) (47,43).intensity = 102;
- (*input) (48,43).intensity = 103;
- (*input) (49,43).intensity = 102;
- (*input) (50,43).intensity = 103;
- (*input) (51,43).intensity = 100;
- (*input) (52,43).intensity = 101;
- (*input) (53,43).intensity = 100;
- (*input) (54,43).intensity = 99;
- (*input) (55,43).intensity = 100;
- (*input) (56,43).intensity = 100;
- (*input) (57,43).intensity = 100;
- (*input) (58,43).intensity = 98;
- (*input) (59,43).intensity = 98;
- (*input) (60,43).intensity = 99;
- (*input) (61,43).intensity = 100;
- (*input) (62,43).intensity = 99;
- (*input) (63,43).intensity = 98;
- (*input) (0,44).intensity = 123;
- (*input) (1,44).intensity = 122;
- (*input) (2,44).intensity = 120;
- (*input) (3,44).intensity = 121;
- (*input) (4,44).intensity = 120;
- (*input) (5,44).intensity = 118;
- (*input) (6,44).intensity = 118;
- (*input) (7,44).intensity = 118;
- (*input) (8,44).intensity = 117;
- (*input) (9,44).intensity = 118;
- (*input) (10,44).intensity = 114;
- (*input) (11,44).intensity = 115;
- (*input) (12,44).intensity = 115;
- (*input) (13,44).intensity = 113;
- (*input) (14,44).intensity = 112;
- (*input) (15,44).intensity = 110;
- (*input) (16,44).intensity = 110;
- (*input) (17,44).intensity = 108;
- (*input) (18,44).intensity = 109;
- (*input) (19,44).intensity = 106;
- (*input) (20,44).intensity = 109;
- (*input) (21,44).intensity = 106;
- (*input) (22,44).intensity = 107;
- (*input) (23,44).intensity = 106;
- (*input) (24,44).intensity = 107;
- (*input) (25,44).intensity = 106;
- (*input) (26,44).intensity = 106;
- (*input) (27,44).intensity = 106;
- (*input) (28,44).intensity = 105;
- (*input) (29,44).intensity = 106;
- (*input) (30,44).intensity = 103;
- (*input) (31,44).intensity = 103;
- (*input) (32,44).intensity = 105;
- (*input) (33,44).intensity = 105;
- (*input) (34,44).intensity = 105;
- (*input) (35,44).intensity = 106;
- (*input) (36,44).intensity = 104;
- (*input) (37,44).intensity = 105;
- (*input) (38,44).intensity = 105;
- (*input) (39,44).intensity = 105;
- (*input) (40,44).intensity = 105;
- (*input) (41,44).intensity = 104;
- (*input) (42,44).intensity = 106;
- (*input) (43,44).intensity = 103;
- (*input) (44,44).intensity = 103;
- (*input) (45,44).intensity = 106;
- (*input) (46,44).intensity = 103;
- (*input) (47,44).intensity = 102;
- (*input) (48,44).intensity = 102;
- (*input) (49,44).intensity = 103;
- (*input) (50,44).intensity = 103;
- (*input) (51,44).intensity = 101;
- (*input) (52,44).intensity = 101;
- (*input) (53,44).intensity = 100;
- (*input) (54,44).intensity = 100;
- (*input) (55,44).intensity = 102;
- (*input) (56,44).intensity = 101;
- (*input) (57,44).intensity = 101;
- (*input) (58,44).intensity = 99;
- (*input) (59,44).intensity = 99;
- (*input) (60,44).intensity = 99;
- (*input) (61,44).intensity = 100;
- (*input) (62,44).intensity = 99;
- (*input) (63,44).intensity = 98;
- (*input) (0,45).intensity = 123;
- (*input) (1,45).intensity = 122;
- (*input) (2,45).intensity = 121;
- (*input) (3,45).intensity = 122;
- (*input) (4,45).intensity = 121;
- (*input) (5,45).intensity = 118;
- (*input) (6,45).intensity = 120;
- (*input) (7,45).intensity = 118;
- (*input) (8,45).intensity = 118;
- (*input) (9,45).intensity = 118;
- (*input) (10,45).intensity = 115;
- (*input) (11,45).intensity = 114;
- (*input) (12,45).intensity = 115;
- (*input) (13,45).intensity = 114;
- (*input) (14,45).intensity = 113;
- (*input) (15,45).intensity = 111;
- (*input) (16,45).intensity = 111;
- (*input) (17,45).intensity = 110;
- (*input) (18,45).intensity = 111;
- (*input) (19,45).intensity = 107;
- (*input) (20,45).intensity = 108;
- (*input) (21,45).intensity = 108;
- (*input) (22,45).intensity = 107;
- (*input) (23,45).intensity = 108;
- (*input) (24,45).intensity = 108;
- (*input) (25,45).intensity = 107;
- (*input) (26,45).intensity = 107;
- (*input) (27,45).intensity = 106;
- (*input) (28,45).intensity = 105;
- (*input) (29,45).intensity = 106;
- (*input) (30,45).intensity = 105;
- (*input) (31,45).intensity = 103;
- (*input) (32,45).intensity = 106;
- (*input) (33,45).intensity = 104;
- (*input) (34,45).intensity = 105;
- (*input) (35,45).intensity = 105;
- (*input) (36,45).intensity = 105;
- (*input) (37,45).intensity = 103;
- (*input) (38,45).intensity = 103;
- (*input) (39,45).intensity = 105;
- (*input) (40,45).intensity = 105;
- (*input) (41,45).intensity = 105;
- (*input) (42,45).intensity = 105;
- (*input) (43,45).intensity = 105;
- (*input) (44,45).intensity = 104;
- (*input) (45,45).intensity = 105;
- (*input) (46,45).intensity = 103;
- (*input) (47,45).intensity = 105;
- (*input) (48,45).intensity = 103;
- (*input) (49,45).intensity = 103;
- (*input) (50,45).intensity = 103;
- (*input) (51,45).intensity = 101;
- (*input) (52,45).intensity = 102;
- (*input) (53,45).intensity = 102;
- (*input) (54,45).intensity = 101;
- (*input) (55,45).intensity = 102;
- (*input) (56,45).intensity = 101;
- (*input) (57,45).intensity = 101;
- (*input) (58,45).intensity = 100;
- (*input) (59,45).intensity = 101;
- (*input) (60,45).intensity = 100;
- (*input) (61,45).intensity = 99;
- (*input) (62,45).intensity = 99;
- (*input) (63,45).intensity = 99;
- (*input) (0,46).intensity = 124;
- (*input) (1,46).intensity = 122;
- (*input) (2,46).intensity = 123;
- (*input) (3,46).intensity = 121;
- (*input) (4,46).intensity = 122;
- (*input) (5,46).intensity = 120;
- (*input) (6,46).intensity = 119;
- (*input) (7,46).intensity = 118;
- (*input) (8,46).intensity = 118;
- (*input) (9,46).intensity = 118;
- (*input) (10,46).intensity = 117;
- (*input) (11,46).intensity = 115;
- (*input) (12,46).intensity = 116;
- (*input) (13,46).intensity = 114;
- (*input) (14,46).intensity = 115;
- (*input) (15,46).intensity = 112;
- (*input) (16,46).intensity = 111;
- (*input) (17,46).intensity = 112;
- (*input) (18,46).intensity = 112;
- (*input) (19,46).intensity = 109;
- (*input) (20,46).intensity = 108;
- (*input) (21,46).intensity = 108;
- (*input) (22,46).intensity = 109;
- (*input) (23,46).intensity = 108;
- (*input) (24,46).intensity = 109;
- (*input) (25,46).intensity = 107;
- (*input) (26,46).intensity = 107;
- (*input) (27,46).intensity = 106;
- (*input) (28,46).intensity = 105;
- (*input) (29,46).intensity = 107;
- (*input) (30,46).intensity = 105;
- (*input) (31,46).intensity = 103;
- (*input) (32,46).intensity = 106;
- (*input) (33,46).intensity = 105;
- (*input) (34,46).intensity = 106;
- (*input) (35,46).intensity = 105;
- (*input) (36,46).intensity = 106;
- (*input) (37,46).intensity = 103;
- (*input) (38,46).intensity = 103;
- (*input) (39,46).intensity = 106;
- (*input) (40,46).intensity = 103;
- (*input) (41,46).intensity = 105;
- (*input) (42,46).intensity = 104;
- (*input) (43,46).intensity = 104;
- (*input) (44,46).intensity = 106;
- (*input) (45,46).intensity = 105;
- (*input) (46,46).intensity = 103;
- (*input) (47,46).intensity = 104;
- (*input) (48,46).intensity = 104;
- (*input) (49,46).intensity = 103;
- (*input) (50,46).intensity = 104;
- (*input) (51,46).intensity = 102;
- (*input) (52,46).intensity = 101;
- (*input) (53,46).intensity = 102;
- (*input) (54,46).intensity = 102;
- (*input) (55,46).intensity = 103;
- (*input) (56,46).intensity = 102;
- (*input) (57,46).intensity = 102;
- (*input) (58,46).intensity = 103;
- (*input) (59,46).intensity = 101;
- (*input) (60,46).intensity = 100;
- (*input) (61,46).intensity = 100;
- (*input) (62,46).intensity = 102;
- (*input) (63,46).intensity = 100;
- (*input) (0,47).intensity = 123;
- (*input) (1,47).intensity = 122;
- (*input) (2,47).intensity = 122;
- (*input) (3,47).intensity = 122;
- (*input) (4,47).intensity = 122;
- (*input) (5,47).intensity = 120;
- (*input) (6,47).intensity = 120;
- (*input) (7,47).intensity = 119;
- (*input) (8,47).intensity = 120;
- (*input) (9,47).intensity = 118;
- (*input) (10,47).intensity = 118;
- (*input) (11,47).intensity = 117;
- (*input) (12,47).intensity = 116;
- (*input) (13,47).intensity = 116;
- (*input) (14,47).intensity = 116;
- (*input) (15,47).intensity = 114;
- (*input) (16,47).intensity = 113;
- (*input) (17,47).intensity = 114;
- (*input) (18,47).intensity = 112;
- (*input) (19,47).intensity = 109;
- (*input) (20,47).intensity = 109;
- (*input) (21,47).intensity = 109;
- (*input) (22,47).intensity = 108;
- (*input) (23,47).intensity = 109;
- (*input) (24,47).intensity = 109;
- (*input) (25,47).intensity = 107;
- (*input) (26,47).intensity = 107;
- (*input) (27,47).intensity = 107;
- (*input) (28,47).intensity = 106;
- (*input) (29,47).intensity = 107;
- (*input) (30,47).intensity = 107;
- (*input) (31,47).intensity = 105;
- (*input) (32,47).intensity = 106;
- (*input) (33,47).intensity = 105;
- (*input) (34,47).intensity = 106;
- (*input) (35,47).intensity = 105;
- (*input) (36,47).intensity = 107;
- (*input) (37,47).intensity = 104;
- (*input) (38,47).intensity = 104;
- (*input) (39,47).intensity = 106;
- (*input) (40,47).intensity = 104;
- (*input) (41,47).intensity = 102;
- (*input) (42,47).intensity = 105;
- (*input) (43,47).intensity = 104;
- (*input) (44,47).intensity = 105;
- (*input) (45,47).intensity = 105;
- (*input) (46,47).intensity = 104;
- (*input) (47,47).intensity = 103;
- (*input) (48,47).intensity = 105;
- (*input) (49,47).intensity = 103;
- (*input) (50,47).intensity = 105;
- (*input) (51,47).intensity = 102;
- (*input) (52,47).intensity = 101;
- (*input) (53,47).intensity = 103;
- (*input) (54,47).intensity = 104;
- (*input) (55,47).intensity = 104;
- (*input) (56,47).intensity = 102;
- (*input) (57,47).intensity = 101;
- (*input) (58,47).intensity = 101;
- (*input) (59,47).intensity = 100;
- (*input) (60,47).intensity = 101;
- (*input) (61,47).intensity = 100;
- (*input) (62,47).intensity = 98;
- (*input) (63,47).intensity = 96;
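+ // build the organized input cloud procedurally: z is a smooth analytic surface
+ // (an x-weighted Gaussian bump plus a product of sines), and each point is
+ // colored according to its height via interpolate_color()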
+ input->resize(input->width * input->height);
+ for (std::uint32_t r = 0; r < input->height; r++)
+ for (std::uint32_t c = 0; c < input->width; c++)
+ {
+ float x1 = -2.0f + (4.0f / static_cast<float>(input->width)) * static_cast<float>(c);
+ float y1 = -2.0f + (4.0f / static_cast<float>(input->height)) * static_cast<float>(r);
+ float x2 = -M_PI + (2.0f * M_PI / static_cast<float>(input->width)) * static_cast<float>(c);
+ float y2 = -2.0f + (4.0f / static_cast<float>(input->height)) * static_cast<float>(r);
+ float z = x1 * std::exp(-(x1 * x1 + y1 * y1)) * 2.5f + std::sin(x2) * std::sin(y2);
+ RGB color = interpolate_color(-1.6f, 1.6f, z);
+ (*input) (c, r).x = x1;
+ (*input) (c, r).y = y1;
+ (*input) (c, r).z = z;
+ (*input) (c, r).r = static_cast<std::uint8_t>(255.0f * color.r);
+ (*input) (c, r).g = static_cast<std::uint8_t>(255.0f * color.g);
+ (*input) (c, r).b = static_cast<std::uint8_t>(255.0f * color.b);
+ }
+
+ // filter: convolve the input cloud row by row with the prepared kernel
+ auto output = pcl::make_shared<PointCloud<PointXYZRGB>>();
+ pcl::filters::Convolution<PointXYZRGB, PointXYZRGB> convolve;
+ convolve.setInputCloud (input);
+ convolve.setKernel (filter);
+ convolve.convolveRows (*output);
+
+ // output rows (first and last row)
+ // note: the "ignore" border policy sets a border of width = filter_size/2 to NaN for xyz and 0 for rgb values, hence the zeroed entries at both ends of each expected row
+ std::array<RGB, 128> output_results {
+ // row 0 row 47
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(80, 49, 187), RGB(155, 82, 129),
+ RGB(68, 45, 188), RGB(167, 89, 112),
+ RGB(56, 41, 190), RGB(178, 95, 96),
+ RGB(45, 37, 191), RGB(189, 101, 80),
+ RGB(35, 33, 192), RGB(200, 106, 66),
+ RGB(25, 30, 194), RGB(209, 111, 53),
+ RGB(17, 27, 195), RGB(217, 116, 41),
+ RGB(9, 24, 196), RGB(223, 121, 34),
+ RGB(4, 25, 197), RGB(224, 127, 34),
+ RGB(1, 30, 199), RGB(222, 133, 37),
+ RGB(1, 37, 201), RGB(221, 138, 41),
+ RGB(1, 42, 202), RGB(219, 141, 43),
+ RGB(0, 46, 203), RGB(218, 143, 45),
+ RGB(0, 47, 203), RGB(218, 144, 46),
+ RGB(0, 45, 203), RGB(218, 144, 45),
+ RGB(1, 41, 202), RGB(219, 142, 44),
+ RGB(1, 35, 200), RGB(220, 139, 42),
+ RGB(2, 29, 199), RGB(222, 134, 38),
+ RGB(5, 25, 197), RGB(223, 129, 35),
+ RGB(11, 25, 196), RGB(223, 123, 34),
+ RGB(18, 27, 195), RGB(219, 118, 39),
+ RGB(27, 30, 193), RGB(211, 113, 50),
+ RGB(36, 33, 192), RGB(202, 107, 63),
+ RGB(46, 37, 191), RGB(191, 102, 78),
+ RGB(57, 41, 190), RGB(180, 96, 94),
+ RGB(68, 45, 188), RGB(168, 89, 110),
+ RGB(80, 49, 187), RGB(156, 83, 128),
+ RGB(92, 53, 185), RGB(143, 76, 145),
+ RGB(104, 58, 183), RGB(130, 69, 162),
+ RGB(116, 63, 176), RGB(117, 63, 176),
+ RGB(128, 68, 164), RGB(103, 57, 182),
+ RGB(140, 74, 149), RGB(90, 52, 185),
+ RGB(152, 80, 133), RGB(76, 48, 187),
+ RGB(163, 86, 117), RGB(63, 43, 189),
+ RGB(174, 92, 102), RGB(51, 39, 191),
+ RGB(184, 98, 88), RGB(39, 35, 192),
+ RGB(194, 103, 75), RGB(28, 31, 193),
+ RGB(203, 108, 62), RGB(17, 27, 195),
+ RGB(211, 112, 51), RGB(9, 26, 196),
+ RGB(218, 116, 41), RGB(3, 29, 198),
+ RGB(222, 120, 35), RGB(1, 37, 201),
+ RGB(224, 124, 32), RGB(1, 48, 204),
+ RGB(224, 128, 34), RGB(0, 57, 206),
+ RGB(223, 131, 36), RGB(0, 63, 207),
+ RGB(222, 133, 37), RGB(0, 67, 208),
+ RGB(222, 134, 38), RGB(0, 68, 209),
+ RGB(222, 133, 37), RGB(0, 66, 208),
+ RGB(223, 132, 36), RGB(0, 61, 207),
+ RGB(224, 129, 34), RGB(1, 54, 205),
+ RGB(224, 125, 32), RGB(1, 45, 203),
+ RGB(223, 121, 34), RGB(1, 35, 200),
+ RGB(219, 117, 40), RGB(4, 27, 198),
+ RGB(212, 113, 49), RGB(11, 26, 196),
+ RGB(204, 109, 60), RGB(20, 28, 194),
+ RGB(195, 104, 73), RGB(30, 31, 193),
+ RGB(185, 99, 86), RGB(41, 35, 192),
+ RGB(175, 93, 101), RGB(52, 39, 190),
+ RGB(164, 87, 116), RGB(64, 43, 189),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ RGB(0, 0, 0), RGB(0, 0, 0),
+ };
+
+ // check result
+ for (std::uint32_t i = 0; i < output->width; ++i)
+ {
+ EXPECT_EQ ((*output) (i, 0).r, output_results[i * 2 + 0].r);
+ EXPECT_EQ ((*output) (i, 0).g, output_results[i * 2 + 0].g);
+ EXPECT_EQ ((*output) (i, 0).b, output_results[i * 2 + 0].b);
+ EXPECT_EQ ((*output) (i, 47).r, output_results[i * 2 + 1].r);
+ EXPECT_EQ ((*output) (i, 47).g, output_results[i * 2 + 1].g);
+ EXPECT_EQ ((*output) (i, 47).b, output_results[i * 2 + 1].b);
+ }
+}
+
+int
+main (int argc, char** argv)
+{
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
}
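
The zeroed entries at both ends of each expected row above follow from the border arithmetic noted in the test: with the "ignore" border policy, a kernel of width k leaves k/2 columns unset on each side. A minimal standalone sketch of that arithmetic, assuming a kernel width of 7 (three zeroed columns per side suggest filter_size/2 == 3; the actual kernel setup is outside this excerpt):

#include <cstddef>
#include <iostream>

int
main ()
{
  const std::size_t width = 64;       // cloud width used in the convolution test above
  const std::size_t kernel_size = 7;  // assumption: 3 zeroed columns per side => kernel_size / 2 == 3
  const std::size_t border = kernel_size / 2;

  for (std::size_t c = 0; c < width; ++c)
    if (c < border || c >= width - border)
      std::cout << "column " << c << " lies in the border (rgb expected to be 0,0,0)\n";
  return 0;
}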
using namespace pcl;
using namespace pcl::io;
-using namespace std;
using namespace Eigen;
ExtractIndices<PointXYZ> ei;
pcl::IndicesPtr indices (new pcl::Indices (2));
(*indices)[0] = 0;
- (*indices)[1] = static_cast<int> (cloud->points.size ()) - 1;
+ (*indices)[1] = cloud->size () - 1;
PointCloud<PointXYZ>::Ptr output (new PointCloud<PointXYZ>);
ei.setInputCloud (cloud);
ei.setIndices (indices);
ei.filter (*output);
- EXPECT_EQ (int (output->points.size ()), 2);
- EXPECT_EQ (int (output->width), 2);
- EXPECT_EQ (int (output->height), 1);
+ EXPECT_EQ (output->size (), 2);
+ EXPECT_EQ (output->width, 2);
+ EXPECT_EQ (output->height, 1);
- EXPECT_EQ (cloud->points[0].x, output->points[0].x);
- EXPECT_EQ (cloud->points[0].y, output->points[0].y);
- EXPECT_EQ (cloud->points[0].z, output->points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, (*output)[0].x);
+ EXPECT_EQ ((*cloud)[0].y, (*output)[0].y);
+ EXPECT_EQ ((*cloud)[0].z, (*output)[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output->points[1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output->points[1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output->points[1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, (*output)[1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, (*output)[1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, (*output)[1].z);
}
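
The pattern in the hunk above, replacing cloud->points[i] with (*cloud)[i] and cloud->points.size () with cloud->size (), recurs throughout the remaining hunks. A minimal sketch of the two equivalent access styles, assuming an illustrative helper taking a PointCloud<PointXYZ>::Ptr named cloud:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void
accessor_styles (const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud)
{
  // old style: reach into the public points vector
  const auto n_old = cloud->points.size ();
  const auto& p_old = cloud->points[0];

  // new style: PointCloud exposes size () and operator[] itself
  const auto n_new = cloud->size ();
  const auto& p_new = (*cloud)[0];

  (void) n_old; (void) p_old; (void) n_new; (void) p_new;
}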
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ExtractIndices<PointXYZ> ei;
pcl::IndicesPtr indices (new pcl::Indices (2));
(*indices)[0] = 0;
- (*indices)[1] = static_cast<int> (cloud->points.size ()) - 1;
+ (*indices)[1] = cloud->size () - 1;
PointCloud<PointXYZ> output;
ei.setInputCloud (cloud);
ei.setIndices (indices);
ei.filter (output);
- EXPECT_EQ (int (output.points.size ()), 2);
- EXPECT_EQ (int (output.width), 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), 2);
+ EXPECT_EQ (output.width, 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[1].z);
ei.setNegative (true);
ei.filter (output);
- EXPECT_EQ (output.points.size (), cloud->points.size () - 2);
- EXPECT_EQ (output.width, cloud->points.size () - 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), cloud->size () - 2);
+ EXPECT_EQ (output.width, cloud->size () - 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[1].x, output.points[0].x);
- EXPECT_EQ (cloud->points[1].y, output.points[0].y);
- EXPECT_EQ (cloud->points[1].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[1].x, output[0].x);
+ EXPECT_EQ ((*cloud)[1].y, output[0].y);
+ EXPECT_EQ ((*cloud)[1].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].x, output.points[output.points.size () - 1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].y, output.points[output.points.size () - 1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].z, output.points[output.points.size () - 1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].x, output[output.size () - 1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].y, output[output.size () - 1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].z, output[output.size () - 1].z);
// Test the pcl::PCLPointCloud2 method
ExtractIndices<PCLPointCloud2> ei2;
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 2);
- EXPECT_EQ (int (output.width), 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), 2);
+ EXPECT_EQ (output.width, 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[1].z);
ei2.setNegative (true);
ei2.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size () - 2);
- EXPECT_EQ (output.width, cloud->points.size () - 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), cloud->size () - 2);
+ EXPECT_EQ (output.width, cloud->size () - 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[1].x, output.points[0].x);
- EXPECT_EQ (cloud->points[1].y, output.points[0].y);
- EXPECT_EQ (cloud->points[1].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[1].x, output[0].x);
+ EXPECT_EQ ((*cloud)[1].y, output[0].y);
+ EXPECT_EQ ((*cloud)[1].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].x, output.points[output.points.size () - 1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].y, output.points[output.points.size () - 1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 2].z, output.points[output.points.size () - 1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].x, output[output.size () - 1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].y, output[output.size () - 1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 2].z, output[output.size () - 1].z);
ei2.setNegative (false);
ei2.setKeepOrganized (true);
fromPCLPointCloud2(output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_EQ (output.points[0].x, cloud->points[0].x);
- EXPECT_EQ (output.points[0].y, cloud->points[0].y);
- EXPECT_EQ (output.points[0].z, cloud->points[0].z);
- EXPECT_TRUE (std::isnan(output.points[1].x));
- EXPECT_TRUE (std::isnan(output.points[1].y));
- EXPECT_TRUE (std::isnan(output.points[1].z));
+ EXPECT_EQ (output[0].x, (*cloud)[0].x);
+ EXPECT_EQ (output[0].y, (*cloud)[0].y);
+ EXPECT_EQ (output[0].z, (*cloud)[0].z);
+ EXPECT_TRUE (std::isnan(output[1].x));
+ EXPECT_TRUE (std::isnan(output[1].y));
+ EXPECT_TRUE (std::isnan(output[1].z));
ei2.setNegative (true);
ei2.setKeepOrganized (true);
fromPCLPointCloud2(output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_TRUE (std::isnan(output.points[0].x));
- EXPECT_TRUE (std::isnan(output.points[0].y));
- EXPECT_TRUE (std::isnan(output.points[0].z));
- EXPECT_EQ (output.points[1].x, cloud->points[1].x);
- EXPECT_EQ (output.points[1].y, cloud->points[1].y);
- EXPECT_EQ (output.points[1].z, cloud->points[1].z);
+ EXPECT_TRUE (std::isnan(output[0].x));
+ EXPECT_TRUE (std::isnan(output[0].y));
+ EXPECT_TRUE (std::isnan(output[0].z));
+ EXPECT_EQ (output[1].x, (*cloud)[1].x);
+ EXPECT_EQ (output[1].y, (*cloud)[1].y);
+ EXPECT_EQ (output[1].z, (*cloud)[1].z);
// Test setNegative on empty datasets
PointCloud<PointXYZ> empty, result;
eie.setNegative (false);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 0);
+ EXPECT_EQ (result.size (), 0);
eie.setNegative (true);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 0);
+ EXPECT_EQ (result.size (), 0);
pcl::IndicesPtr idx (new pcl::Indices (10));
eie.setIndices (idx);
eie.setNegative (false);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 0);
+ EXPECT_EQ (result.size (), 0);
eie.setNegative (true);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 0);
+ EXPECT_EQ (result.size (), 0);
empty.points.resize (10);
empty.width = 10; empty.height = 1;
eie.setIndices (idx);
eie.setNegative (false);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 10);
+ EXPECT_EQ (result.size (), 10);
eie.setNegative (true);
eie.filter (result);
- EXPECT_EQ (int (result.points.size ()), 0);
+ EXPECT_EQ (result.size (), 0);
/*
PointCloud<PointXYZ> sc, scf;
sc.points.resize (5); sc.width = 5; sc.height = 1; sc.is_dense = true;
for (int i = 0; i < 5; i++)
{
- sc.points[i].x = sc.points[i].z = 0;
- sc.points[i].y = i;
+ sc[i].x = sc[i].z = 0;
+ sc[i].y = i;
}
PassThrough<PointXYZ> ps;
ps.setInputCloud (sc.makeShared ());
{
ps.setNegative ((bool)i);
ps.filter (scf);
- std::cerr << scf.points.size () << std::endl;
- for (std::size_t j = 0; j < scf.points.size (); ++j)
- std::cerr << scf.points[j] << std::endl;
+ std::cerr << scf.size () << std::endl;
+ for (index_t j = 0; j < scf.size (); ++j)
+ std::cerr << scf[j] << std::endl;
}
*/
}
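
The checks above rely on two ExtractIndices behaviours: setNegative (true) returns the complement of the supplied indices, and setKeepOrganized (true) preserves the input width/height and replaces the removed points with NaN instead of dropping them. A minimal sketch of the positive and negative extraction, mirroring the two-element index set used in the test (the helper name and cloud parameter are illustrative):

#include <pcl/filters/extract_indices.h>
#include <pcl/point_types.h>

void
extract_indices_sketch (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::PointCloud<pcl::PointXYZ> output;
  pcl::ExtractIndices<pcl::PointXYZ> ei;
  pcl::IndicesPtr indices (new pcl::Indices (2));
  (*indices)[0] = 0;
  (*indices)[1] = cloud->size () - 1;  // first and last point of the input

  ei.setInputCloud (cloud);
  ei.setIndices (indices);
  ei.filter (output);     // output now holds exactly the two indexed points

  ei.setNegative (true);  // invert the selection
  ei.filter (output);     // output now holds everything except those two points
}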
pt.setInputCloud (cloud);
pt.filter (output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
pt.setFilterLimits (0.05f, 0.1f);
pt.filter (output);
- EXPECT_EQ (int (output.points.size ()), 42);
- EXPECT_EQ (int (output.width), 42);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 42);
+ EXPECT_EQ (output.width, 42);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.074556, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.13415, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.051046, 1e-5);
+ EXPECT_NEAR (output[0].x, -0.074556, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.13415, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.051046, 1e-5);
- EXPECT_NEAR (output.points[41].x, -0.030331, 1e-5);
- EXPECT_NEAR (output.points[41].y, 0.039749, 1e-5);
- EXPECT_NEAR (output.points[41].z, 0.052133, 1e-5);
+ EXPECT_NEAR (output[41].x, -0.030331, 1e-5);
+ EXPECT_NEAR (output[41].y, 0.039749, 1e-5);
+ EXPECT_NEAR (output[41].z, 0.052133, 1e-5);
pt.setNegative (true);
pt.filter (output);
- EXPECT_EQ (int (output.points.size ()), 355);
- EXPECT_EQ (int (output.width), 355);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 355);
+ EXPECT_EQ (output.width, 355);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, 0.0054216, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.11349, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.040749, 1e-5);
+ EXPECT_NEAR (output[0].x, 0.0054216, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.11349, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.040749, 1e-5);
- EXPECT_NEAR (output.points[354].x, -0.07793, 1e-5);
- EXPECT_NEAR (output.points[354].y, 0.17516, 1e-5);
- EXPECT_NEAR (output.points[354].z, -0.0444, 1e-5);
+ EXPECT_NEAR (output[354].x, -0.07793, 1e-5);
+ EXPECT_NEAR (output[354].y, 0.17516, 1e-5);
+ EXPECT_NEAR (output[354].z, -0.0444, 1e-5);
PassThrough<PointXYZ> pt_(true);
pt_.filter (output);
EXPECT_EQ (pt_.getRemovedIndices()->size(), 0);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
pt_.setFilterLimits (0.05f, 0.1f);
pt_.filter (output);
- EXPECT_EQ (int (output.points.size ()), 42);
- EXPECT_EQ (int (output.width), 42);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size ()-pt_.getRemovedIndices()->size());
+ EXPECT_EQ (output.size (), 42);
+ EXPECT_EQ (output.width, 42);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-pt_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[0].x, -0.074556, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.13415, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.051046, 1e-5);
+ EXPECT_NEAR (output[0].x, -0.074556, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.13415, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.051046, 1e-5);
- EXPECT_NEAR (output.points[41].x, -0.030331, 1e-5);
- EXPECT_NEAR (output.points[41].y, 0.039749, 1e-5);
- EXPECT_NEAR (output.points[41].z, 0.052133, 1e-5);
+ EXPECT_NEAR (output[41].x, -0.030331, 1e-5);
+ EXPECT_NEAR (output[41].y, 0.039749, 1e-5);
+ EXPECT_NEAR (output[41].z, 0.052133, 1e-5);
pt_.setNegative (true);
pt_.filter (output);
- EXPECT_EQ (int (output.points.size ()), 355);
- EXPECT_EQ (int (output.width), 355);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size ()-pt_.getRemovedIndices()->size());
+ EXPECT_EQ (output.size (), 355);
+ EXPECT_EQ (output.width, 355);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-pt_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[0].x, 0.0054216, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.11349, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.040749, 1e-5);
+ EXPECT_NEAR (output[0].x, 0.0054216, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.11349, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.040749, 1e-5);
- EXPECT_NEAR (output.points[354].x, -0.07793, 1e-5);
- EXPECT_NEAR (output.points[354].y, 0.17516, 1e-5);
- EXPECT_NEAR (output.points[354].z, -0.0444, 1e-5);
+ EXPECT_NEAR (output[354].x, -0.07793, 1e-5);
+ EXPECT_NEAR (output[354].y, 0.17516, 1e-5);
+ EXPECT_NEAR (output[354].z, -0.0444, 1e-5);
// Test the keep organized structure
pt.setUserFilterValue (std::numeric_limits<float>::quiet_NaN ());
pt.setFilterFieldName ("");
pt.filter (output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (output.is_dense, cloud->is_dense);
- EXPECT_NEAR (output.points[0].x, cloud->points[0].x, 1e-5);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, cloud->points[cloud->points.size () - 1].x, 1e-5);
+ EXPECT_NEAR (output[0].x, (*cloud)[0].x, 1e-5);
+ EXPECT_NEAR (output[output.size () - 1].x, (*cloud)[cloud->size () - 1].x, 1e-5);
pt.setFilterFieldName ("z");
pt.setNegative (false);
pt.setKeepOrganized (true);
pt.filter (output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_EQ (bool (output.is_dense), false); // NaN was set as a user filter value
+ EXPECT_FALSE (output.is_dense); // NaN was set as a user filter value
- EXPECT_TRUE (std::isnan (output.points[0].x));
- EXPECT_TRUE (std::isnan (output.points[0].y));
- EXPECT_TRUE (std::isnan (output.points[0].z));
- EXPECT_TRUE (std::isnan (output.points[41].x));
- EXPECT_TRUE (std::isnan (output.points[41].y));
- EXPECT_TRUE (std::isnan (output.points[41].z));
+ EXPECT_TRUE (std::isnan (output[0].x));
+ EXPECT_TRUE (std::isnan (output[0].y));
+ EXPECT_TRUE (std::isnan (output[0].z));
+ EXPECT_TRUE (std::isnan (output[41].x));
+ EXPECT_TRUE (std::isnan (output[41].y));
+ EXPECT_TRUE (std::isnan (output[41].z));
pt.setNegative (true);
pt.filter (output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_EQ (bool (output.is_dense), false); // NaN was set as a user filter value
+ EXPECT_FALSE (output.is_dense); // NaN was set as a user filter value
- EXPECT_NEAR (output.points[0].x, cloud->points[0].x, 1e-5);
- EXPECT_NEAR (output.points[0].y, cloud->points[0].y, 1e-5);
- EXPECT_NEAR (output.points[0].z, cloud->points[0].z, 1e-5);
+ EXPECT_NEAR (output[0].x, (*cloud)[0].x, 1e-5);
+ EXPECT_NEAR (output[0].y, (*cloud)[0].y, 1e-5);
+ EXPECT_NEAR (output[0].z, (*cloud)[0].z, 1e-5);
- EXPECT_NEAR (output.points[41].x, cloud->points[41].x, 1e-5);
- EXPECT_NEAR (output.points[41].y, cloud->points[41].y, 1e-5);
- EXPECT_NEAR (output.points[41].z, cloud->points[41].z, 1e-5);
+ EXPECT_NEAR (output[41].x, (*cloud)[41].x, 1e-5);
+ EXPECT_NEAR (output[41].y, (*cloud)[41].y, 1e-5);
+ EXPECT_NEAR (output[41].z, (*cloud)[41].z, 1e-5);
// Test the PCLPointCloud2 method
PassThrough<PCLPointCloud2> pt2;
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 42);
- EXPECT_EQ (int (output.width), 42);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 42);
+ EXPECT_EQ (output.width, 42);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.074556, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.13415, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.051046, 1e-5);
+ EXPECT_NEAR (output[0].x, -0.074556, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.13415, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.051046, 1e-5);
- EXPECT_NEAR (output.points[41].x, -0.030331, 1e-5);
- EXPECT_NEAR (output.points[41].y, 0.039749, 1e-5);
- EXPECT_NEAR (output.points[41].z, 0.052133, 1e-5);
+ EXPECT_NEAR (output[41].x, -0.030331, 1e-5);
+ EXPECT_NEAR (output[41].y, 0.039749, 1e-5);
+ EXPECT_NEAR (output[41].z, 0.052133, 1e-5);
pt2.setNegative (true);
pt2.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 355);
- EXPECT_EQ (int (output.width), 355);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 355);
+ EXPECT_EQ (output.width, 355);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, 0.0054216, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.11349, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.040749, 1e-5);
+ EXPECT_NEAR (output[0].x, 0.0054216, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.11349, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.040749, 1e-5);
- EXPECT_NEAR (output.points[354].x, -0.07793, 1e-5);
- EXPECT_NEAR (output.points[354].y, 0.17516, 1e-5);
- EXPECT_NEAR (output.points[354].z, -0.0444, 1e-5);
+ EXPECT_NEAR (output[354].x, -0.07793, 1e-5);
+ EXPECT_NEAR (output[354].y, 0.17516, 1e-5);
+ EXPECT_NEAR (output[354].z, -0.0444, 1e-5);
PassThrough<PCLPointCloud2> pt2_(true);
pt2_.setInputCloud (cloud_blob);
fromPCLPointCloud2 (output_blob, output);
EXPECT_EQ (pt2_.getRemovedIndices()->size(), 0);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 42);
- EXPECT_EQ (int (output.width), 42);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size ()-pt2_.getRemovedIndices()->size());
+ EXPECT_EQ (output.size (), 42);
+ EXPECT_EQ (output.width, 42);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-pt2_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[0].x, -0.074556, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.13415, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.051046, 1e-5);
+ EXPECT_NEAR (output[0].x, -0.074556, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.13415, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.051046, 1e-5);
- EXPECT_NEAR (output.points[41].x, -0.030331, 1e-5);
- EXPECT_NEAR (output.points[41].y, 0.039749, 1e-5);
- EXPECT_NEAR (output.points[41].z, 0.052133, 1e-5);
+ EXPECT_NEAR (output[41].x, -0.030331, 1e-5);
+ EXPECT_NEAR (output[41].y, 0.039749, 1e-5);
+ EXPECT_NEAR (output[41].z, 0.052133, 1e-5);
pt2_.setNegative (true);
pt2_.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 355);
- EXPECT_EQ (int (output.width), 355);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size ()-pt2_.getRemovedIndices()->size());
+ EXPECT_EQ (output.size (), 355);
+ EXPECT_EQ (output.width, 355);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-pt2_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[0].x, 0.0054216, 1e-5);
- EXPECT_NEAR (output.points[0].y, 0.11349, 1e-5);
- EXPECT_NEAR (output.points[0].z, 0.040749, 1e-5);
+ EXPECT_NEAR (output[0].x, 0.0054216, 1e-5);
+ EXPECT_NEAR (output[0].y, 0.11349, 1e-5);
+ EXPECT_NEAR (output[0].z, 0.040749, 1e-5);
- EXPECT_NEAR (output.points[354].x, -0.07793, 1e-5);
- EXPECT_NEAR (output.points[354].y, 0.17516, 1e-5);
- EXPECT_NEAR (output.points[354].z, -0.0444, 1e-5);
+ EXPECT_NEAR (output[354].x, -0.07793, 1e-5);
+ EXPECT_NEAR (output[354].y, 0.17516, 1e-5);
+ EXPECT_NEAR (output[354].z, -0.0444, 1e-5);
// Test the keep organized structure
pt2.setUserFilterValue (std::numeric_limits<float>::quiet_NaN ());
pt2.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (output.is_dense, cloud->is_dense);
- EXPECT_NEAR (output.points[0].x, cloud->points[0].x, 1e-5);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, cloud->points[cloud->points.size () - 1].x, 1e-5);
+ EXPECT_NEAR (output[0].x, (*cloud)[0].x, 1e-5);
+ EXPECT_NEAR (output[output.size () - 1].x, (*cloud)[cloud->size () - 1].x, 1e-5);
pt2.setFilterFieldName ("z");
pt2.setNegative (false);
pt2.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_EQ (bool (output.is_dense), false); // NaN was set as a user filter value
+ EXPECT_FALSE (output.is_dense); // NaN was set as a user filter value
- EXPECT_TRUE (std::isnan (output.points[0].x));
- EXPECT_TRUE (std::isnan (output.points[0].y));
- EXPECT_TRUE (std::isnan (output.points[0].z));
+ EXPECT_TRUE (std::isnan (output[0].x));
+ EXPECT_TRUE (std::isnan (output[0].y));
+ EXPECT_TRUE (std::isnan (output[0].z));
- EXPECT_TRUE (std::isnan (output.points[41].x));
- EXPECT_TRUE (std::isnan (output.points[41].y));
- EXPECT_TRUE (std::isnan (output.points[41].z));
+ EXPECT_TRUE (std::isnan (output[41].x));
+ EXPECT_TRUE (std::isnan (output[41].y));
+ EXPECT_TRUE (std::isnan (output[41].z));
pt2.setNegative (true);
pt2.filter (output_blob);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (output.points.size (), cloud->points.size ());
+ EXPECT_EQ (output.size (), cloud->size ());
EXPECT_EQ (output.width, cloud->width);
EXPECT_EQ (output.height, cloud->height);
- EXPECT_EQ (bool (output.is_dense), false); // NaN was set as a user filter value
+ EXPECT_FALSE (output.is_dense); // NaN was set as a user filter value
- EXPECT_NEAR (output.points[0].x, cloud->points[0].x, 1e-5);
- EXPECT_NEAR (output.points[0].y, cloud->points[0].y, 1e-5);
- EXPECT_NEAR (output.points[0].z, cloud->points[0].z, 1e-5);
+ EXPECT_NEAR (output[0].x, (*cloud)[0].x, 1e-5);
+ EXPECT_NEAR (output[0].y, (*cloud)[0].y, 1e-5);
+ EXPECT_NEAR (output[0].z, (*cloud)[0].z, 1e-5);
- EXPECT_NEAR (output.points[41].x, cloud->points[41].x, 1e-5);
- EXPECT_NEAR (output.points[41].y, cloud->points[41].y, 1e-5);
- EXPECT_NEAR (output.points[41].z, cloud->points[41].z, 1e-5);
+ EXPECT_NEAR (output[41].x, (*cloud)[41].x, 1e-5);
+ EXPECT_NEAR (output[41].y, (*cloud)[41].y, 1e-5);
+ EXPECT_NEAR (output[41].z, (*cloud)[41].z, 1e-5);
}
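
For context on the PassThrough assertions above: the filter keeps points whose chosen field lies inside the limits, setNegative (true) inverts that selection, and setKeepOrganized (true) keeps the cloud's width/height and writes the user filter value (NaN here) into rejected points instead of removing them, which is why is_dense becomes false. A minimal sketch of that configuration (the helper name, cloud parameter and limits are illustrative):

#include <limits>

#include <pcl/filters/passthrough.h>
#include <pcl/point_types.h>

void
passthrough_sketch (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::PointCloud<pcl::PointXYZ> output;
  pcl::PassThrough<pcl::PointXYZ> pass;
  pass.setInputCloud (cloud);
  pass.setFilterFieldName ("z");
  pass.setFilterLimits (0.05f, 0.1f);  // keep points with 0.05 <= z <= 0.1
  pass.setKeepOrganized (true);        // keep width/height; overwrite rejected points
  pass.setUserFilterValue (std::numeric_limits<float>::quiet_NaN ());
  pass.filter (output);                // output.is_dense is now false (it contains NaNs)
}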
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
grid.setInputCloud (cloud);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 103);
- EXPECT_EQ (int (output.width), 103);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 103);
+ EXPECT_EQ (output.width, 103);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
grid.setFilterFieldName ("z");
grid.setFilterLimits (0.05, 0.1);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 14);
- EXPECT_EQ (int (output.width), 14);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 14);
+ EXPECT_EQ (output.width, 14);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.026125, 1e-4);
- EXPECT_NEAR (output.points[0].y, 0.039788, 1e-4);
- EXPECT_NEAR (output.points[0].z, 0.052827, 1e-4);
+ EXPECT_NEAR (output[0].x, -0.026125, 1e-4);
+ EXPECT_NEAR (output[0].y, 0.039788, 1e-4);
+ EXPECT_NEAR (output[0].z, 0.052827, 1e-4);
- EXPECT_NEAR (output.points[13].x, -0.073202, 1e-4);
- EXPECT_NEAR (output.points[13].y, 0.1296, 1e-4);
- EXPECT_NEAR (output.points[13].z, 0.051333, 1e-4);
+ EXPECT_NEAR (output[13].x, -0.073202, 1e-4);
+ EXPECT_NEAR (output[13].y, 0.1296, 1e-4);
+ EXPECT_NEAR (output[13].z, 0.051333, 1e-4);
grid.setFilterLimitsNegative (true);
grid.setSaveLeafLayout(true);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 100);
- EXPECT_EQ (int (output.width), 100);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 100);
+ EXPECT_EQ (output.width, 100);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- //EXPECT_NEAR (output.points[0].x, -0.070192, 1e-4);
- //EXPECT_NEAR (output.points[0].y, 0.17653, 1e-4);
- //EXPECT_NEAR (output.points[0].z, -0.048774, 1e-4);
+ //EXPECT_NEAR (output[0].x, -0.070192, 1e-4);
+ //EXPECT_NEAR (output[0].y, 0.17653, 1e-4);
+ //EXPECT_NEAR (output[0].z, -0.048774, 1e-4);
- //EXPECT_NEAR (output.points[99].x, -0.068948, 1e-4);
- //EXPECT_NEAR (output.points[99].y, 0.1447, 1e-4);
- //EXPECT_NEAR (output.points[99].z, 0.042178, 1e-4);
+ //EXPECT_NEAR (output[99].x, -0.068948, 1e-4);
+ //EXPECT_NEAR (output[99].y, 0.1447, 1e-4);
+ //EXPECT_NEAR (output[99].z, 0.042178, 1e-4);
// centroids should be identified correctly
- EXPECT_EQ (grid.getCentroidIndex (output.points[0]), 0);
- EXPECT_EQ (grid.getCentroidIndex (output.points[99]), 99);
+ EXPECT_EQ (grid.getCentroidIndex (output[0]), 0);
+ EXPECT_EQ (grid.getCentroidIndex (output[99]), 99);
EXPECT_EQ (grid.getCentroidIndexAt (grid.getGridCoordinates (-1,-1,-1)), -1);
//PCL_ERROR ("IGNORE PREVIOUS ERROR: testing it's functionality!\n");
// input point 195 [0.04872199893, 0.07376000285, 0.01743399911]
- int centroidIdx = grid.getCentroidIndex (cloud->points[195]);
+ int centroidIdx = grid.getCentroidIndex ((*cloud)[195]);
// for arbitrary points, the centroid should be close
- EXPECT_LE (std::abs (output.points[centroidIdx].x - cloud->points[195].x), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx].y - cloud->points[195].y), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx].z - cloud->points[195].z), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].x - (*cloud)[195].x), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].y - (*cloud)[195].y), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].z - (*cloud)[195].z), 0.02);
// if getNeighborCentroidIndices works then the other helper functions work as well
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[99], Eigen::MatrixXi::Zero(3,1))[0], 99);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[99], Eigen::MatrixXi::Zero(3,1))[0], 99);
// neighboring centroid should be in the right position
Eigen::MatrixXi directions = Eigen::Vector3i (0, 0, 1);
- std::vector<int> neighbors = grid.getNeighborCentroidIndices (cloud->points[195], directions);
+ std::vector<int> neighbors = grid.getNeighborCentroidIndices ((*cloud)[195], directions);
EXPECT_EQ (neighbors.size (), std::size_t (directions.cols ()));
EXPECT_NE (neighbors.at (0), -1);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].x - output.points[centroidIdx].x), 0.02);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].y - output.points[centroidIdx].y), 0.02);
- EXPECT_LE ( output.points[neighbors.at (0)].z - output.points[centroidIdx].z, 0.02 * 2);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].x - output[centroidIdx].x), 0.02);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].y - output[centroidIdx].y), 0.02);
+ EXPECT_LE ( output[neighbors.at (0)].z - output[centroidIdx].z, 0.02 * 2);
// Test the pcl::PCLPointCloud2 method
VoxelGrid<PCLPointCloud2> grid2;
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 103);
- EXPECT_EQ (int (output.width), 103);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 103);
+ EXPECT_EQ (output.width, 103);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
grid2.setFilterFieldName ("z");
grid2.setFilterLimits (0.05, 0.1);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 14);
- EXPECT_EQ (int (output.width), 14);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 14);
+ EXPECT_EQ (output.width, 14);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.026125, 1e-4);
- EXPECT_NEAR (output.points[0].y, 0.039788, 1e-4);
- EXPECT_NEAR (output.points[0].z, 0.052827, 1e-4);
+ EXPECT_NEAR (output[0].x, -0.026125, 1e-4);
+ EXPECT_NEAR (output[0].y, 0.039788, 1e-4);
+ EXPECT_NEAR (output[0].z, 0.052827, 1e-4);
- EXPECT_NEAR (output.points[13].x, -0.073202, 1e-4);
- EXPECT_NEAR (output.points[13].y, 0.1296, 1e-4);
- EXPECT_NEAR (output.points[13].z, 0.051333, 1e-4);
+ EXPECT_NEAR (output[13].x, -0.073202, 1e-4);
+ EXPECT_NEAR (output[13].y, 0.1296, 1e-4);
+ EXPECT_NEAR (output[13].z, 0.051333, 1e-4);
grid2.setFilterLimitsNegative (true);
grid2.setSaveLeafLayout(true);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 100);
- EXPECT_EQ (int (output.width), 100);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 100);
+ EXPECT_EQ (output.width, 100);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- //EXPECT_NEAR (output.points[0].x, -0.070192, 1e-4);
- //EXPECT_NEAR (output.points[0].y, 0.17653, 1e-4);
- //EXPECT_NEAR (output.points[0].z, -0.048774, 1e-4);
+ //EXPECT_NEAR (output[0].x, -0.070192, 1e-4);
+ //EXPECT_NEAR (output[0].y, 0.17653, 1e-4);
+ //EXPECT_NEAR (output[0].z, -0.048774, 1e-4);
- //EXPECT_NEAR (output.points[99].x, -0.068948, 1e-4);
- //EXPECT_NEAR (output.points[99].y, 0.1447, 1e-4);
- //EXPECT_NEAR (output.points[99].z, 0.042178, 1e-4);
+ //EXPECT_NEAR (output[99].x, -0.068948, 1e-4);
+ //EXPECT_NEAR (output[99].y, 0.1447, 1e-4);
+ //EXPECT_NEAR (output[99].z, 0.042178, 1e-4);
// centroids should be identified correctly
- EXPECT_EQ (grid2.getCentroidIndex (output.points[0].x, output.points[0].y, output.points[0].z), 0);
- EXPECT_EQ (grid2.getCentroidIndex (output.points[99].x, output.points[99].y, output.points[99].z), 99);
+ EXPECT_EQ (grid2.getCentroidIndex (output[0].x, output[0].y, output[0].z), 0);
+ EXPECT_EQ (grid2.getCentroidIndex (output[99].x, output[99].y, output[99].z), 99);
EXPECT_EQ (grid2.getCentroidIndexAt (grid2.getGridCoordinates (-1,-1,-1)), -1);
//PCL_ERROR ("IGNORE PREVIOUS ERROR: testing it's functionality!\n");
EXPECT_NE (centroidIdx2, -1);
// for arbitrary points, the centroid should be close
- EXPECT_LE (std::abs (output.points[centroidIdx2].x - 0.048722), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx2].y - 0.073760), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx2].z - 0.017434), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].x - 0.048722), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].y - 0.073760), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].z - 0.017434), 0.02);
// if getNeighborCentroidIndices works then the other helper functions work as well
- EXPECT_EQ (grid2.getNeighborCentroidIndices (output.points[0].x, output.points[0].y, output.points[0].z, Eigen::MatrixXi::Zero(3,1))[0], 0);
- EXPECT_EQ (grid2.getNeighborCentroidIndices (output.points[99].x, output.points[99].y, output.points[99].z, Eigen::MatrixXi::Zero(3,1))[0], 99);
+ EXPECT_EQ (grid2.getNeighborCentroidIndices (output[0].x, output[0].y, output[0].z, Eigen::MatrixXi::Zero(3,1))[0], 0);
+ EXPECT_EQ (grid2.getNeighborCentroidIndices (output[99].x, output[99].y, output[99].z, Eigen::MatrixXi::Zero(3,1))[0], 99);
// neighboring centroid should be in the right position
Eigen::MatrixXi directions2 = Eigen::Vector3i (0, 0, 1);
std::vector<int> neighbors2 = grid2.getNeighborCentroidIndices (0.048722f, 0.073760f, 0.017434f, directions2);
EXPECT_EQ (neighbors2.size (), std::size_t (directions2.cols ()));
EXPECT_NE (neighbors2.at (0), -1);
- EXPECT_LE (std::abs (output.points[neighbors2.at (0)].x - output.points[centroidIdx2].x), 0.02);
- EXPECT_LE (std::abs (output.points[neighbors2.at (0)].y - output.points[centroidIdx2].y), 0.02);
- EXPECT_LE (output.points[neighbors2.at (0)].z - output.points[centroidIdx2].z, 0.02 * 2);
+ EXPECT_LE (std::abs (output[neighbors2.at (0)].x - output[centroidIdx2].x), 0.02);
+ EXPECT_LE (std::abs (output[neighbors2.at (0)].y - output[centroidIdx2].y), 0.02);
+ EXPECT_LE (output[neighbors2.at (0)].z - output[centroidIdx2].z, 0.02 * 2);
}
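
The centroid lookups in the VoxelGrid checks above only work because the leaf layout is saved before filtering; getCentroidIndex then maps an arbitrary point to the voxel, and hence the downsampled output point, that contains it. A minimal sketch of that setup, assuming a 2 cm leaf size (suggested by the 0.02 tolerances in the checks; the actual grid configuration is outside this excerpt):

#include <pcl/filters/voxel_grid.h>
#include <pcl/point_types.h>

void
voxel_grid_sketch (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud)
{
  pcl::PointCloud<pcl::PointXYZ> output;
  pcl::VoxelGrid<pcl::PointXYZ> grid;
  grid.setInputCloud (cloud);
  grid.setLeafSize (0.02f, 0.02f, 0.02f);  // assumed leaf size
  grid.setSaveLeafLayout (true);           // required for getCentroidIndex () below
  grid.filter (output);

  // each output point maps back to its own voxel, so the returned index of an
  // output point equals its position in the downsampled cloud
  const int idx = grid.getCentroidIndex (output[0]);
  (void) idx;
}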
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
grid.setInputCloud (cloud);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 103);
- EXPECT_EQ (int (output.width), 103);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 103);
+ EXPECT_EQ (output.width, 103);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
grid.setFilterFieldName ("z");
grid.setFilterLimits (0.05, 0.1);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 14);
- EXPECT_EQ (int (output.width), 14);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 14);
+ EXPECT_EQ (output.width, 14);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.026125, 1e-4);
- EXPECT_NEAR (output.points[0].y, 0.039788, 1e-4);
- EXPECT_NEAR (output.points[0].z, 0.052827, 1e-4);
+ EXPECT_NEAR (output[0].x, -0.026125, 1e-4);
+ EXPECT_NEAR (output[0].y, 0.039788, 1e-4);
+ EXPECT_NEAR (output[0].z, 0.052827, 1e-4);
- EXPECT_NEAR (output.points[13].x, -0.073202, 1e-4);
- EXPECT_NEAR (output.points[13].y, 0.1296, 1e-4);
- EXPECT_NEAR (output.points[13].z, 0.051333, 1e-4);
+ EXPECT_NEAR (output[13].x, -0.073202, 1e-4);
+ EXPECT_NEAR (output[13].y, 0.1296, 1e-4);
+ EXPECT_NEAR (output[13].z, 0.051333, 1e-4);
grid.setFilterLimitsNegative (true);
grid.setSaveLeafLayout(true);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 100);
- EXPECT_EQ (int (output.width), 100);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 100);
+ EXPECT_EQ (output.width, 100);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
// centroids should be identified correctly
- EXPECT_EQ (grid.getCentroidIndex (output.points[0]), 0);
- EXPECT_EQ (grid.getCentroidIndex (output.points[99]), 99);
+ EXPECT_EQ (grid.getCentroidIndex (output[0]), 0);
+ EXPECT_EQ (grid.getCentroidIndex (output[99]), 99);
EXPECT_EQ (grid.getCentroidIndexAt (grid.getGridCoordinates (-1,-1,-1)), -1);
//PCL_ERROR ("IGNORE PREVIOUS ERROR: testing it's functionality!\n");
// input point 195 [0.04872199893, 0.07376000285, 0.01743399911]
- int centroidIdx = grid.getCentroidIndex (cloud->points[195]);
+ int centroidIdx = grid.getCentroidIndex ((*cloud)[195]);
// for arbitrary points, the centroid should be close
- EXPECT_LE (std::abs (output.points[centroidIdx].x - cloud->points[195].x), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx].y - cloud->points[195].y), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx].z - cloud->points[195].z), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].x - (*cloud)[195].x), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].y - (*cloud)[195].y), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx].z - (*cloud)[195].z), 0.02);
// if getNeighborCentroidIndices works then the other helper functions work as well
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[99], Eigen::MatrixXi::Zero(3,1))[0], 99);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[99], Eigen::MatrixXi::Zero(3,1))[0], 99);
// neighboring centroid should be in the right position
Eigen::MatrixXi directions = Eigen::Vector3i (0, 0, 1);
- std::vector<int> neighbors = grid.getNeighborCentroidIndices (cloud->points[195], directions);
+ std::vector<int> neighbors = grid.getNeighborCentroidIndices ((*cloud)[195], directions);
EXPECT_EQ (neighbors.size (), std::size_t (directions.cols ()));
EXPECT_NE (neighbors.at (0), -1);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].x - output.points[centroidIdx].x), 0.02);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].y - output.points[centroidIdx].y), 0.02);
- EXPECT_LE ( output.points[neighbors.at (0)].z - output.points[centroidIdx].z, 0.02 * 2);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].x - output[centroidIdx].x), 0.02);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].y - output[centroidIdx].y), 0.02);
+ EXPECT_LE ( output[neighbors.at (0)].z - output[centroidIdx].z, 0.02 * 2);
// Test the pcl::PCLPointCloud2 method
VoxelGrid<PCLPointCloud2> grid2;
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 103);
- EXPECT_EQ (int (output.width), 103);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 103);
+ EXPECT_EQ (output.width, 103);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
grid2.setFilterFieldName ("z");
grid2.setFilterLimits (0.05, 0.1);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 14);
- EXPECT_EQ (int (output.width), 14);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 14);
+ EXPECT_EQ (output.width, 14);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.026125, 1e-4);
- EXPECT_NEAR (output.points[0].y, 0.039788, 1e-4);
- EXPECT_NEAR (output.points[0].z, 0.052827, 1e-4);
+ EXPECT_NEAR (output[0].x, -0.026125, 1e-4);
+ EXPECT_NEAR (output[0].y, 0.039788, 1e-4);
+ EXPECT_NEAR (output[0].z, 0.052827, 1e-4);
- EXPECT_NEAR (output.points[13].x, -0.073202, 1e-4);
- EXPECT_NEAR (output.points[13].y, 0.1296, 1e-4);
- EXPECT_NEAR (output.points[13].z, 0.051333, 1e-4);
+ EXPECT_NEAR (output[13].x, -0.073202, 1e-4);
+ EXPECT_NEAR (output[13].y, 0.1296, 1e-4);
+ EXPECT_NEAR (output[13].z, 0.051333, 1e-4);
grid2.setFilterLimitsNegative (true);
grid2.setSaveLeafLayout(true);
fromPCLPointCloud2 (output_blob, output);
- EXPECT_EQ (int (output.points.size ()), 100);
- EXPECT_EQ (int (output.width), 100);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 100);
+ EXPECT_EQ (output.width, 100);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
// centroids should be identified correctly
- EXPECT_EQ (grid2.getCentroidIndex (output.points[0].x, output.points[0].y, output.points[0].z), 0);
- EXPECT_EQ (grid2.getCentroidIndex (output.points[99].x, output.points[99].y, output.points[99].z), 99);
+ EXPECT_EQ (grid2.getCentroidIndex (output[0].x, output[0].y, output[0].z), 0);
+ EXPECT_EQ (grid2.getCentroidIndex (output[99].x, output[99].y, output[99].z), 99);
EXPECT_EQ (grid2.getCentroidIndexAt (grid2.getGridCoordinates (-1,-1,-1)), -1);
//PCL_ERROR ("IGNORE PREVIOUS ERROR: testing it's functionality!\n");
EXPECT_NE (centroidIdx2, -1);
// for arbitrary points, the centroid should be close
- EXPECT_LE (std::abs (output.points[centroidIdx2].x - 0.048722), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx2].y - 0.073760), 0.02);
- EXPECT_LE (std::abs (output.points[centroidIdx2].z - 0.017434), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].x - 0.048722), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].y - 0.073760), 0.02);
+ EXPECT_LE (std::abs (output[centroidIdx2].z - 0.017434), 0.02);
// if getNeighborCentroidIndices works then the other helper functions work as well
- EXPECT_EQ (grid2.getNeighborCentroidIndices (output.points[0].x, output.points[0].y, output.points[0].z, Eigen::MatrixXi::Zero(3,1))[0], 0);
- EXPECT_EQ (grid2.getNeighborCentroidIndices (output.points[99].x, output.points[99].y, output.points[99].z, Eigen::MatrixXi::Zero(3,1))[0], 99);
+ EXPECT_EQ (grid2.getNeighborCentroidIndices (output[0].x, output[0].y, output[0].z, Eigen::MatrixXi::Zero(3,1))[0], 0);
+ EXPECT_EQ (grid2.getNeighborCentroidIndices (output[99].x, output[99].y, output[99].z, Eigen::MatrixXi::Zero(3,1))[0], 99);
// neighboring centroid should be in the right position
Eigen::MatrixXi directions2 = Eigen::Vector3i (0, 0, 1);
std::vector<int> neighbors2 = grid2.getNeighborCentroidIndices (0.048722f, 0.073760f, 0.017434f, directions2);
EXPECT_EQ (neighbors2.size (), std::size_t (directions2.cols ()));
EXPECT_NE (neighbors2.at (0), -1);
- EXPECT_LE (std::abs (output.points[neighbors2.at (0)].x - output.points[centroidIdx2].x), 0.02);
- EXPECT_LE (std::abs (output.points[neighbors2.at (0)].y - output.points[centroidIdx2].y), 0.02);
- EXPECT_LE (output.points[neighbors2.at (0)].z - output.points[centroidIdx2].z, 0.02 * 2);
+ EXPECT_LE (std::abs (output[neighbors2.at (0)].x - output[centroidIdx2].x), 0.02);
+ EXPECT_LE (std::abs (output[neighbors2.at (0)].y - output[centroidIdx2].y), 0.02);
+ EXPECT_LE (output[neighbors2.at (0)].z - output[centroidIdx2].z, 0.02 * 2);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
grid_rgb.setInputCloud (cloud_rgb_ptr_);
grid_rgb.filter (output_rgb);
- EXPECT_EQ (int (output_rgb.points.size ()), 1);
- EXPECT_EQ (int (output_rgb.width), 1);
- EXPECT_EQ (int (output_rgb.height), 1);
- EXPECT_EQ (bool (output_rgb.is_dense), true);
+ EXPECT_EQ (output_rgb.size (), 1);
+ EXPECT_EQ (output_rgb.width, 1);
+ EXPECT_EQ (output_rgb.height, 1);
+ EXPECT_TRUE (output_rgb.is_dense);
{
int rgb;
int r,g,b;
- memcpy (&rgb, &(output_rgb.points[0].rgb), sizeof(int));
+ memcpy (&rgb, &(output_rgb[0].rgb), sizeof(int));
r = (rgb >> 16) & 0xFF; g = (rgb >> 8 ) & 0xFF; b = (rgb >> 0 ) & 0xFF;
- EXPECT_NEAR (output_rgb.points[0].x, 0.0, 1e-4);
- EXPECT_NEAR (output_rgb.points[0].y, 0.0, 1e-4);
- EXPECT_NEAR (output_rgb.points[0].z, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].x, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].y, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].z, 0.0, 1e-4);
EXPECT_NEAR (r, ave_r, 1.0);
EXPECT_NEAR (g, ave_g, 1.0);
EXPECT_NEAR (b, ave_b, 1.0);
fromPCLPointCloud2 (output_rgb_blob, output_rgb);
- EXPECT_EQ (int (output_rgb.points.size ()), 1);
- EXPECT_EQ (int (output_rgb.width), 1);
- EXPECT_EQ (int (output_rgb.height), 1);
- EXPECT_EQ (bool (output_rgb.is_dense), true);
+ EXPECT_EQ (output_rgb.size (), 1);
+ EXPECT_EQ (output_rgb.width, 1);
+ EXPECT_EQ (output_rgb.height, 1);
+ EXPECT_TRUE (output_rgb.is_dense);
{
int rgb;
int r,g,b;
- memcpy (&rgb, &(output_rgb.points[0].rgb), sizeof(int));
+ memcpy (&rgb, &(output_rgb[0].rgb), sizeof(int));
r = (rgb >> 16) & 0xFF; g = (rgb >> 8 ) & 0xFF; b = (rgb >> 0 ) & 0xFF;
- EXPECT_NEAR (output_rgb.points[0].x, 0.0, 1e-4);
- EXPECT_NEAR (output_rgb.points[0].y, 0.0, 1e-4);
- EXPECT_NEAR (output_rgb.points[0].z, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].x, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].y, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgb[0].z, 0.0, 1e-4);
EXPECT_NEAR (r, ave_r, 1.0);
EXPECT_NEAR (g, ave_g, 1.0);
EXPECT_NEAR (b, ave_b, 1.0);
grid_rgba.setInputCloud (cloud_rgba_ptr_);
grid_rgba.filter (output_rgba);
- EXPECT_EQ (int (output_rgba.points.size ()), 1);
- EXPECT_EQ (int (output_rgba.width), 1);
- EXPECT_EQ (int (output_rgba.height), 1);
- EXPECT_EQ (bool (output_rgba.is_dense), true);
+ EXPECT_EQ (output_rgba.size (), 1);
+ EXPECT_EQ (output_rgba.width, 1);
+ EXPECT_EQ (output_rgba.height, 1);
+ EXPECT_TRUE (output_rgba.is_dense);
{
int rgba;
int r,g,b,a;
- memcpy (&rgba, &(output_rgba.points[0].rgba), sizeof(int));
+ memcpy (&rgba, &(output_rgba[0].rgba), sizeof(int));
a = (rgba >> 24) & 0xFF; r = (rgba >> 16) & 0xFF; g = (rgba >> 8 ) & 0xFF; b = (rgba >> 0 ) & 0xFF;
- EXPECT_NEAR (output_rgba.points[0].x, 0.0, 1e-4);
- EXPECT_NEAR (output_rgba.points[0].y, 0.0, 1e-4);
- EXPECT_NEAR (output_rgba.points[0].z, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].x, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].y, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].z, 0.0, 1e-4);
EXPECT_NEAR (r, ave_r, 1.0);
EXPECT_NEAR (g, ave_g, 1.0);
EXPECT_NEAR (b, ave_b, 1.0);
fromPCLPointCloud2 (output_rgba_blob, output_rgba);
- EXPECT_EQ (int (output_rgba.points.size ()), 1);
- EXPECT_EQ (int (output_rgba.width), 1);
- EXPECT_EQ (int (output_rgba.height), 1);
- EXPECT_EQ (bool (output_rgba.is_dense), true);
+ EXPECT_EQ (output_rgba.size (), 1);
+ EXPECT_EQ (output_rgba.width, 1);
+ EXPECT_EQ (output_rgba.height, 1);
+ EXPECT_TRUE (output_rgba.is_dense);
{
int rgba;
int r,g,b,a;
- memcpy (&rgba, &(output_rgba.points[0].rgba), sizeof(int));
+ memcpy (&rgba, &(output_rgba[0].rgba), sizeof(int));
a = (rgba >> 24) & 0xFF; r = (rgba >> 16) & 0xFF; g = (rgba >> 8 ) & 0xFF; b = (rgba >> 0 ) & 0xFF;
- EXPECT_NEAR (output_rgba.points[0].x, 0.0, 1e-4);
- EXPECT_NEAR (output_rgba.points[0].y, 0.0, 1e-4);
- EXPECT_NEAR (output_rgba.points[0].z, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].x, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].y, 0.0, 1e-4);
+ EXPECT_NEAR (output_rgba[0].z, 0.0, 1e-4);
EXPECT_NEAR (r, ave_r, 1.0);
EXPECT_NEAR (g, ave_g, 1.0);
EXPECT_NEAR (b, ave_b, 1.0);
grid.setInputCloud (cloud);
grid.filter (output);
- EXPECT_EQ (int (output.points.size ()), 23);
- EXPECT_EQ (int (output.width), 23);
- EXPECT_EQ (int (output.height), 1);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_EQ (output.size (), 23);
+ EXPECT_EQ (output.width, 23);
+ EXPECT_EQ (output.height, 1);
+ EXPECT_TRUE (output.is_dense);
- EXPECT_NEAR (output.points[0].x, -0.073619894683361053, 1e-4);
- EXPECT_NEAR (output.points[0].y, 0.16789889335632324, 1e-4);
- EXPECT_NEAR (output.points[0].z, -0.03018110990524292, 1e-4);
+ EXPECT_NEAR (output[0].x, -0.073619894683361053, 1e-4);
+ EXPECT_NEAR (output[0].y, 0.16789889335632324, 1e-4);
+ EXPECT_NEAR (output[0].z, -0.03018110990524292, 1e-4);
- EXPECT_NEAR (output.points[13].x, -0.06865914911031723, 1e-4);
- EXPECT_NEAR (output.points[13].y, 0.15243285894393921, 1e-4);
- EXPECT_NEAR (output.points[13].z, 0.03266800194978714, 1e-4);
+ EXPECT_NEAR (output[13].x, -0.06865914911031723, 1e-4);
+ EXPECT_NEAR (output[13].y, 0.15243285894393921, 1e-4);
+ EXPECT_NEAR (output[13].z, 0.03266800194978714, 1e-4);
grid.setSaveLeafLayout (true);
grid.filter (output);
// centroids should be identified correctly
- EXPECT_EQ (grid.getCentroidIndex (output.points[0]), 0);
- EXPECT_EQ (grid.getCentroidIndex (output.points[17]), 17);
+ EXPECT_EQ (grid.getCentroidIndex (output[0]), 0);
+ EXPECT_EQ (grid.getCentroidIndex (output[17]), 17);
EXPECT_EQ (grid.getCentroidIndexAt (grid.getGridCoordinates (-1,-1,-1)), -1);
//PCL_ERROR ("IGNORE PREVIOUS ERROR: testing it's functionality!\n");
// input point 38 [-0.066091, 0.11973, 0.050881]
- int centroidIdx = grid.getCentroidIndex (cloud->points[38]);
+ int centroidIdx = grid.getCentroidIndex ((*cloud)[38]);
EXPECT_NE (centroidIdx, -1);
// if getNeighborCentroidIndices works then the other helper functions work as well
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
- EXPECT_EQ (grid.getNeighborCentroidIndices (output.points[17], Eigen::MatrixXi::Zero(3,1))[0], 17);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[0], Eigen::MatrixXi::Zero(3,1))[0], 0);
+ EXPECT_EQ (grid.getNeighborCentroidIndices (output[17], Eigen::MatrixXi::Zero(3,1))[0], 17);
// neighboring centroid should be in the right position
Eigen::MatrixXi directions = Eigen::Vector3i (0, 1, 0);
- std::vector<int> neighbors = grid.getNeighborCentroidIndices (cloud->points[38], directions);
+ std::vector<int> neighbors = grid.getNeighborCentroidIndices ((*cloud)[38], directions);
EXPECT_EQ (neighbors.size (), std::size_t (directions.cols ()));
EXPECT_NE (neighbors.at (0), -1);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].x - output.points[centroidIdx].x), 0.02);
- EXPECT_LE (std::abs (output.points[neighbors.at (0)].y - output.points[centroidIdx].y), 0.02);
- EXPECT_LE (output.points[neighbors.at (0)].z - output.points[centroidIdx].z, 0.02 * 2);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].x - output[centroidIdx].x), 0.02);
+ EXPECT_LE (std::abs (output[neighbors.at (0)].y - output[centroidIdx].y), 0.02);
+ EXPECT_LE (output[neighbors.at (0)].z - output[centroidIdx].z, 0.02 * 2);
// testing search functions
grid.setSaveLeafLayout (false);
std::vector<float> distances;
grid.nearestKSearch (PointXYZ(0,1,0), 1, leaves, distances);
- EXPECT_EQ (int (leaves.size ()), 1);
+ EXPECT_EQ (leaves.size (), 1);
EXPECT_NEAR (leaves[0]->getMean ()[0], -0.0284687, 1e-4);
EXPECT_NEAR (leaves[0]->getMean ()[1], 0.170919, 1e-4);
outrem.setMinNeighborsInRadius (14);
outrem.filter (cloud_out);
- EXPECT_EQ (int (cloud_out.points.size ()), 307);
- EXPECT_EQ (int (cloud_out.width), 307);
- EXPECT_EQ (bool (cloud_out.is_dense), true);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].x, -0.077893, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].y, 0.16039, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].z, -0.021299, 1e-4);
+ EXPECT_EQ (cloud_out.size (), 307);
+ EXPECT_EQ (cloud_out.width, 307);
+ EXPECT_TRUE (cloud_out.is_dense);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].x, -0.077893, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].y, 0.16039, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].z, -0.021299, 1e-4);
// Test the pcl::PCLPointCloud2 method
PCLPointCloud2 cloud_out2;
outrem2.filter (cloud_out2);
fromPCLPointCloud2 (cloud_out2, cloud_out);
- EXPECT_EQ (int (cloud_out.points.size ()), 307);
- EXPECT_EQ (int (cloud_out.width), 307);
- EXPECT_EQ (bool (cloud_out.is_dense), true);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].x, -0.077893, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].y, 0.16039, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].z, -0.021299, 1e-4);
+ EXPECT_EQ (cloud_out.size (), 307);
+ EXPECT_EQ (cloud_out.width, 307);
+ EXPECT_TRUE (cloud_out.is_dense);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].x, -0.077893, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].y, 0.16039, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].z, -0.021299, 1e-4);
// Remove outliers using a spherical density criterion
RadiusOutlierRemoval<PointXYZ> outrem_(true);
outrem_.setMinNeighborsInRadius (14);
outrem_.filter (cloud_out);
- EXPECT_EQ (int (cloud_out.points.size ()), 307);
- EXPECT_EQ (int (cloud_out.width), 307);
- EXPECT_EQ (bool (cloud_out.is_dense), true);
- EXPECT_EQ (int (cloud_out.points.size ()), cloud->points.size ()-outrem_.getRemovedIndices()->size());
+ EXPECT_EQ (cloud_out.size (), 307);
+ EXPECT_EQ (cloud_out.width, 307);
+ EXPECT_TRUE (cloud_out.is_dense);
+ EXPECT_EQ (cloud_out.size (), cloud->size ()-outrem_.getRemovedIndices()->size());
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].x, -0.077893, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].y, 0.16039, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].z, -0.021299, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].x, -0.077893, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].y, 0.16039, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].z, -0.021299, 1e-4);
// Test the pcl::PCLPointCloud2 method
RadiusOutlierRemoval<PCLPointCloud2> outrem2_(true);
outrem2_.filter (cloud_out2);
fromPCLPointCloud2 (cloud_out2, cloud_out);
- EXPECT_EQ (int (cloud_out.points.size ()), 307);
- EXPECT_EQ (int (cloud_out.width), 307);
- EXPECT_EQ (bool (cloud_out.is_dense), true);
- EXPECT_EQ (int (cloud_out.points.size ()), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
-
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].x, -0.077893, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].y, 0.16039, 1e-4);
- EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].z, -0.021299, 1e-4);
+ EXPECT_EQ (cloud_out.size (), 307);
+ EXPECT_EQ (cloud_out.width, 307);
+ EXPECT_TRUE (cloud_out.is_dense);
+ EXPECT_EQ (cloud_out.size (), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
+
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].x, -0.077893, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].y, 0.16039, 1e-4);
+ EXPECT_NEAR (cloud_out[cloud_out.size () - 1].z, -0.021299, 1e-4);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
outrem.setStddevMulThresh (1.0);
outrem.filter (output);
- EXPECT_EQ (int (output.points.size ()), 352);
- EXPECT_EQ (int (output.width), 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.034667, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.15131, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.00071029, 1e-4);
+ EXPECT_EQ (output.size (), 352);
+ EXPECT_EQ (output.width, 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_NEAR (output[output.size () - 1].x, -0.034667, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.15131, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.00071029, 1e-4);
outrem.setNegative (true);
outrem.filter (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()) - 352);
- EXPECT_EQ (int (output.width), int (cloud->width) - 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.07793, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.17516, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.0444, 1e-4);
+ EXPECT_EQ (output.size (), cloud->size () - 352);
+ EXPECT_EQ (output.width, cloud->width - 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_NEAR (output[output.size () - 1].x, -0.07793, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.17516, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.0444, 1e-4);
// Test the pcl::PCLPointCloud2 method
PCLPointCloud2 output2;
fromPCLPointCloud2 (output2, output);
- EXPECT_EQ (int (output.points.size ()), 352);
- EXPECT_EQ (int (output.width), 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.034667, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.15131, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.00071029, 1e-4);
+ EXPECT_EQ (output.size (), 352);
+ EXPECT_EQ (output.width, 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_NEAR (output[output.size () - 1].x, -0.034667, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.15131, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.00071029, 1e-4);
outrem2.setNegative (true);
outrem2.filter (output2);
fromPCLPointCloud2 (output2, output);
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()) - 352);
- EXPECT_EQ (int (output.width), int (cloud->width) - 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.07793, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.17516, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.0444, 1e-4);
+ EXPECT_EQ (output.size (), cloud->size () - 352);
+ EXPECT_EQ (output.width, cloud->width - 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_NEAR (output[output.size () - 1].x, -0.07793, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.17516, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.0444, 1e-4);
// Remove outliers using a spherical density criterion
StatisticalOutlierRemoval<PointXYZ> outrem_(true);
outrem_.setStddevMulThresh (1.0);
outrem_.filter (output);
- EXPECT_EQ (int (output.points.size ()), 352);
- EXPECT_EQ (int (output.width), 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size ()-outrem_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.034667, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.15131, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.00071029, 1e-4);
+ EXPECT_EQ (output.size (), 352);
+ EXPECT_EQ (output.width, 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-outrem_.getRemovedIndices()->size());
+ EXPECT_NEAR (output[output.size () - 1].x, -0.034667, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.15131, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.00071029, 1e-4);
outrem_.setNegative (true);
outrem_.filter (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()) - 352);
- EXPECT_EQ (int (output.width), int (cloud->width) - 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()) ,cloud->points.size ()-outrem_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.07793, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.17516, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.0444, 1e-4);
+ EXPECT_EQ (output.size (), cloud->size () - 352);
+ EXPECT_EQ (output.width, cloud->width - 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud->size ()-outrem_.getRemovedIndices()->size());
+ EXPECT_NEAR (output[output.size () - 1].x, -0.07793, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.17516, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.0444, 1e-4);
// Test the pcl::PCLPointCloud2 method
StatisticalOutlierRemoval<PCLPointCloud2> outrem2_(true);
fromPCLPointCloud2 (output2, output);
- EXPECT_EQ (int (output.points.size ()), 352);
- EXPECT_EQ (int (output.width), 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.034667, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.15131, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.00071029, 1e-4);
+ EXPECT_EQ (output.size (), 352);
+ EXPECT_EQ (output.width, 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
+ EXPECT_NEAR (output[output.size () - 1].x, -0.034667, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.15131, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.00071029, 1e-4);
outrem2_.setNegative (true);
outrem2_.filter (output2);
fromPCLPointCloud2 (output2, output);
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()) - 352);
- EXPECT_EQ (int (output.width), int (cloud->width) - 352);
- EXPECT_EQ (bool (output.is_dense), true);
- EXPECT_EQ (int (output.points.size ()), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.07793, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.17516, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, -0.0444, 1e-4);
+ EXPECT_EQ (output.size (), cloud->size () - 352);
+ EXPECT_EQ (output.width, cloud->width - 352);
+ EXPECT_TRUE (output.is_dense);
+ EXPECT_EQ (output.size (), cloud_blob->width*cloud_blob->height-outrem2_.getRemovedIndices()->size());
+ EXPECT_NEAR (output[output.size () - 1].x, -0.07793, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.17516, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, -0.0444, 1e-4);
}
//////////////////////////////////////////////////////////////////////////////////////////////
condrem.setKeepOrganized (false);
condrem.filter (output);
- EXPECT_EQ (bool (output.isOrganized ()), false);
- EXPECT_EQ (int (output.points.size ()), 28);
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.087292, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.103140, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, 0.020825, 1e-4);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_FALSE (output.isOrganized ());
+ EXPECT_EQ (output.size (), 28);
+ EXPECT_NEAR (output[output.size () - 1].x, -0.087292, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.103140, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, 0.020825, 1e-4);
+ EXPECT_TRUE (output.is_dense);
// try the not dense version
condrem.setKeepOrganized (true);
num_not_nan++;
}
- EXPECT_EQ (bool (output.isOrganized ()), bool (cloud->isOrganized ()));
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()));
- EXPECT_EQ (int (output.width), int (cloud->width));
- EXPECT_EQ (int (output.height), int (cloud->height));
+ EXPECT_EQ (output.isOrganized (), cloud->isOrganized ());
+ EXPECT_EQ (output.size (), cloud->size ());
+ EXPECT_EQ (output.width, cloud->width);
+ EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (num_not_nan, 28);
- EXPECT_EQ (bool (output.is_dense), false);
+ EXPECT_FALSE (output.is_dense);
// build the filter
ConditionalRemoval<PointXYZ> condrem_ (true);
condrem_.setKeepOrganized (false);
condrem_.filter (output);
- EXPECT_EQ (bool (output.isOrganized ()), false);
- EXPECT_EQ (int (output.points.size ()), 28);
- EXPECT_EQ (int (output.points.size ()), cloud->points.size()-condrem_.getRemovedIndices()->size());
- EXPECT_NEAR (output.points[output.points.size () - 1].x, -0.087292, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].y, 0.103140, 1e-4);
- EXPECT_NEAR (output.points[output.points.size () - 1].z, 0.020825, 1e-4);
- EXPECT_EQ (bool (output.is_dense), true);
+ EXPECT_FALSE (output.isOrganized ());
+ EXPECT_EQ (output.size (), 28);
+ EXPECT_EQ (output.size (), cloud->size()-condrem_.getRemovedIndices()->size());
+ EXPECT_NEAR (output[output.size () - 1].x, -0.087292, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].y, 0.103140, 1e-4);
+ EXPECT_NEAR (output[output.size () - 1].z, 0.020825, 1e-4);
+ EXPECT_TRUE (output.is_dense);
// try the not dense version
condrem_.setKeepOrganized (true);
num_not_nan++;
}
- EXPECT_EQ (bool (output.isOrganized ()), bool (cloud->isOrganized ()));
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()));
- EXPECT_EQ (int (output.width), int (cloud->width));
- EXPECT_EQ (int (output.height), int (cloud->height));
+ EXPECT_EQ (output.isOrganized (), cloud->isOrganized ());
+ EXPECT_EQ (output.size (), cloud->size ());
+ EXPECT_EQ (output.width, cloud->width);
+ EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (num_not_nan, 28);
- EXPECT_EQ (bool (output.is_dense), false);
- EXPECT_EQ (int (num_not_nan), cloud->points.size()-condrem_.getRemovedIndices()->size());
+ EXPECT_FALSE (output.is_dense);
+ EXPECT_EQ (num_not_nan, cloud->size()-condrem_.getRemovedIndices()->size());
}
//////////////////////////////////////////////////////////////////////////////////////////////
// build some indices
pcl::IndicesPtr indices (new pcl::Indices (2));
(*indices)[0] = 0;
- (*indices)[1] = static_cast<int> (cloud->points.size ()) - 1;
+ (*indices)[1] = cloud->size () - 1;
// build a condition which is always true
ConditionAnd<PointXYZ>::Ptr true_cond (new ConditionAnd<PointXYZ> ());
condrem2.setKeepOrganized (false);
condrem2.filter (output);
- EXPECT_EQ (int (output.points.size ()), 2);
- EXPECT_EQ (int (output.width), 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), 2);
+ EXPECT_EQ (output.width, 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[1].z);
// try the not dense version
condrem2.setKeepOrganized (true);
condrem2.filter (output);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[output.points.size () - 1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[output.points.size () - 1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[output.points.size () - 1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[output.size () - 1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[output.size () - 1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[output.size () - 1].z);
int num_not_nan = 0;
for (const auto &point : output.points)
num_not_nan++;
}
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()));
- EXPECT_EQ (int (output.width), int (cloud->width));
- EXPECT_EQ (int (output.height), int (cloud->height));
+ EXPECT_EQ (output.size (), cloud->size ());
+ EXPECT_EQ (output.width, cloud->width);
+ EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (num_not_nan, 2);
// build the filter
condrem2_.setKeepOrganized (false);
condrem2_.filter (output);
- EXPECT_EQ (int (output.points.size ()), 2);
- EXPECT_EQ (int (output.width), 2);
- EXPECT_EQ (int (output.height), 1);
+ EXPECT_EQ (output.size (), 2);
+ EXPECT_EQ (output.width, 2);
+ EXPECT_EQ (output.height, 1);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[1].z);
- EXPECT_EQ (int (output.points.size ()), int (indices->size ()) - int (condrem2_.getRemovedIndices ()->size ()));
+ EXPECT_EQ (output.size (), indices->size () - condrem2_.getRemovedIndices ()->size ());
// try the not dense version
condrem2_.setKeepOrganized (true);
condrem2_.filter (output);
- EXPECT_EQ (cloud->points[0].x, output.points[0].x);
- EXPECT_EQ (cloud->points[0].y, output.points[0].y);
- EXPECT_EQ (cloud->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*cloud)[0].x, output[0].x);
+ EXPECT_EQ ((*cloud)[0].y, output[0].y);
+ EXPECT_EQ ((*cloud)[0].z, output[0].z);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].x, output.points[output.points.size () - 1].x);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].y, output.points[output.points.size () - 1].y);
- EXPECT_EQ (cloud->points[cloud->points.size () - 1].z, output.points[output.points.size () - 1].z);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].x, output[output.size () - 1].x);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].y, output[output.size () - 1].y);
+ EXPECT_EQ ((*cloud)[cloud->size () - 1].z, output[output.size () - 1].z);
num_not_nan = 0;
for (const auto &point : output.points)
num_not_nan++;
}
- EXPECT_EQ (int (output.points.size ()), int (cloud->points.size ()));
- EXPECT_EQ (int (output.width), int (cloud->width));
- EXPECT_EQ (int (output.height), int (cloud->height));
+ EXPECT_EQ (output.size (), cloud->size ());
+ EXPECT_EQ (output.width, cloud->width);
+ EXPECT_EQ (output.height, cloud->height);
EXPECT_EQ (num_not_nan, 2);
- EXPECT_EQ (num_not_nan, int (indices->size ()) - int (condrem2_.getRemovedIndices ()->size ()));
+ EXPECT_EQ (num_not_nan, indices->size () - condrem2_.getRemovedIndices ()->size ());
}
//////////////////////////////////////////////////////////////////////////////////////////////
incloud->points.push_back (pt);
}
}
- incloud->width = 1;
- incloud->height = std::uint32_t (incloud->points.size ());
+ incloud->height = 1;
+ incloud->width = incloud->size ();
pcl::SamplingSurfaceNormal <pcl::PointNormal> ssn_filter;
ssn_filter.setInputCloud (incloud);
PointXYZ pt (.0f, .0f, .1f);
input->points.push_back (pt);
- input->width = 1;
- input->height = static_cast<std::uint32_t> (input->points.size ());
+ input->height = 1;
+ input->width = input->size ();
NormalEstimation<PointXYZ, PointNormal> ne;
ne.setInputCloud (input);
spfilter.filter (output);
// Should filter out the one shadow point that was added.
- EXPECT_EQ (int (output.points.size ()), 10000);
+ EXPECT_EQ (output.size (), 10000);
pcl::IndicesConstPtr removed = spfilter.getRemovedIndices ();
- EXPECT_EQ (int (removed->size ()), 1);
- EXPECT_EQ (removed->at (0), output.points.size ());
+ EXPECT_EQ (removed->size (), 1);
+ EXPECT_EQ (removed->at (0), output.size ());
// Try organized
spfilter.setKeepOrganized (true);
spfilter.filter (output);
EXPECT_EQ (output.size (), input->size ());
EXPECT_TRUE (std::isnan (output.at (input->size () - 1).x));
removed = spfilter.getRemovedIndices ();
- EXPECT_EQ (int (removed->size ()), 1);
+ EXPECT_EQ (removed->size (), 1);
// Now try negative
spfilter.setKeepOrganized (false);
spfilter.setNegative (true);
spfilter.filter (output);
- EXPECT_EQ (int (output.points.size ()), 1);
+ EXPECT_EQ (output.size (), 1);
EXPECT_EQ (output.at (0).x, pt.x);
EXPECT_EQ (output.at (0).y, pt.y);
EXPECT_EQ (output.at (0).z, pt.z);
removed = spfilter.getRemovedIndices ();
- EXPECT_EQ (int (removed->size ()), 10000);
+ EXPECT_EQ (removed->size (), 10000);
}
}
}
}
- input->width = 1;
- input->height = static_cast<std::uint32_t> (input->points.size ());
+ input->height = 1;
+ input->width = input->size ();
pcl::FrustumCulling<pcl::PointXYZ> fc (true); // Extract removed indices
fc.setInputCloud (input);
fc.filter (*output);
// Should filter all points in the input cloud
- EXPECT_EQ (output->points.size (), input->points.size ());
+ EXPECT_EQ (output->size (), input->size ());
pcl::IndicesConstPtr removed;
removed = fc.getRemovedIndices ();
- EXPECT_EQ (int (removed->size ()), 0);
+ EXPECT_EQ (removed->size (), 0);
// Check negative: no points should remain
fc.setNegative (true);
fc.filter (*output);
- EXPECT_EQ (int (output->size ()), 0);
+ EXPECT_EQ (output->size (), 0);
removed = fc.getRemovedIndices ();
EXPECT_EQ (removed->size (), input->size ());
// Make sure organized works
// apply it
condrem.filter (output);
- EXPECT_EQ (10, int (output.points.size ()));
+ EXPECT_EQ (10, output.size ());
- EXPECT_EQ (input->points[0].x, output.points[0].x);
- EXPECT_EQ (input->points[0].y, output.points[0].y);
- EXPECT_EQ (input->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*input)[0].x, output[0].x);
+ EXPECT_EQ ((*input)[0].y, output[0].y);
+ EXPECT_EQ ((*input)[0].z, output[0].z);
- EXPECT_EQ (input->points[9].x, output.points[9].x);
- EXPECT_EQ (input->points[9].y, output.points[9].y);
- EXPECT_EQ (input->points[9].z, output.points[9].z);
+ EXPECT_EQ ((*input)[9].x, output[9].x);
+ EXPECT_EQ ((*input)[9].y, output[9].y);
+ EXPECT_EQ ((*input)[9].z, output[9].z);
// rotate cylinder comparison along z-axis by PI/2
cyl_comp->transformComparison (getTransformation (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, float (M_PI) / 2.0f).inverse ());
condrem.filter (output);
- EXPECT_EQ (4, int (output.points.size ()));
+ EXPECT_EQ (4, output.size ());
- EXPECT_EQ (input->points[0].x, output.points[0].x);
- EXPECT_EQ (input->points[0].y, output.points[0].y);
- EXPECT_EQ (input->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*input)[0].x, output[0].x);
+ EXPECT_EQ ((*input)[0].y, output[0].y);
+ EXPECT_EQ ((*input)[0].z, output[0].z);
- EXPECT_EQ (input->points[3].x, output.points[3].x);
- EXPECT_EQ (input->points[3].y, output.points[3].y);
- EXPECT_EQ (input->points[3].z, output.points[3].z);
+ EXPECT_EQ ((*input)[3].x, output[3].x);
+ EXPECT_EQ ((*input)[3].y, output[3].y);
+ EXPECT_EQ ((*input)[3].z, output[3].z);
// change comparison to a simple plane (x < 5)
Eigen::Vector3f planeVector;
condrem.filter (output);
- EXPECT_EQ (6, int (output.points.size ()));
+ EXPECT_EQ (6, output.size ());
- EXPECT_EQ (input->points[0].x, output.points[0].x);
- EXPECT_EQ (input->points[0].y, output.points[0].y);
- EXPECT_EQ (input->points[0].z, output.points[0].z);
+ EXPECT_EQ ((*input)[0].x, output[0].x);
+ EXPECT_EQ ((*input)[0].y, output[0].y);
+ EXPECT_EQ ((*input)[0].z, output[0].z);
- EXPECT_EQ (input->points[5].x, output.points[5].x);
- EXPECT_EQ (input->points[5].y, output.points[5].y);
- EXPECT_EQ (input->points[5].z, output.points[5].z);
+ EXPECT_EQ ((*input)[5].x, output[5].x);
+ EXPECT_EQ ((*input)[5].y, output[5].y);
+ EXPECT_EQ ((*input)[5].z, output[5].z);
}
// Run estimation
pcl::NormalEstimation<pcl::PointXYZRGB, pcl::PointXYZRGBNormal> ne;
cloud_organized_normal.reserve (cloud_organized_nonan.size ());
- for (std::size_t i = 0; i < cloud_organized_nonan.size (); ++i)
+ for (index_t i = 0; i < static_cast<index_t>(cloud_organized_nonan.size ()); ++i)
{
// Output point
pcl::PointXYZRGBNormal normali;
loadPCDFile (file_name, *cloud_blob);
fromPCLPointCloud2 (*cloud_blob, *cloud);
- indices_.resize (cloud->points.size ());
- for (int i = 0; i < static_cast<int> (indices_.size ()); ++i)
+ indices_.resize (cloud->size ());
+ for (index_t i = 0; i < static_cast<index_t>(indices_.size ()); ++i)
indices_[i] = i;
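All of the test changes above follow the same modernization pattern: sizes are queried through the cloud's size() instead of points.size(), points are accessed through the cloud's own operator[] instead of the points member, boolean fields are checked with EXPECT_TRUE/EXPECT_FALSE rather than EXPECT_EQ with a bool cast, and the int casts around size comparisons are dropped. A minimal before/after sketch, using values taken from the hunks above (the cloud variable is illustrative):

  // before
  EXPECT_EQ (int (cloud.points.size ()), 307);
  EXPECT_EQ (bool (cloud.is_dense), true);
  EXPECT_NEAR (cloud.points[0].x, -0.077893, 1e-4);

  // after
  EXPECT_EQ (cloud.size (), 307);
  EXPECT_TRUE (cloud.is_dense);
  EXPECT_NEAR (cloud[0].x, -0.077893, 1e-4);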
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2020-, Open Perception
+ *
+ * All rights reserved
+ */
+
+#include <pcl/common/generate.h>
+#include <pcl/filters/experimental/functor_filter.h>
+#include <pcl/test/gtest.h>
+#include <pcl/point_types.h>
+
+using namespace pcl;
+using namespace pcl::experimental;
+
+TEST(FunctorFilterTrait, CheckCompatibility)
+{
+ const auto copy_all = [](PointCloud<PointXYZ>, index_t) { return 0; };
+ EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(copy_all)>));
+
+ const auto ref_all = [](PointCloud<PointXYZ>&, index_t&) { return 0; };
+ EXPECT_FALSE((is_functor_for_filter_v<PointXYZ, decltype(ref_all)>));
+
+ const auto ref_cloud = [](PointCloud<PointXYZ>&, index_t) { return 0; };
+ EXPECT_FALSE((is_functor_for_filter_v<PointXYZ, decltype(ref_cloud)>));
+
+ const auto const_ref_cloud = [](const PointCloud<PointXYZ>&, index_t) { return 0; };
+ EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(const_ref_cloud)>));
+
+ const auto const_ref_all = [](const PointCloud<PointXYZ>&, const index_t&) {
+ return 0;
+ };
+ EXPECT_TRUE((is_functor_for_filter_v<PointXYZ, decltype(const_ref_all)>));
+}
+
+struct FunctorFilterRandom : public testing::TestWithParam<std::uint32_t> {
+ void
+ SetUp() override
+ {
+ cloud = make_shared<PointCloud<PointXYZ>>();
+
+ std::uint32_t seed = GetParam();
+ common::CloudGenerator<PointXYZ, common::UniformGenerator<float>> generator{
+ {-10., 0., seed}};
+ generator.fill(20, 20, negative_cloud);
+ generator.setParameters({0., 10., seed});
+ generator.fill(10, 10, positive_cloud);
+ *cloud = negative_cloud + positive_cloud;
+ }
+
+ shared_ptr<PointCloud<PointXYZ>> cloud;
+ PointCloud<PointXYZ> out_cloud, negative_cloud, positive_cloud;
+};
+
+TEST_P(FunctorFilterRandom, functioning)
+{
+
+ const auto lambda = [](const PointCloud<PointXYZ>& cloud, index_t idx) {
+ const auto& pt = cloud[idx];
+ return (pt.getArray3fMap() < 0).all();
+ };
+
+ for (const auto& keep_removed : {true, false}) {
+ FunctorFilter<PointXYZ, decltype(lambda)> filter{lambda, keep_removed};
+ filter.setInputCloud(cloud);
+ const auto removed_size = filter.getRemovedIndices()->size();
+
+ filter.setNegative(false);
+ filter.filter(out_cloud);
+
+ EXPECT_EQ(out_cloud.size(), negative_cloud.size());
+ if (keep_removed) {
+ EXPECT_EQ(filter.getRemovedIndices()->size(), positive_cloud.size());
+ }
+ else {
+ EXPECT_EQ(filter.getRemovedIndices()->size(), removed_size);
+ }
+
+ filter.setNegative(true);
+ filter.filter(out_cloud);
+
+ EXPECT_EQ(out_cloud.size(), positive_cloud.size());
+ if (keep_removed) {
+ EXPECT_EQ(filter.getRemovedIndices()->size(), negative_cloud.size());
+ }
+ else {
+ EXPECT_EQ(filter.getRemovedIndices()->size(), removed_size);
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(RandomSeed,
+ FunctorFilterRandom,
+ testing::Values(123, 456, 789));
+
+namespace type_test {
+int
+free_func(const PointCloud<PointXYZ>&, const index_t& idx)
+{
+ return idx % 2;
+}
+
+static const auto lambda_func = [](const PointCloud<PointXYZ>& cloud, index_t idx) {
+ return free_func(cloud, idx);
+};
+
+struct StaticFunctor {
+ static int
+ functor(PointCloud<PointXYZ> cloud, index_t idx)
+ {
+ return free_func(cloud, idx);
+ }
+ int
+ operator()(PointCloud<PointXYZ> cloud, index_t idx)
+ {
+ return free_func(cloud, idx);
+ }
+};
+struct StaticFunctorConst {
+ int
+ operator()(PointCloud<PointXYZ> cloud, index_t idx) const
+ {
+ return free_func(cloud, idx);
+ }
+};
+
+using LambdaT = decltype(lambda_func);
+using StdFunctorBoolT = std::function<bool(PointCloud<PointXYZ>, index_t)>;
+using StdFunctorIntT = std::function<int(PointCloud<PointXYZ>, index_t)>;
+using FreeFuncT = decltype(free_func)*;
+using StaticFunctorT = decltype(StaticFunctor::functor)*;
+using NonConstFunctorT = StaticFunctor;
+using ConstFunctorT = StaticFunctorConst;
+
+template <typename FunctorT>
+struct Helper {};
+
+#define HELPER_MACRO(TYPE, VALUE) \
+ template <> \
+ struct Helper<TYPE> { \
+ using type = TYPE; \
+ static type value; \
+ }; \
+ TYPE Helper<TYPE>::value = VALUE
+
+HELPER_MACRO(LambdaT, lambda_func);
+HELPER_MACRO(StdFunctorBoolT, lambda_func);
+HELPER_MACRO(StdFunctorIntT, lambda_func);
+HELPER_MACRO(FreeFuncT, free_func);
+HELPER_MACRO(StaticFunctorT, StaticFunctor::functor);
+HELPER_MACRO(NonConstFunctorT, {});
+HELPER_MACRO(ConstFunctorT, {});
+
+#undef HELPER_MACRO
+
+using types = ::testing::Types<LambdaT,
+ StdFunctorBoolT,
+ StdFunctorIntT,
+ FreeFuncT,
+ StaticFunctorT,
+ NonConstFunctorT,
+ ConstFunctorT>;
+} // namespace type_test
+
+template <typename T>
+struct FunctorFilterFunctor : public ::testing::Test {
+ void
+ SetUp() override
+ {
+ cloud.resize(2);
+ }
+ PointCloud<PointXYZ> cloud;
+};
+TYPED_TEST_SUITE_P(FunctorFilterFunctor);
+
+TYPED_TEST_P(FunctorFilterFunctor, type_check)
+{
+ using FunctorT = TypeParam;
+ const auto& functor = type_test::Helper<FunctorT>::value;
+
+ FunctorFilter<PointXYZ, FunctorT> filter_lambda{functor};
+ EXPECT_EQ(filter_lambda.getFunctor()(this->cloud, 0), 0);
+ EXPECT_EQ(filter_lambda.getFunctor()(this->cloud, 1), 1);
+}
+
+REGISTER_TYPED_TEST_SUITE_P(FunctorFilterFunctor, type_check);
+INSTANTIATE_TYPED_TEST_SUITE_P(pcl, FunctorFilterFunctor, type_test::types);
+
+int
+main(int argc, char** argv)
+{
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
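For context, a minimal sketch of how the new experimental FunctorFilter could be used outside the test harness, assuming only the API exercised in the tests above; the positive_z lambda and the point values are illustrative, not part of the change:

  #include <pcl/filters/experimental/functor_filter.h>
  #include <pcl/memory.h>
  #include <pcl/point_cloud.h>
  #include <pcl/point_types.h>

  int main()
  {
    auto cloud = pcl::make_shared<pcl::PointCloud<pcl::PointXYZ>>();
    cloud->push_back(pcl::PointXYZ(0.f, 0.f, 1.f));   // kept by the functor below
    cloud->push_back(pcl::PointXYZ(0.f, 0.f, -1.f));  // filtered out

    // The functor receives the cloud and a point index and returns something convertible to bool.
    const auto positive_z = [](const pcl::PointCloud<pcl::PointXYZ>& c, pcl::index_t idx) {
      return c[idx].z > 0.f;
    };

    pcl::experimental::FunctorFilter<pcl::PointXYZ, decltype(positive_z)> filter{positive_z};
    filter.setInputCloud(cloud);

    pcl::PointCloud<pcl::PointXYZ> output;
    filter.filter(output);  // output now contains only the point with positive z
  }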
filter.setInputCloud (cloud_in);
filter.filter (*cloud_filter_out);
//compare results
- EXPECT_EQ (cloud_filter_out->points.size (), ransac_inliers.size ());
+ EXPECT_EQ (cloud_filter_out->size (), ransac_inliers.size ());
//TODO: also compare content
}
#include <pcl/common/transforms.h>
-#include <pcl/common/eigen.h>
using namespace pcl;
// Check that indices are sorted
EXPECT_LT (indices[i], indices[i+1]);
// Compare original points with sampled indices against sampled points
- EXPECT_NEAR (cloud_walls->points[indices[i]].x, cloud_out.points[i].x, 1e-4);
- EXPECT_NEAR (cloud_walls->points[indices[i]].y, cloud_out.points[i].y, 1e-4);
- EXPECT_NEAR (cloud_walls->points[indices[i]].z, cloud_out.points[i].z, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices[i]].x, cloud_out[i].x, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices[i]].y, cloud_out[i].y, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices[i]].z, cloud_out[i].z, 1e-4);
}
IndicesConstPtr removed = sample.getRemovedIndices ();
// Check that indices are sorted
EXPECT_LT (indices2[i], indices2[i+1]);
// Compare original points with sampled indices against sampled points
- EXPECT_NEAR (cloud_walls->points[indices2[i]].x, cloud_out.points[i].x, 1e-4);
- EXPECT_NEAR (cloud_walls->points[indices2[i]].y, cloud_out.points[i].y, 1e-4);
- EXPECT_NEAR (cloud_walls->points[indices2[i]].z, cloud_out.points[i].z, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices2[i]].x, cloud_out[i].x, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices2[i]].y, cloud_out[i].y, 1e-4);
+ EXPECT_NEAR ((*cloud_walls)[indices2[i]].z, cloud_out[i].z, 1e-4);
}
}
*/
#include <pcl/test/gtest.h>
-#include <pcl/common/common.h>
#include <pcl/geometry/line_iterator.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/geometry/triangle_mesh.h>
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include "test_mesh_common_functions.h"
#include <pcl/geometry/polygon_mesh.h>
#include <pcl/geometry/mesh_conversion.h>
#include <pcl/memory.h>
-#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
*
*/
-#include <string>
#include <sstream>
#include <pcl/test/gtest.h>
*/
#include <cstdio>
-#include <fstream>
#include <string>
#include <pcl/test/gtest.h>
*/
#include <vector>
-#include <typeinfo>
#include <pcl/test/gtest.h>
*/
#include <vector>
-#include <typeinfo>
#include <pcl/test/gtest.h>
*/
#include <vector>
-#include <typeinfo>
#include <pcl/test/gtest.h>
--- /dev/null
+if(NOT HAVE_CUDA)
+ return()
+endif()
+
+add_subdirectory(octree)
--- /dev/null
+set(SUBSYS_NAME tests_gpu_octree)
+set(SUBSYS_DESC "Point cloud library gpu octree tests")
+set(SUBSYS_DEPS common octree gpu_containers gpu_octree gpu_utils)
+
+set(DEFAULT ON)
+set(build TRUE)
+set(REASON "")
+PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
+PCL_SUBSYS_DEPEND(build "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${OPT_DEPS})
+
+if(NOT build)
+ return()
+endif()
+
+PCL_ADD_TEST(gpu_octree_performance test_gpu_octree_perfomance FILES perfomance.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree)
+PCL_ADD_TEST(gpu_octree_approx_nearest test_gpu_approx_nearest FILES test_approx_nearest.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree pcl_gpu_utils)
+PCL_ADD_TEST(gpu_octree_bfrs test_gpu_bfrs FILES test_bfrs_gpu.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree)
+PCL_ADD_TEST(gpu_octree_host_radius test_gpu_host_radius_search FILES test_host_radius_search.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree)
+PCL_ADD_TEST(gpu_octree_knn_search test_gpu_knn_search FILES test_knn_search.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree)
+PCL_ADD_TEST(gpu_octree_radius_search test_gpu_radius_search FILES test_radius_search.cpp LINK_WITH pcl_gtest pcl_common pcl_octree pcl_gpu_octree)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+
+#ifndef _PCL_TEST_GPU_OCTREE_DATAGEN_
+#define _PCL_TEST_GPU_OCTREE_DATAGEN_
+
+#include <vector>
+#include <algorithm>
+#include <iostream>
+#include <Eigen/StdVector>
+#include <cstdlib>
+
+
+#if defined (_WIN32) || defined(_WIN64)
+ EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(pcl::PointXYZ)
+#endif
+
+
+namespace pcl
+{
+
+namespace gpu
+{
+
+struct DataGenerator
+{
+ using PointType = Octree::PointType;
+
+ std::size_t data_size;
+ std::size_t tests_num;
+
+ float cube_size;
+ float max_radius;
+
+ float shared_radius;
+
+ std::vector<PointType> points;
+ std::vector<PointType> queries;
+ std::vector<float> radiuses;
+ std::vector< std::vector<int> > bfresutls;
+
+ std::vector<int> indices;
+
+ DataGenerator() : data_size(871000), tests_num(10000), cube_size(1024.f)
+ {
+ max_radius = cube_size/15.f;
+ shared_radius = cube_size/20.f;
+ }
+
+ void operator()()
+ {
+ srand (0);
+
+ points.resize(data_size);
+ for(std::size_t i = 0; i < data_size; ++i)
+ {
+ points[i].x = ((float)rand())/RAND_MAX * cube_size;
+ points[i].y = ((float)rand())/RAND_MAX * cube_size;
+ points[i].z = ((float)rand())/RAND_MAX * cube_size;
+ }
+
+
+ queries.resize(tests_num);
+ radiuses.resize(tests_num);
+ for (std::size_t i = 0; i < tests_num; ++i)
+ {
+ queries[i].x = ((float)rand())/RAND_MAX * cube_size;
+ queries[i].y = ((float)rand())/RAND_MAX * cube_size;
+ queries[i].z = ((float)rand())/RAND_MAX * cube_size;
+ radiuses[i] = ((float)rand())/RAND_MAX * max_radius;
+ };
+
+ for(std::size_t i = 0; i < tests_num/2; ++i)
+ indices.push_back(i*2);
+ }
+
+ void bruteForceSearch(bool log = false, float radius = -1.f)
+ {
+ if (log)
+ std::cout << "BruteForceSearch";
+
+ int value100 = std::min<int>(tests_num, 50);
+ int step = tests_num/value100;
+
+ bfresutls.resize(tests_num);
+ for(std::size_t i = 0; i < tests_num; ++i)
+ {
+ if (log && i % step == 0)
+ {
+ std::cout << ".";
+ std::cout.flush();
+ }
+
+ std::vector<int>& curr_res = bfresutls[i];
+ curr_res.clear();
+
+ float query_radius = radius > 0 ? radius : radiuses[i];
+ const PointType& query = queries[i];
+
+ for(std::size_t ind = 0; ind < points.size(); ++ind)
+ {
+ const PointType& point = points[ind];
+
+ float dx = query.x - point.x;
+ float dy = query.y - point.y;
+ float dz = query.z - point.z;
+
+ if (dx*dx + dy*dy + dz*dz < query_radius * query_radius)
+ curr_res.push_back(ind);
+ }
+
+ std::sort(curr_res.begin(), curr_res.end());
+ }
+ if (log)
+ std::cout << "Done" << std::endl;
+ }
+
+ void printParams() const
+ {
+ std::cout << "Points number = " << data_size << std::endl;
+ std::cout << "Queries number = " << tests_num << std::endl;
+ std::cout << "Cube size = " << cube_size << std::endl;
+ std::cout << "Max radius = " << max_radius << std::endl;
+ std::cout << "Shared radius = " << shared_radius << std::endl;
+ }
+
+ template<typename Dst>
+ struct ConvPoint
+ {
+ Dst operator()(const PointType& src) const
+ {
+ Dst dst;
+ dst.x = src.x;
+ dst.y = src.y;
+ dst.z = src.z;
+ return dst;
+ }
+ };
+};
+
+} // namespace gpu
+} // namespace pcl
+
+#endif /* _PCL_TEST_GPU_OCTREE_DATAGEN_ */
+
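DataGenerator is a function object: the GPU octree tests that follow configure its public fields, invoke it to synthesize the point/query/radius sets, and optionally compute brute-force reference results. A minimal sketch of that call pattern (the values mirror those tests):

  pcl::gpu::DataGenerator data;
  data.data_size = 871000;      // number of points in the cube
  data.tests_num = 10000;       // number of queries
  data.cube_size = 1024.f;
  data();                       // fill points, queries and radiuses
  data.bruteForceSearch(true);  // optional CPU reference results, stored in data.bfresutls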
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#if defined _MSC_VER
+ #pragma warning (disable : 4996 4530)
+#endif
+
+#include <gtest/gtest.h>
+
+#include <iostream>
+#include <algorithm>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+
+#include <pcl/point_cloud.h>
+#include <pcl/octree/octree.h>
+
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/common/time.h>
+#include "data_source.hpp"
+
+using namespace pcl::gpu;
+
+using pcl::ScopeTime;
+
+#if defined HAVE_OPENCV
+ #include "opencv2/contrib/contrib.hpp"
+#endif
+
+//TEST(PCL_OctreeGPU, DISABLED_performance)
+TEST(PCL_OctreeGPU, performance)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 10000;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/15.f;
+ data.shared_radius = data.cube_size/15.f;
+ data.printParams();
+
+ //const int k = 32;
+
+ std::cout << "sizeof(pcl::gpu::Octree::PointType): " << sizeof(pcl::gpu::Octree::PointType) << std::endl;
+ //std::cout << "k = " << k << std::endl;
+ //generate data
+ data();
+
+ //prepare device cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+ //prepare queries_device
+ pcl::gpu::Octree::Queries queries_device;
+ pcl::gpu::Octree::Radiuses radiuses_device;
+ queries_device.upload(data.queries);
+ radiuses_device.upload(data.radiuses);
+
+ //prepare host cloud
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
+ cloud_host->width = data.points.size();
+ cloud_host->height = 1;
+ cloud_host->points.resize (cloud_host->width * cloud_host->height);
+ std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+
+ float host_octree_resolution = 25.f;
+
+ std::cout << "[!] Host octree resolution: " << host_octree_resolution << std::endl << std::endl;
+
+ std::cout << "====== Build performance =====" << std::endl;
+ // build device octree
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ {
+ ScopeTime up("gpu-build");
+ octree_device.build();
+ }
+ {
+ ScopeTime up("gpu-download");
+ octree_device.internalDownload();
+ }
+
+ //build host octree
+ pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
+ octree_host.setInputCloud (cloud_host);
+ {
+ ScopeTime t("host-build");
+ octree_host.addPointsFromInputCloud();
+ }
+
+ // build opencv octree
+#ifdef HAVE_OPENCV
+ cv::Octree octree_opencv;
+ const static int opencv_octree_points_per_leaf = 32;
+ std::vector<cv::Point3f> opencv_points(data.points.size());
+ std::transform(data.points.begin(), data.points.end(), opencv_points.begin(), DataGenerator::ConvPoint<cv::Point3f>());
+
+ {
+ ScopeTime t("opencv-build");
+ octree_opencv.buildTree(opencv_points, 10, opencv_octree_points_per_leaf);
+ }
+#endif
+
+ //// Radius search performance ///
+
+ const int max_answers = 500;
+ float dist;
+ int inds;
+
+ //host buffers
+ std::vector<int> indeces;
+ std::vector<float> pointRadiusSquaredDistance;
+#ifdef HAVE_OPENCV
+ std::vector<cv::Point3f> opencv_results;
+#endif
+
+ //reserve
+ indeces.reserve(data.data_size);
+ pointRadiusSquaredDistance.reserve(data.data_size);
+#ifdef HAVE_OPENCV
+ opencv_results.reserve(data.data_size);
+#endif
+
+ //device buffers
+ pcl::gpu::DeviceArray<int> bruteforce_results_device, buffer(cloud_device.size());
+ pcl::gpu::NeighborIndices result_device(queries_device.size(), max_answers);
+
+ //pcl::gpu::Octree::BatchResult distsKNN_device(queries_device.size() * k);
+ //pcl::gpu::Octree::BatchResultSqrDists indsKNN_device(queries_device.size() * k);
+
+ std::cout << "====== Separate radius for each query =====" << std::endl;
+
+ {
+ ScopeTime up("gpu--radius-search-batch-all");
+ octree_device.radiusSearch(queries_device, radiuses_device, max_answers, result_device);
+ }
+
+ {
+ ScopeTime up("gpu-radius-search-{host}-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], indeces, max_answers);
+ }
+
+ {
+ ScopeTime up("host-radius-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
+ data.radiuses[i], indeces, pointRadiusSquaredDistance, max_answers);
+ }
+
+ {
+ ScopeTime up("gpu_bruteforce-radius-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.radiuses[i], bruteforce_results_device, buffer);
+ }
+
+ std::cout << "====== Shared radius (" << data.shared_radius << ") =====" << std::endl;
+
+ {
+ ScopeTime up("gpu-radius-search-batch-all");
+ octree_device.radiusSearch(queries_device, data.shared_radius, max_answers, result_device);
+ }
+
+ {
+ ScopeTime up("gpu-radius-search-{host}-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_device.radiusSearchHost(data.queries[i], data.shared_radius, indeces, max_answers);
+ }
+
+ {
+ ScopeTime up("host-radius-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z),
+ data.shared_radius, indeces, pointRadiusSquaredDistance, max_answers);
+ }
+
+ {
+ ScopeTime up("gpu-radius-bruteforce-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.shared_radius, bruteforce_results_device, buffer);
+ }
+
+ std::cout << "====== Approx nearest search =====" << std::endl;
+
+ {
+ ScopeTime up("gpu-approx-nearest-batch-all");
+ octree_device.approxNearestSearch(queries_device, result_device);
+ }
+
+ {
+ ScopeTime up("gpu-approx-nearest-search-{host}-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_device.approxNearestSearchHost(data.queries[i], inds, dist);
+ }
+
+ {
+ ScopeTime up("host-approx-nearest-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_host.approxNearestSearch(data.queries[i], inds, dist);
+ }
+
+ /* std::cout << "====== knn search ( k fixed to " << k << " ) =====" << std::endl;
+ {
+ ScopeTime up("gpu-knn-batch-all");
+ octree_device.nearestKSearchBatch(queries_device, k, distsKNN_device, indsKNN_device);
+ }
+
+ {
+ ScopeTime up("host-knn-search-all");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_host.nearestKSearch(data.queries[i], k, indeces, pointRadiusSquaredDistance);
+ }*/
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
+
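Every timed block in the benchmark above relies on pcl::ScopeTime from <pcl/common/time.h>, which records its construction time and prints the labelled elapsed time when it is destroyed at the end of the enclosing scope, e.g.:

  {
    pcl::ScopeTime t("gpu-build");  // prints the elapsed time for this block on destruction
    octree_device.build();
  }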
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#if defined _MSC_VER
+ #pragma warning (disable : 4996 4530)
+#endif
+
+#include <gtest/gtest.h>
+
+#include <iostream>
+#include <fstream>
+#include <algorithm>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+#include <pcl/point_cloud.h>
+#include <pcl/octree/octree_search.h>
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+
+#include "data_source.hpp"
+
+using namespace pcl::gpu;
+
+//TEST(PCL_OctreeGPU, DISABLED_approxNearestSearch)
+TEST(PCL_OctreeGPU, approxNearestSearch)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 10000;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/30.f;
+ data.shared_radius = data.cube_size/30.f;
+ data.printParams();
+
+ const float host_octree_resolution = 25.f;
+
+ //generate
+ data();
+
+ //prepare device cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+
+ //prepare host cloud
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
+ cloud_host->width = data.points.size();
+ cloud_host->height = 1;
+ cloud_host->points.resize (cloud_host->width * cloud_host->height);
+ std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+
+ //gpu build
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ octree_device.build();
+
+ //build host octree
+ pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
+ octree_host.setInputCloud (cloud_host);
+ octree_host.addPointsFromInputCloud();
+
+ //upload queries
+ pcl::gpu::Octree::Queries queries_device;
+ queries_device.upload(data.queries);
+
+
+ //prepare output buffers on device
+ pcl::gpu::NeighborIndices result_device(data.tests_num, 1);
+ std::vector<int> result_host_pcl(data.tests_num);
+ std::vector<int> result_host_gpu(data.tests_num);
+ std::vector<float> dists_pcl(data.tests_num);
+ std::vector<float> dists_gpu(data.tests_num);
+
+ //search GPU shared
+ octree_device.approxNearestSearch(queries_device, result_device);
+
+ std::vector<int> downloaded;
+ result_device.data.download(downloaded);
+
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ octree_host.approxNearestSearch(data.queries[i], result_host_pcl[i], dists_pcl[i]);
+ octree_device.approxNearestSearchHost(data.queries[i], result_host_gpu[i], dists_gpu[i]);
+ }
+
+ ASSERT_TRUE (downloaded == result_host_gpu);
+
+ int count_gpu_better = 0;
+ int count_pcl_better = 0;
+ float diff_pcl_better = 0;
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ float diff = dists_pcl[i] - dists_gpu[i];
+ bool gpu_better = diff > 0;
+
+ ++(gpu_better ? count_gpu_better : count_pcl_better);
+
+ if (!gpu_better)
+ diff_pcl_better +=std::abs(diff);
+ }
+
+ diff_pcl_better /=count_pcl_better;
+
+ std::cout << "count_gpu_better: " << count_gpu_better << std::endl;
+ std::cout << "count_pcl_better: " << count_pcl_better << std::endl;
+ std::cout << "avg_diff_pcl_better: " << diff_pcl_better << std::endl;
+
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
+
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#include <gtest/gtest.h>
+
+#include <iostream>
+#include <numeric>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+
+#include <pcl/point_cloud.h>
+
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+
+#include "data_source.hpp"
+
+using namespace pcl::gpu;
+
+
+//TEST (PCL_GPU, DISABLED_bruteForceRadiusSearchGPU)
+TEST (PCL_GPU, bruteForceRadiusSearchGPU)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 100;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/15.f;
+ data.shared_radius = data.cube_size/20.f;
+ data.printParams();
+
+ //generate
+ data();
+
+ // brute force radius search
+ data.bruteForceSearch();
+
+ //prepare gpu cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+ pcl::gpu::DeviceArray<int> results_device, buffer(cloud_device.size());
+
+ std::vector<int> results_host;
+ std::vector<std::size_t> sizes;
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ pcl::gpu::bruteForceRadiusSearchGPU(cloud_device, data.queries[i], data.radiuses[i], results_device, buffer);
+
+ results_device.download(results_host);
+ std::sort(results_host.begin(), results_host.end());
+
+ ASSERT_TRUE (results_host == data.bfresutls[i]);
+ sizes.push_back(results_device.size());
+ }
+
+ float avg_size = std::accumulate(sizes.begin(), sizes.end(), (std::size_t)0) * (1.f/sizes.size());
+
+ std::cout << "avg_result_size = " << avg_size << std::endl;
+ ASSERT_GT(avg_size, 5);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
+
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#include <numeric>
+#include <algorithm>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/octree/octree_search.h>
+
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/gpu/containers/initialization.h>
+
+#include "data_source.hpp"
+
+using namespace pcl;
+using namespace pcl::gpu;
+
+//TEST(PCL_OctreeGPU, DISABLED_hostRadiusSearch)
+TEST(PCL_OctreeGPU, hostRadiusSearch)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 10000;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/15.f;
+ data.shared_radius = data.cube_size/20.f;
+ data.printParams();
+
+ //generate
+ data();
+
+ //prepare device cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+ //prepare host cloud
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
+ cloud_host->width = data.points.size();
+ cloud_host->height = 1;
+ cloud_host->points.resize (cloud_host->width * cloud_host->height);
+ std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+
+ // build device octree
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ octree_device.build();
+
+
+
+ // build host octree
+ float resolution = 25.f;
+ std::cout << "[!]Octree resolution: " << resolution << std::endl;
+ pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(resolution);
+ octree_host.setInputCloud (cloud_host);
+ octree_host.addPointsFromInputCloud ();
+
+ //perform bruteForceSearch
+ data.bruteForceSearch(true);
+
+ std::vector<int> sizes;
+ sizes.reserve(data.tests_num);
+ octree_device.internalDownload();
+
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ //search host on octree that was built on device
+ std::vector<int> results_host_gpu; //host search
+ octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], results_host_gpu);
+
+ //search host
+ std::vector<float> dists;
+ std::vector<int> results_host;
+ octree_host.radiusSearch(pcl::PointXYZ(data.queries[i].x, data.queries[i].y, data.queries[i].z), data.radiuses[i], results_host, dists);
+
+ std::sort(results_host_gpu.begin(), results_host_gpu.end());
+ std::sort(results_host.begin(), results_host.end());
+
+    ASSERT_EQ (results_host_gpu, results_host);
+    ASSERT_EQ (results_host_gpu, data.bfresutls[i]);
+ sizes.push_back(results_host.size());
+ }
+
+  float avg_size = std::accumulate(sizes.begin(), sizes.end(), 0) * (1.f/sizes.size());
+
+ std::cout << "avg_result_size = " << avg_size << std::endl;
+ ASSERT_GT(avg_size, 5);
+}
+
+
+int main (int argc, char** argv)
+{
+ const int device = 0;
+ pcl::gpu::setDevice(device);
+ pcl::gpu::printShortCudaDeviceInfo(device);
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#if defined _MSC_VER
+ #pragma warning (disable : 4996 4530)
+#endif
+
+#include <gtest/gtest.h>
+
+#include <iostream>
+#include <fstream>
+#include <algorithm>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+#include <pcl/point_cloud.h>
+#include <pcl/octree/octree_search.h>
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+#include <pcl/common/time.h>
+#include "data_source.hpp"
+
+using namespace pcl::gpu;
+
+struct PriorityPair
+{
+ int index;
+ float dist2;
+
+ bool operator<(const PriorityPair& other) const { return dist2 < other.dist2; }
+
+ bool operator==(const PriorityPair& other) const { return dist2 == other.dist2 && index == other.index; }
+};
+
+//TEST(PCL_OctreeGPU, DISABLED_exactNeighbourSearch)
+TEST(PCL_OctreeGPU, exactNeighbourSearch)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 10000;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/30.f;
+ data.shared_radius = data.cube_size/30.f;
+ data.printParams();
+
+ const float host_octree_resolution = 25.f;
+ const int k = 1; // only this is supported
+
+ //generate
+ data();
+
+ //prepare device cloud
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+ //prepare host cloud
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_host(new pcl::PointCloud<pcl::PointXYZ>);
+ cloud_host->width = data.points.size();
+ cloud_host->height = 1;
+ cloud_host->points.resize (cloud_host->width * cloud_host->height);
+ std::transform(data.points.begin(), data.points.end(), cloud_host->points.begin(), DataGenerator::ConvPoint<pcl::PointXYZ>());
+
+ //gpu build
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ octree_device.build();
+
+ //build host octree
+ pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree_host(host_octree_resolution);
+ octree_host.setInputCloud (cloud_host);
+ octree_host.addPointsFromInputCloud();
+
+ //upload queries
+ pcl::gpu::Octree::Queries queries_device;
+ queries_device.upload(data.queries);
+
+ //prepare output buffers on device
+ pcl::gpu::NeighborIndices result_device(data.tests_num, k);
+
+ //prepare output buffers on host
+ std::vector<std::vector< int> > result_host(data.tests_num);
+ std::vector<std::vector<float> > dists_host(data.tests_num);
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ result_host[i].reserve(k);
+ dists_host[i].reserve(k);
+ }
+
+ //search GPU shared
+ {
+ pcl::ScopeTime time("1nn-gpu");
+ octree_device.nearestKSearchBatch(queries_device, k, result_device);
+ }
+
+ std::vector<int> downloaded, downloaded_cur;
+ result_device.data.download(downloaded);
+
+ {
+ pcl::ScopeTime time("1nn-cpu");
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ octree_host.nearestKSearch(data.queries[i], k, result_host[i], dists_host[i]);
+ }
+
+ //verify results
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ //std::cout << i << std::endl;
+ std::vector<int>& results_host_cur = result_host[i];
+ std::vector<float>& dists_host_cur = dists_host[i];
+
+ int beg = i * k;
+ int end = beg + k;
+
+ downloaded_cur.assign(downloaded.begin() + beg, downloaded.begin() + end);
+
+ std::vector<PriorityPair> pairs_host;
+ std::vector<PriorityPair> pairs_gpu;
+ for(int n = 0; n < k; ++n)
+ {
+ PriorityPair host;
+ host.index = results_host_cur[n];
+ host.dist2 = dists_host_cur[n];
+ pairs_host.push_back(host);
+
+ PriorityPair gpu;
+ gpu.index = downloaded_cur[n];
+
+ float dist = (data.queries[i].getVector3fMap() - data.points[gpu.index].getVector3fMap()).norm();
+ gpu.dist2 = dist * dist;
+ pairs_gpu.push_back(gpu);
+ }
+
+ std::sort(pairs_host.begin(), pairs_host.end());
+ std::sort(pairs_gpu.begin(), pairs_gpu.end());
+
+ while (pairs_host.size ())
+ {
+ ASSERT_EQ ( pairs_host.back().index , pairs_gpu.back().index );
+ EXPECT_NEAR ( pairs_host.back().dist2 , pairs_gpu.back().dist2, 1e-2 );
+
+ pairs_host.pop_back();
+ pairs_gpu.pop_back();
+ }
+ }
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
+
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2011, Willow Garage, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Willow Garage, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
+ */
+
+#include <gtest/gtest.h>
+
+#include <iostream>
+#include <fstream>
+#include <numeric>
+
+#if defined _MSC_VER
+ #pragma warning (disable: 4521)
+#endif
+
+#include <pcl/point_cloud.h>
+
+#if defined _MSC_VER
+ #pragma warning (default: 4521)
+#endif
+
+#include <pcl/gpu/octree/octree.hpp>
+#include <pcl/gpu/containers/device_array.h>
+
+#include "data_source.hpp"
+
+using namespace pcl::gpu;
+
+//TEST(PCL_OctreeGPU, DISABLED_batchRadiusSearch)
+TEST(PCL_OctreeGPU, batchRadiusSearch)
+{
+ DataGenerator data;
+ data.data_size = 871000;
+ data.tests_num = 10000;
+ data.cube_size = 1024.f;
+ data.max_radius = data.cube_size/30.f;
+ data.shared_radius = data.cube_size/30.f;
+ data.printParams();
+
+ const int max_answers = 333;
+
+ //generate
+ data();
+
+ //prepare gpu cloud
+
+ pcl::gpu::Octree::PointCloud cloud_device;
+ cloud_device.upload(data.points);
+
+ //gpu build
+ pcl::gpu::Octree octree_device;
+ octree_device.setCloud(cloud_device);
+ octree_device.build();
+
+ //upload queries
+ pcl::gpu::Octree::Queries queries_device;
+ pcl::gpu::Octree::Radiuses radiuses_device;
+ queries_device.upload(data.queries);
+ radiuses_device.upload(data.radiuses);
+
+ //prepare output buffers on device
+
+ pcl::gpu::NeighborIndices result_device1(queries_device.size(), max_answers);
+ pcl::gpu::NeighborIndices result_device2(queries_device.size(), max_answers);
+ pcl::gpu::NeighborIndices result_device3(data.indices.size(), max_answers);
+
+ //prepare output buffers on host
+ std::vector< std::vector<int> > host_search1(data.tests_num);
+ std::vector< std::vector<int> > host_search2(data.tests_num);
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ host_search1[i].reserve(max_answers);
+ host_search2[i].reserve(max_answers);
+ }
+
+ //search GPU shared
+ octree_device.radiusSearch(queries_device, data.shared_radius, max_answers, result_device1);
+
+ //search GPU individual
+ octree_device.radiusSearch(queries_device, radiuses_device, max_answers, result_device2);
+
+ //search GPU shared with indices
+ pcl::gpu::Octree::Indices indices;
+ indices.upload(data.indices);
+ octree_device.radiusSearch(queries_device, indices, data.shared_radius, max_answers, result_device3);
+
+ //search CPU
+ octree_device.internalDownload();
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ octree_device.radiusSearchHost(data.queries[i], data.shared_radius, host_search1[i], max_answers);
+ octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], host_search2[i], max_answers);
+ }
+
+ //download results
+ std::vector<int> sizes1;
+ std::vector<int> sizes2;
+ std::vector<int> sizes3;
+ result_device1.sizes.download(sizes1);
+ result_device2.sizes.download(sizes2);
+ result_device3.sizes.download(sizes3);
+
+ std::vector<int> downloaded_buffer1, downloaded_buffer2, downloaded_buffer3, results_batch;
+ result_device1.data.download(downloaded_buffer1);
+ result_device2.data.download(downloaded_buffer2);
+ result_device3.data.download(downloaded_buffer3);
+
+ //data.bruteForceSearch();
+
+ //verify results
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ std::vector<int>& results_host = host_search1[i];
+
+ int beg = i * max_answers;
+ int end = beg + sizes1[i];
+
+ results_batch.assign(downloaded_buffer1.begin() + beg, downloaded_buffer1.begin() + end);
+
+ std::sort(results_batch.begin(), results_batch.end());
+ std::sort(results_host.begin(), results_host.end());
+
+ if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
+ results_host.resize(max_answers);
+
+    ASSERT_EQ (results_batch, results_host);
+
+ //vector<int>& results_bf = data.bfresutls[i];
+ //ASSERT_EQ ( ( results_bf == results_batch), true );
+ //ASSERT_EQ ( ( results_bf == results_host ), true );
+ }
+
+ float avg_size1 = std::accumulate(sizes1.begin(), sizes1.end(), 0) * (1.f/sizes1.size());
+
+ std::cout << "avg_result_size1 = " << avg_size1 << std::endl;
+ ASSERT_GT(avg_size1, 5);
+
+
+ //verify results
+ for(std::size_t i = 0; i < data.tests_num; ++i)
+ {
+ std::vector<int>& results_host = host_search2[i];
+
+ int beg = i * max_answers;
+ int end = beg + sizes2[i];
+
+ results_batch.assign(downloaded_buffer2.begin() + beg, downloaded_buffer2.begin() + end);
+
+ std::sort(results_batch.begin(), results_batch.end());
+ std::sort(results_host.begin(), results_host.end());
+
+ if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
+ results_host.resize(max_answers);
+
+    ASSERT_EQ (results_batch, results_host);
+
+ //vector<int>& results_bf = data.bfresutls[i];
+ //ASSERT_EQ ( ( results_bf == results_batch), true );
+ //ASSERT_EQ ( ( results_bf == results_host ), true );
+ }
+
+ float avg_size2 = std::accumulate(sizes2.begin(), sizes2.end(), 0) * (1.f/sizes2.size());
+
+ std::cout << "avg_result_size2 = " << avg_size2 << std::endl;
+ ASSERT_GT(avg_size2, 5);
+
+
+ //verify results
+ for(std::size_t i = 0; i < data.tests_num; i+=2)
+ {
+ std::vector<int>& results_host = host_search1[i];
+
+ int beg = i/2 * max_answers;
+ int end = beg + sizes3[i/2];
+
+ results_batch.assign(downloaded_buffer3.begin() + beg, downloaded_buffer3.begin() + end);
+
+ std::sort(results_batch.begin(), results_batch.end());
+ std::sort(results_host.begin(), results_host.end());
+
+ if ((int)results_batch.size() == max_answers && results_batch.size() < results_host.size() && max_answers)
+ results_host.resize(max_answers);
+
+    ASSERT_EQ (results_batch, results_host);
+
+ //vector<int>& results_bf = data.bfresutls[i];
+ //ASSERT_EQ ( ( results_bf == results_batch), true );
+ //ASSERT_EQ ( ( results_bf == results_host ), true );
+ }
+
+ float avg_size3 = std::accumulate(sizes3.begin(), sizes3.end(), 0) * (1.f/sizes3.size());
+
+ std::cout << "avg_result_size3 = " << avg_size3 << std::endl;
+ ASSERT_GT(avg_size3, 5);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
+
#define TYPED_TEST_SUITE TYPED_TEST_CASE
#endif
+/**
+ * \brief Macro to choose between TYPED_TEST_CASE_P and TYPED_TEST_SUITE_P depending on the GTest version
+ *
+ * \ingroup test
+ */
+#if !defined(TYPED_TEST_SUITE_P)
+ #define TYPED_TEST_SUITE_P TYPED_TEST_CASE_P
+#endif
+
/**
 * \brief Macro to choose between INSTANTIATE_TEST_CASE_P and INSTANTIATE_TEST_SUITE_P depending on the GTest version
*
#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P
#endif
+/**
+ * \brief Macro to choose between INSTANTIATE_TYPED_TEST_CASE_P and INSTANTIATE_TYPED_TEST_SUITE_P depending on the GTest version
+ *
+ * \ingroup test
+ */
+#if !defined(INSTANTIATE_TYPED_TEST_SUITE_P)
+ #define INSTANTIATE_TYPED_TEST_SUITE_P INSTANTIATE_TYPED_TEST_CASE_P
+#endif
+
+/**
+ * \brief Macro to choose between REGISTER_TYPED_TEST_CASE_P and REGISTER_TYPED_TEST_SUITE_P depending on the GTest version
+ *
+ * \ingroup test
+ */
+#if !defined(REGISTER_TYPED_TEST_SUITE_P)
+ #define REGISTER_TYPED_TEST_SUITE_P REGISTER_TYPED_TEST_CASE_P
+#endif
PCL_ADD_TEST(buffers test_buffers
FILES test_buffers.cpp
LINK_WITH pcl_gtest pcl_common)
+
+PCL_ADD_TEST(io_octree_compression test_octree_compression
+ FILES test_octree_compression.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_io pcl_octree)
#include <pcl/test/gtest.h>
#include <pcl/point_types.h>
-#include <pcl/common/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/pcd_grabber.h>
#include <pcl/io/image_grabber.h>
#include <thread>
#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using PointT = pcl::PointXYZRGBA;
using CloudT = pcl::PointCloud<PointT>;
-string tiff_dir_;
-string pclzf_dir_;
-string pcd_dir_;
+std::string tiff_dir_;
+std::string pclzf_dir_;
+std::string pcd_dir_;
std::vector<CloudT::ConstPtr> pcds_;
std::vector<std::string> pcd_files_;
pcl::PCLPointCloud2 blob;
int res = loadPCDFile ("complex_ascii.pcd", blob);
- EXPECT_NE (int (res), -1);
+ EXPECT_NE (res, -1);
EXPECT_EQ (blob.width, 1);
EXPECT_EQ (blob.height, 1);
- EXPECT_EQ (blob.is_dense, true);
+ EXPECT_TRUE (blob.is_dense);
EXPECT_EQ (blob.data.size (), 4 * 33 + 10 * 1 + 4 * 3);
// Check fields
pcl::PCLPointCloud2 blob;
int res = loadPCDFile ("all_types.pcd", blob);
- EXPECT_NE (int (res), -1);
+ EXPECT_NE (res, -1);
EXPECT_EQ (blob.width, 1);
EXPECT_EQ (blob.height, 1);
EXPECT_EQ (blob.data.size (), 1 + 1 * 2 + 2 * 1 + 2 * 2 + 4 * 1 + 4 * 2 + 4 * 1 + 8 * 2);
- EXPECT_EQ (blob.is_dense, true);
+ EXPECT_TRUE (blob.is_dense);
EXPECT_EQ (blob.fields.size (), 8);
// Check fields
// Copy the point cloud data
cloud_c = cloud_a;
cloud_c += cloud_b;
- EXPECT_EQ (cloud_c.points.size (), cloud_a.points.size () + cloud_b.points.size ());
+ EXPECT_EQ (cloud_c.size (), cloud_a.size () + cloud_b.size ());
EXPECT_EQ (cloud_c.width, cloud_a.width + cloud_b.width);
- EXPECT_EQ (int (cloud_c.height), 1);
+ EXPECT_EQ (cloud_c.height, 1);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- EXPECT_FLOAT_EQ (cloud_c.points[i].x, cloud_a.points[i].x);
- EXPECT_FLOAT_EQ (cloud_c.points[i].y, cloud_a.points[i].y);
- EXPECT_FLOAT_EQ (cloud_c.points[i].z, cloud_a.points[i].z);
+ EXPECT_FLOAT_EQ (cloud_c[i].x, cloud_a[i].x);
+ EXPECT_FLOAT_EQ (cloud_c[i].y, cloud_a[i].y);
+ EXPECT_FLOAT_EQ (cloud_c[i].z, cloud_a[i].z);
}
- for (std::size_t i = cloud_a.points.size (); i < cloud_c.points.size (); ++i)
+ for (std::size_t i = cloud_a.size (); i < cloud_c.size (); ++i)
{
- EXPECT_FLOAT_EQ (cloud_c.points[i].x, cloud_b.points[i - cloud_a.points.size ()].x);
- EXPECT_FLOAT_EQ (cloud_c.points[i].y, cloud_b.points[i - cloud_a.points.size ()].y);
- EXPECT_FLOAT_EQ (cloud_c.points[i].z, cloud_b.points[i - cloud_a.points.size ()].z);
+ EXPECT_FLOAT_EQ (cloud_c[i].x, cloud_b[i - cloud_a.size ()].x);
+ EXPECT_FLOAT_EQ (cloud_c[i].y, cloud_b[i - cloud_a.size ()].y);
+ EXPECT_FLOAT_EQ (cloud_c[i].z, cloud_b[i - cloud_a.size ()].z);
}
}
cloud_a.points.resize (cloud_a.width * cloud_a.height);
cloud_b.points.resize (cloud_b.width * cloud_b.height);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (auto& point: cloud_a)
{
- cloud_a[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_a[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_a[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
- for (std::size_t i = 0; i < cloud_b.points.size (); ++i)
+ for (auto& point: cloud_b)
{
- cloud_b[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_b[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_b[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ point.normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
pcl::concatenateFields (cloud_a, cloud_b, cloud_c);
- EXPECT_EQ (cloud_c.points.size (), cloud_a.points.size ());
+ EXPECT_EQ (cloud_c.size (), cloud_a.size ());
EXPECT_EQ (cloud_c.width, cloud_a.width);
EXPECT_EQ (cloud_c.height, cloud_a.height);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- EXPECT_FLOAT_EQ (cloud_c.points[i].x, cloud_a.points[i].x);
- EXPECT_FLOAT_EQ (cloud_c.points[i].y, cloud_a.points[i].y);
- EXPECT_FLOAT_EQ (cloud_c.points[i].z, cloud_a.points[i].z);
- EXPECT_FLOAT_EQ (cloud_c.points[i].normal[0], cloud_b.points[i].normal[0]);
- EXPECT_FLOAT_EQ (cloud_c.points[i].normal[1], cloud_b.points[i].normal[1]);
- EXPECT_FLOAT_EQ (cloud_c.points[i].normal[2], cloud_b.points[i].normal[2]);
+ EXPECT_FLOAT_EQ (cloud_c[i].x, cloud_a[i].x);
+ EXPECT_FLOAT_EQ (cloud_c[i].y, cloud_a[i].y);
+ EXPECT_FLOAT_EQ (cloud_c[i].z, cloud_a[i].z);
+ EXPECT_FLOAT_EQ (cloud_c[i].normal[0], cloud_b[i].normal[0]);
+ EXPECT_FLOAT_EQ (cloud_c[i].normal[1], cloud_b[i].normal[1]);
+ EXPECT_FLOAT_EQ (cloud_c[i].normal[2], cloud_b[i].normal[2]);
}
}
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].intensity = static_cast<float> (i);
+ cloud[i].intensity = static_cast<float> (i);
}
PointXYZI first, last;
- first.x = cloud.points[0].x; first.y = cloud.points[0].y; first.z = cloud.points[0].z; first.intensity = cloud.points[0].intensity;
- last.x = cloud.points[nr_p - 1].x; last.y = cloud.points[nr_p - 1].y; last.z = cloud.points[nr_p - 1].z; last.intensity = cloud.points[nr_p - 1].intensity;
+ first.x = cloud[0].x; first.y = cloud[0].y; first.z = cloud[0].z; first.intensity = cloud[0].intensity;
+ last.x = cloud[nr_p - 1].x; last.y = cloud[nr_p - 1].y; last.z = cloud[nr_p - 1].z; last.intensity = cloud[nr_p - 1].intensity;
// Tests for PointCloud::operator()
EXPECT_FLOAT_EQ (first.x, cloud (0, 0).x);
EXPECT_EQ (fields.size (), std::size_t (4));
int x_idx = pcl::getFieldIndex<PointXYZI> ("x", fields);
EXPECT_EQ (x_idx, 0);
- EXPECT_EQ (fields[x_idx].offset, std::uint32_t (0));
+ EXPECT_EQ (fields[x_idx].offset, 0);
EXPECT_EQ (fields[x_idx].name, "x");
EXPECT_EQ (fields[x_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (fields[x_idx].count, std::uint32_t (1));
+ EXPECT_EQ (fields[x_idx].count, 1);
int y_idx = pcl::getFieldIndex<PointXYZI> ("y", fields);
EXPECT_EQ (y_idx, 1);
- EXPECT_EQ (fields[y_idx].offset, std::uint32_t (4));
+ EXPECT_EQ (fields[y_idx].offset, 4);
EXPECT_EQ (fields[y_idx].name, "y");
EXPECT_EQ (fields[y_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (fields[y_idx].count, std::uint32_t (1));
+ EXPECT_EQ (fields[y_idx].count, 1);
int z_idx = pcl::getFieldIndex<PointXYZI> ("z", fields);
EXPECT_EQ (z_idx, 2);
- EXPECT_EQ (fields[z_idx].offset, std::uint32_t (8));
+ EXPECT_EQ (fields[z_idx].offset, 8);
EXPECT_EQ (fields[z_idx].name, "z");
EXPECT_EQ (fields[z_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (fields[z_idx].count, std::uint32_t (1));
+ EXPECT_EQ (fields[z_idx].count, 1);
int intensity_idx = pcl::getFieldIndex<PointXYZI> ("intensity", fields);
EXPECT_EQ (intensity_idx, 3);
- EXPECT_EQ (fields[intensity_idx].offset, std::uint32_t (16)); // NOTE: intensity_idx.offset should be 12, but we are padding in PointXYZ (!)
+ EXPECT_EQ (fields[intensity_idx].offset, 16); // NOTE: intensity_idx.offset should be 12, but we are padding in PointXYZ (!)
EXPECT_EQ (fields[intensity_idx].name, "intensity");
EXPECT_EQ (fields[intensity_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (fields[intensity_idx].count, std::uint32_t (1));
+ EXPECT_EQ (fields[intensity_idx].count, 1);
// Convert from data type to blob
toPCLPointCloud2 (cloud, cloud_blob);
// Test getFieldIndex
x_idx = pcl::getFieldIndex (cloud_blob, "x");
EXPECT_EQ (x_idx, 0);
- EXPECT_EQ (cloud_blob.fields[x_idx].offset, std::uint32_t (0));
+ EXPECT_EQ (cloud_blob.fields[x_idx].offset, 0);
EXPECT_EQ (cloud_blob.fields[x_idx].name, "x");
EXPECT_EQ (cloud_blob.fields[x_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (cloud_blob.fields[x_idx].count, std::uint32_t (1));
+ EXPECT_EQ (cloud_blob.fields[x_idx].count, 1);
y_idx = pcl::getFieldIndex (cloud_blob, "y");
EXPECT_EQ (y_idx, 1);
- EXPECT_EQ (cloud_blob.fields[y_idx].offset, std::uint32_t (4));
+ EXPECT_EQ (cloud_blob.fields[y_idx].offset, 4);
EXPECT_EQ (cloud_blob.fields[y_idx].name, "y");
EXPECT_EQ (cloud_blob.fields[y_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (cloud_blob.fields[y_idx].count, std::uint32_t (1));
+ EXPECT_EQ (cloud_blob.fields[y_idx].count, 1);
z_idx = pcl::getFieldIndex (cloud_blob, "z");
EXPECT_EQ (z_idx, 2);
- EXPECT_EQ (cloud_blob.fields[z_idx].offset, std::uint32_t (8));
+ EXPECT_EQ (cloud_blob.fields[z_idx].offset, 8);
EXPECT_EQ (cloud_blob.fields[z_idx].name, "z");
EXPECT_EQ (cloud_blob.fields[z_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (cloud_blob.fields[z_idx].count, std::uint32_t (1));
+ EXPECT_EQ (cloud_blob.fields[z_idx].count, 1);
intensity_idx = pcl::getFieldIndex (cloud_blob, "intensity");
EXPECT_EQ (intensity_idx, 3);
//EXPECT_EQ (cloud_blob.fields[intensity_idx].offset, (std::uint32_t)12); // NOTE: the fields.offset is 16 in PointCloud<PointXYZI>, but we are obtaining the correct offset in toPCLPointCloud2
- EXPECT_EQ (cloud_blob.fields[intensity_idx].offset, std::uint32_t (16)); // NOTE: the fields.offset is 16 in PointCloud<PointXYZI>, but we are obtaining the correct offset in toPCLPointCloud2
+ EXPECT_EQ (cloud_blob.fields[intensity_idx].offset, 16); // NOTE: the fields.offset is 16 in PointCloud<PointXYZI>, but we are obtaining the correct offset in toPCLPointCloud2
EXPECT_EQ (cloud_blob.fields[intensity_idx].name, "intensity");
EXPECT_EQ (cloud_blob.fields[intensity_idx].datatype, pcl::PCLPointField::FLOAT32);
- EXPECT_EQ (cloud_blob.fields[intensity_idx].count, std::uint32_t (1));
+ EXPECT_EQ (cloud_blob.fields[intensity_idx].count, 1);
fromPCLPointCloud2 (cloud_blob, cloud);
for (std::size_t i = 0; i < nr_p; ++i)
- EXPECT_EQ (cloud.points[i].intensity, i);
+ EXPECT_EQ (cloud[i].intensity, i);
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for toPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for toPCLPointCloud2 ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense); // test for toPCLPointCloud2 ()
//EXPECT_EQ ((std::size_t)cloud_blob.data.size () * 2, // PointXYZI is 16*2 (XYZ+1, Intensity+3)
// cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for toPCLPointCloud2 ()
// Make sure we have permissions to write there
PCDWriter w;
int res = w.writeASCII ("test_pcl_io.pcd", cloud_blob, Eigen::Vector4f::Zero (), Eigen::Quaternionf::Identity (), 10);
- EXPECT_EQ (int (res), 0); // test for savePCDFileASCII ()
+ EXPECT_EQ (res, 0); // test for savePCDFileASCII ()
// Please make sure that this file exists, otherwise the test will fail.
res = loadPCDFile ("test_pcl_io.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for loadPCDFile ()
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for loadPCDFile ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense); // test for loadPCDFile ()
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
// Make sure we have permissions to write there
res = savePCDFile ("test_pcl_io.pcd", cloud_blob, Eigen::Vector4f::Zero (), Eigen::Quaternionf::Identity (), true);
- EXPECT_EQ (int (res), 0); // test for savePCDFileBinary ()
+ EXPECT_EQ (res, 0); // test for savePCDFileBinary ()
// Please make sure that this file exists, otherwise the test will fail.
res = loadPCDFile ("test_pcl_io.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for loadPCDFile ()
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for loadPCDFile ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense);
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
// Save as ASCII
try
std::cerr << e.detailedMessage () << std::endl;
}
res = loadPCDFile ("test_pcl_io_binary.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for loadPCDFile ()
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for loadPCDFile ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense);
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
// Save as ASCII
try
std::cerr << e.detailedMessage () << std::endl;
}
res = loadPCDFile ("test_pcl_io_ascii.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for loadPCDFile ()
- EXPECT_EQ (bool (cloud_blob.is_dense), true);
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for loadPCDFile ()
+ EXPECT_TRUE (cloud_blob.is_dense);
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (float (cloud.points[nr_p - 1].intensity), float (last.intensity)); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (float (cloud[nr_p - 1].intensity), float (last.intensity)); // test for fromPCLPointCloud2 ()
std::vector<int> indices (cloud.width * cloud.height / 2);
for (int i = 0; i < static_cast<int> (indices.size ()); ++i) indices[i] = i;
std::cerr << e.detailedMessage () << std::endl;
}
res = loadPCDFile ("test_pcl_io_binary.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width * cloud.height / 2); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), 1); // test for loadPCDFile ()
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width * cloud.height / 2); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, 1); // test for loadPCDFile ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense);
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p / 2); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p / 2); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (float (cloud.points[0].intensity), float (first.intensity)); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (float (cloud[0].intensity), float (first.intensity)); // test for fromPCLPointCloud2 ()
indices.resize (cloud.width * cloud.height / 2);
for (int i = 0; i < static_cast<int> (indices.size ()); ++i) indices[i] = i;
std::cerr << e.detailedMessage () << std::endl;
}
res = loadPCDFile ("test_pcl_io_ascii.pcd", cloud_blob);
- EXPECT_NE (int (res), -1); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width * cloud.height / 2); // test for loadPCDFile ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), 1); // test for loadPCDFile ()
- EXPECT_EQ (bool (cloud_blob.is_dense), true);
+ EXPECT_NE (res, -1); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.width, cloud.width * cloud.height / 2); // test for loadPCDFile ()
+ EXPECT_EQ (cloud_blob.height, 1); // test for loadPCDFile ()
+ EXPECT_TRUE (cloud_blob.is_dense);
EXPECT_EQ (std::size_t (cloud_blob.data.size () * 2), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p / 4); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p / 4); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
remove ("test_pcl_io_ascii.pcd");
remove ("test_pcl_io_binary.pcd");
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].intensity = static_cast<float> (i);
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].intensity = static_cast<float> (i);
}
PointXYZI first, last;
- first.x = cloud.points[0].x; first.y = cloud.points[0].y; first.z = cloud.points[0].z; first.intensity = cloud.points[0].intensity;
- last.x = cloud.points[nr_p - 1].x; last.y = cloud.points[nr_p - 1].y; last.z = cloud.points[nr_p - 1].z; last.intensity = cloud.points[nr_p - 1].intensity;
+ first.x = cloud[0].x; first.y = cloud[0].y; first.z = cloud[0].z; first.intensity = cloud[0].intensity;
+ last.x = cloud[nr_p - 1].x; last.y = cloud[nr_p - 1].y; last.z = cloud[nr_p - 1].z; last.intensity = cloud[nr_p - 1].intensity;
// Convert from data type to blob
toPCLPointCloud2 (cloud, cloud_blob);
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width); // test for toPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for toPCLPointCloud2 ()
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense); // test for toPCLPointCloud2 ()
//EXPECT_EQ ((std::size_t)cloud_blob.data.size () * 2, // PointXYZI is 16*2 (XYZ+1, Intensity+3)
// cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for toPCLPointCloud2 ()
PCDReader reader;
reader.read ("test_pcl_io.pcd", cloud_blob);
- EXPECT_EQ (std::uint32_t (cloud_blob.width), cloud.width);
- EXPECT_EQ (std::uint32_t (cloud_blob.height), cloud.height);
+ EXPECT_EQ (cloud_blob.width, cloud.width);
+ EXPECT_EQ (cloud_blob.height, cloud.height);
EXPECT_EQ (bool (cloud_blob.is_dense), cloud.is_dense);
//EXPECT_EQ ((std::size_t)cloud_blob.data.size () * 2, // PointXYZI is 16*2 (XYZ+1, Intensity+3)
// cloud_blob.width * cloud_blob.height * sizeof (PointXYZI)); // test for loadPCDFile ()
// Convert from blob to data type
fromPCLPointCloud2 (cloud_blob, cloud);
- EXPECT_EQ (std::uint32_t (cloud.width), cloud_blob.width); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::uint32_t (cloud.height), cloud_blob.height); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (int (cloud.is_dense), cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (std::size_t (cloud.points.size ()), nr_p); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.width, cloud_blob.width); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.height, cloud_blob.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud_blob.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), nr_p); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].x, first.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].x, first.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].y, first.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].z, first.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].x, last.x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
remove ("test_pcl_io.pcd");
}
cloud.push_back (p);
}
cloud.height = 1;
- cloud.width = std::uint32_t (cloud.size ());
+ cloud.width = cloud.size ();
cloud.is_dense = true;
io::savePCDFile ("temp_binary_color.pcd", cloud, true);
afile<< std::setprecision(10);
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].intensity = static_cast<float> (i);
- afile << cloud.points[i].x << " , " << cloud.points[i].y << " , " << cloud.points[i].z << " , " << cloud.points[i].intensity << " \n";
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].intensity = static_cast<float> (i);
+ afile << cloud[i].x << " , " << cloud[i].y << " , " << cloud[i].z << " , " << cloud[i].intensity << " \n";
}
afile.close();
reader.setInputFields<pcl::PointXYZI> ();
EXPECT_GE(reader.read("test_pcd.txt", rcloud), 0);
- EXPECT_EQ(cloud.points.size(), rcloud.points.size() );
+  EXPECT_EQ(cloud.size(), rcloud.size());
- for(std::size_t i=0;i < rcloud.points.size(); i++){
- EXPECT_FLOAT_EQ(cloud.points[i].x, rcloud.points[i].x);
- EXPECT_FLOAT_EQ(cloud.points[i].y,rcloud.points[i].y);
- EXPECT_FLOAT_EQ(cloud.points[i].z, rcloud.points[i].z);
- EXPECT_FLOAT_EQ(cloud.points[i].intensity, rcloud.points[i].intensity);
+  for(std::size_t i = 0; i < rcloud.size(); i++){
+ EXPECT_FLOAT_EQ(cloud[i].x, rcloud[i].x);
+    EXPECT_FLOAT_EQ(cloud[i].y, rcloud[i].y);
+ EXPECT_FLOAT_EQ(cloud[i].z, rcloud[i].z);
+ EXPECT_FLOAT_EQ(cloud[i].intensity, rcloud[i].intensity);
}
remove ("test_pcd.txt");
pcl::PCLPointCloud2 blob;
pcl::OBJReader objreader = pcl::OBJReader();
int res = objreader.read ("test_obj.obj", blob);
- EXPECT_NE (int (res), -1);
+ EXPECT_NE (res, -1);
EXPECT_EQ (blob.width, 8);
EXPECT_EQ (blob.height, 1);
- EXPECT_EQ (blob.is_dense, true);
+ EXPECT_TRUE (blob.is_dense);
EXPECT_EQ (blob.data.size (), 8 * 6 * 4); // 8 verts, 6 values per vertex (vx,vy,vz,vnx,vny,vnz), 4 byte per value
// Check fields
cloud.width = 2; cloud.height = 1;
cloud.points.resize (2);
- cloud.points[0].x = cloud.points[0].y = cloud.points[0].z = 1;
- cloud.points[1].x = cloud.points[1].y = cloud.points[1].z = 2;
+ cloud[0].x = cloud[0].y = cloud[0].z = 1;
+ cloud[1].x = cloud[1].y = cloud[1].z = 2;
for (int i = 0; i < 33; ++i)
{
- cloud.points[0].histogram[i] = static_cast<float> (i);
- cloud.points[1].histogram[i] = 33.0f - static_cast<float> (i);
+ cloud[0].histogram[i] = static_cast<float> (i);
+ cloud[1].histogram[i] = 33.0f - static_cast<float> (i);
}
savePCDFile ("v.pcd", cloud);
cloud.points.clear ();
loadPCDFile ("v.pcd", cloud);
- EXPECT_EQ (int (cloud.width), 2);
- EXPECT_EQ (int (cloud.height), 1);
- EXPECT_EQ (cloud.is_dense, true);
- EXPECT_EQ (int (cloud.points.size ()), 2);
+ EXPECT_EQ (cloud.width, 2);
+ EXPECT_EQ (cloud.height, 1);
+ EXPECT_TRUE (cloud.is_dense);
+ EXPECT_EQ (cloud.size (), 2);
- EXPECT_EQ (cloud.points[0].x, 1); EXPECT_EQ (cloud.points[0].y, 1); EXPECT_EQ (cloud.points[0].z, 1);
- EXPECT_EQ (cloud.points[1].x, 2); EXPECT_EQ (cloud.points[1].y, 2); EXPECT_EQ (cloud.points[1].z, 2);
+ EXPECT_EQ (cloud[0].x, 1); EXPECT_EQ (cloud[0].y, 1); EXPECT_EQ (cloud[0].z, 1);
+ EXPECT_EQ (cloud[1].x, 2); EXPECT_EQ (cloud[1].y, 2); EXPECT_EQ (cloud[1].z, 2);
for (int i = 0; i < 33; ++i)
{
- ASSERT_EQ (cloud.points[0].histogram[i], i);
- ASSERT_EQ (cloud.points[1].histogram[i], 33-i);
+ ASSERT_EQ (cloud[0].histogram[i], i);
+ ASSERT_EQ (cloud[1].histogram[i], 33-i);
}
remove ("v.pcd");
PointCloud<PointXYZ> cloud;
cloud.points.resize (5);
- for (int i = 0; i < int (cloud.points.size ()); ++i)
- cloud.points[i].x = cloud.points[i].y = cloud.points[i].z = static_cast<float> (i);
+ for (std::size_t i = 0; i < cloud.size (); ++i)
+ cloud[i].x = cloud[i].y = cloud[i].z = static_cast<float> (i);
pcl::PCLPointCloud2 blob;
toPCLPointCloud2 (cloud, blob);
Eigen::MatrixXf mat;
getPointCloudAsEigen (blob, mat);
- EXPECT_EQ (mat.cols (), int (cloud.points.size ()));
+ EXPECT_EQ (mat.cols (), static_cast<Eigen::Index>(cloud.size ()));
EXPECT_EQ (mat.rows (), 4);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- EXPECT_EQ (mat (0, i), cloud.points[i].x);
- EXPECT_EQ (mat (1, i), cloud.points[i].y);
- EXPECT_EQ (mat (2, i), cloud.points[i].z);
+ EXPECT_EQ (mat (0, i), cloud[i].x);
+ EXPECT_EQ (mat (1, i), cloud[i].y);
+ EXPECT_EQ (mat (2, i), cloud[i].z);
EXPECT_EQ (mat (3, i), 1);
}
getEigenAsPointCloud (mat, blob);
fromPCLPointCloud2 (blob, cloud);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- EXPECT_EQ (cloud.points[i].x, i);
- EXPECT_EQ (cloud.points[i].y, i);
- EXPECT_EQ (cloud.points[i].z, i);
+ EXPECT_EQ (cloud[i].x, i);
+ EXPECT_EQ (cloud[i].y, i);
+ EXPECT_EQ (cloud[i].z, i);
}
getPointCloudAsEigen (blob, mat);
- EXPECT_EQ (mat.cols (), int (cloud.points.size ()));
+ EXPECT_EQ (mat.cols (), static_cast<Eigen::Index>(cloud.size ()));
EXPECT_EQ (mat.rows (), 4);
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- EXPECT_EQ (mat (0, i), cloud.points[i].x);
- EXPECT_EQ (mat (1, i), cloud.points[i].y);
- EXPECT_EQ (mat (2, i), cloud.points[i].z);
+ EXPECT_EQ (mat (0, i), cloud[i].x);
+ EXPECT_EQ (mat (1, i), cloud[i].y);
+ EXPECT_EQ (mat (2, i), cloud[i].z);
EXPECT_EQ (mat (3, i), 1);
}
}
cloud_a.points.resize (cloud_a.width * cloud_a.height);
cloud_b.points.resize (cloud_b.width * cloud_b.height);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- cloud_a.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_a.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_a.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud_b.points[i].rgba = 255;
+ cloud_a[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud_a[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud_a[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud_b[i].rgba = 255;
}
pcl::copyPointCloud (cloud_a, cloud_b);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- EXPECT_EQ (cloud_b.points[i].x, cloud_a.points[i].x);
- EXPECT_EQ (cloud_b.points[i].y, cloud_a.points[i].y);
- EXPECT_EQ (cloud_b.points[i].z, cloud_a.points[i].z);
- EXPECT_EQ (cloud_b.points[i].rgba, 255);
- cloud_a.points[i].x = cloud_a.points[i].y = cloud_a.points[i].z = 0;
+ EXPECT_EQ (cloud_b[i].x, cloud_a[i].x);
+ EXPECT_EQ (cloud_b[i].y, cloud_a[i].y);
+ EXPECT_EQ (cloud_b[i].z, cloud_a[i].z);
+ EXPECT_EQ (cloud_b[i].rgba, 255);
+ cloud_a[i].x = cloud_a[i].y = cloud_a[i].z = 0;
}
pcl::copyPointCloud (cloud_b, cloud_a);
- for (std::size_t i = 0; i < cloud_a.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_a.size (); ++i)
{
- EXPECT_EQ (cloud_b.points[i].x, cloud_a.points[i].x);
- EXPECT_EQ (cloud_b.points[i].y, cloud_a.points[i].y);
- EXPECT_EQ (cloud_b.points[i].z, cloud_a.points[i].z);
- EXPECT_EQ (cloud_b.points[i].rgba, 255);
+ EXPECT_EQ (cloud_b[i].x, cloud_a[i].x);
+ EXPECT_EQ (cloud_b[i].y, cloud_a[i].y);
+ EXPECT_EQ (cloud_b[i].z, cloud_a[i].z);
+ EXPECT_EQ (cloud_b[i].rgba, 255);
}
}
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
PCDWriter writer;
int res = writer.writeBinaryCompressed<PointXYZ> ("test_pcl_io_compressed.pcd", cloud);
EXPECT_EQ (cloud2.width, cloud.width);
EXPECT_EQ (cloud2.height, cloud.height);
EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+ EXPECT_EQ (cloud2.size (), cloud.size ());
- for (std::size_t i = 0; i < cloud2.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud2.size (); ++i)
{
- ASSERT_EQ (cloud2.points[i].x, cloud.points[i].x);
- ASSERT_EQ (cloud2.points[i].y, cloud.points[i].y);
- ASSERT_EQ (cloud2.points[i].z, cloud.points[i].z);
+ ASSERT_EQ (cloud2[i].x, cloud[i].x);
+ ASSERT_EQ (cloud2[i].y, cloud[i].y);
+ ASSERT_EQ (cloud2[i].z, cloud[i].z);
}
pcl::PCLPointCloud2 blob;
EXPECT_EQ (cloud2.width, blob.width);
EXPECT_EQ (cloud2.height, blob.height);
EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+ EXPECT_EQ (cloud2.size (), cloud.size ());
- for (std::size_t i = 0; i < cloud2.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud2.size (); ++i)
{
- EXPECT_EQ (cloud2.points[i].x, cloud.points[i].x);
- EXPECT_EQ (cloud2.points[i].y, cloud.points[i].y);
- EXPECT_EQ (cloud2.points[i].z, cloud.points[i].z);
+ EXPECT_EQ (cloud2[i].x, cloud[i].x);
+ EXPECT_EQ (cloud2[i].y, cloud[i].y);
+ EXPECT_EQ (cloud2[i].z, cloud[i].z);
}
}
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].rgb = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].rgb = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
pcl::PCLPointCloud2 blob;
EXPECT_EQ (cloud2.width, blob.width);
EXPECT_EQ (cloud2.height, blob.height);
EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+ EXPECT_EQ (cloud2.size (), cloud.size ());
- for (std::size_t i = 0; i < cloud2.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud2.size (); ++i)
{
- EXPECT_EQ (cloud2.points[i].x, cloud.points[i].x);
- EXPECT_EQ (cloud2.points[i].y, cloud.points[i].y);
- EXPECT_EQ (cloud2.points[i].z, cloud.points[i].z);
- EXPECT_EQ (cloud2.points[i].normal_x, cloud.points[i].normal_x);
- EXPECT_EQ (cloud2.points[i].normal_y, cloud.points[i].normal_y);
- EXPECT_EQ (cloud2.points[i].normal_z, cloud.points[i].normal_z);
- EXPECT_EQ (cloud2.points[i].rgb, cloud.points[i].rgb);
+ EXPECT_EQ (cloud2[i].x, cloud[i].x);
+ EXPECT_EQ (cloud2[i].y, cloud[i].y);
+ EXPECT_EQ (cloud2[i].z, cloud[i].z);
+ EXPECT_EQ (cloud2[i].normal_x, cloud[i].normal_x);
+ EXPECT_EQ (cloud2[i].normal_y, cloud[i].normal_y);
+ EXPECT_EQ (cloud2[i].normal_z, cloud[i].normal_z);
+ EXPECT_EQ (cloud2[i].rgb, cloud[i].rgb);
}
remove ("test_pcl_io_compressed.pcd");
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
for (std::size_t i = 0; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].rgb = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].rgb = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
pcl::PCLPointCloud2 blob;
EXPECT_EQ (cloud2.width, blob.width);
EXPECT_EQ (cloud2.height, blob.height);
EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+ EXPECT_EQ (cloud2.size (), cloud.size ());
- for (std::size_t i = 0; i < cloud2.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud2.size (); ++i)
{
- EXPECT_EQ (cloud2.points[i].x, cloud.points[i].x);
- EXPECT_EQ (cloud2.points[i].y, cloud.points[i].y);
- EXPECT_EQ (cloud2.points[i].z, cloud.points[i].z);
- EXPECT_EQ (cloud2.points[i].normal_x, cloud.points[i].normal_x);
- EXPECT_EQ (cloud2.points[i].normal_y, cloud.points[i].normal_y);
- EXPECT_EQ (cloud2.points[i].normal_z, cloud.points[i].normal_z);
- EXPECT_EQ (cloud2.points[i].rgb, cloud.points[i].rgb);
+ EXPECT_EQ (cloud2[i].x, cloud[i].x);
+ EXPECT_EQ (cloud2[i].y, cloud[i].y);
+ EXPECT_EQ (cloud2[i].z, cloud[i].z);
+ EXPECT_EQ (cloud2[i].normal_x, cloud[i].normal_x);
+ EXPECT_EQ (cloud2[i].normal_y, cloud[i].normal_y);
+ EXPECT_EQ (cloud2[i].normal_z, cloud[i].normal_z);
+ EXPECT_EQ (cloud2[i].rgb, cloud[i].rgb);
}
}
cloud.is_dense = true;
srand (static_cast<unsigned int> (time (nullptr)));
- std::size_t nr_p = cloud.points.size ();
+ const auto nr_p = cloud.size ();
// Randomly create a new point cloud
- cloud.points[0].x = std::numeric_limits<float>::quiet_NaN ();
- cloud.points[0].y = std::numeric_limits<float>::quiet_NaN ();
- cloud.points[0].z = std::numeric_limits<float>::quiet_NaN ();
+ cloud[0].x = std::numeric_limits<float>::quiet_NaN ();
+ cloud[0].y = std::numeric_limits<float>::quiet_NaN ();
+ cloud[0].z = std::numeric_limits<float>::quiet_NaN ();
for (std::size_t i = 1; i < nr_p; ++i)
{
- cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
}
PCDWriter writer;
try
EXPECT_EQ (cloud2.width, cloud.width);
EXPECT_EQ (cloud2.height, cloud.height);
- EXPECT_EQ (cloud2.is_dense, false);
- EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+ EXPECT_FALSE (cloud2.is_dense);
+ EXPECT_EQ (cloud2.size (), cloud.size ());
- EXPECT_TRUE (std::isnan(cloud2.points[0].x));
- EXPECT_TRUE (std::isnan(cloud2.points[0].y));
- EXPECT_TRUE (std::isnan(cloud2.points[0].z));
- for (std::size_t i = 1; i < cloud2.points.size (); ++i)
+ EXPECT_TRUE (std::isnan(cloud2[0].x));
+ EXPECT_TRUE (std::isnan(cloud2[0].y));
+ EXPECT_TRUE (std::isnan(cloud2[0].z));
+ for (std::size_t i = 1; i < cloud2.size (); ++i)
{
- ASSERT_FLOAT_EQ (cloud2.points[i].x, cloud.points[i].x);
- ASSERT_FLOAT_EQ (cloud2.points[i].y, cloud.points[i].y);
- ASSERT_FLOAT_EQ (cloud2.points[i].z, cloud.points[i].z);
+ ASSERT_FLOAT_EQ (cloud2[i].x, cloud[i].x);
+ ASSERT_FLOAT_EQ (cloud2[i].y, cloud[i].y);
+ ASSERT_FLOAT_EQ (cloud2[i].z, cloud[i].z);
}
}
catch (const std::exception&)
#include <pcl/test/gtest.h>
-#include <iostream>
-
-#include <pcl/PCLPointCloud2.h>
-#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/compression/octree_pointcloud_compression.h>
#include <pcl/compression/compression_profiles.h>
-#include <string>
#include <exception>
-using namespace std;
-
int total_runs = 0;
char* pcd_file;
pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>* PointCloudDecoder = new pcl::io::OctreePointCloudCompression<pcl::PointXYZRGB>();
// iterate over various voxel sizes
- for (int i = 0; i < sizeof(voxel_sizes)/sizeof(voxel_sizes[0]); i++) {
+ for (std::size_t i = 0; i < sizeof(voxel_sizes)/sizeof(voxel_sizes[0]); i++) {
pcl::octree::OctreePointCloud<pcl::PointXYZRGB> octree(voxel_sizes[i]);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr octree_out(new pcl::PointCloud<pcl::PointXYZRGB>());
octree.setInputCloud((*input_cloud_ptr).makeShared());
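// Switching the loop index to std::size_t (above) fixes the signed/unsigned comparison
// warning. A range-based for avoids the sizeof(a)/sizeof(a[0]) arithmetic altogether;
// a minimal self-contained sketch, with illustrative array values not taken from the test:
#include <pcl/octree/octree_pointcloud.h>
#include <pcl/point_types.h>

inline void iterate_voxel_sizes ()
{
  const double voxel_sizes[] = {0.01, 0.05, 0.1};                           // illustrative resolutions
  for (const double voxel_size : voxel_sizes)
    pcl::octree::OctreePointCloud<pcl::PointXYZRGB> octree (voxel_size);    // one octree per resolution
}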
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/test/gtest.h>
+#include <fstream> // for ofstream
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, PLYReaderWriter)
// cloud has proper structure
EXPECT_EQ (cloud_rgb.height, 1);
EXPECT_EQ (cloud_rgb.width, 4);
- EXPECT_EQ (cloud_rgb.points.size(), 4);
+ EXPECT_EQ (cloud_rgb.size(), 4);
EXPECT_TRUE (cloud_rgb.is_dense);
// scope cloud data
*/
#include <pcl/test/gtest.h>
-#include <pcl/PCLPointCloud2.h>
#include <pcl/type_traits.h>
#include <pcl/point_types.h>
-#include <pcl/common/io.h>
-#include <pcl/console/print.h>
#include <pcl/io/ply_io.h>
-#include <pcl/io/ascii_io.h>
#include <pcl/io/vtk_lib_io.h>
-#include <fstream>
-#include <locale>
-#include <stdexcept>
std::string mesh_file_vtk_;
cloud.height = 2;
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- cloud.points[i].label = i;
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ cloud[i].label = i;
pcl::PCLImage image;
PointCloudImageExtractorFromLabelField<PointT> pcie;
EXPECT_EQ (cloud.width, image.width);
EXPECT_EQ (cloud.height, image.height);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
+ for (std::size_t i = 0; i < cloud.size (); i++)
EXPECT_EQ (i, data[i]);
}
cloud.height = 2;
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- cloud.points[i].label = i % 2;
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ cloud[i].label = i % 2;
pcl::PCLImage image;
PointCloudImageExtractorFromLabelField<PointT> pcie;
cloud.height = 2;
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- cloud.points[i].label = i % 2;
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ cloud[i].label = i % 2;
pcl::PCLImage image;
PointCloudImageExtractorFromLabelField<PointT> pcie;
EXPECT_EQ (cloud.height, image.height);
// Fill in different labels and extract another image
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- cloud.points[i].label = i % 2 + 10;
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ cloud[i].label = i % 2 + 10;
pcl::PCLImage image2;
ASSERT_TRUE (pcie.extract (cloud, image2));
cloud.height = 2;
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- cloud.points[i].z = 1.0 + i;
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ cloud[i].z = 1.0 + i;
pcl::PCLImage image;
PointCloudImageExtractorFromZField<PointT> pcie;
EXPECT_EQ (cloud.height, image.height);
// by default, the Z field extractor scales values by a factor of 10000
- for (std::size_t i = 0; i < cloud.points.size (); i++)
+ for (std::size_t i = 0; i < cloud.size (); i++)
EXPECT_EQ (10000 * (i + 1), data[i]);
}
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- cloud.points[0].curvature = 1.0;
- cloud.points[1].curvature = 2.0;
- cloud.points[2].curvature = 1.0;
- cloud.points[3].curvature = 2.0;
+ cloud[0].curvature = 1.0;
+ cloud[1].curvature = 2.0;
+ cloud[2].curvature = 1.0;
+ cloud[3].curvature = 2.0;
pcl::PCLImage image;
PointCloudImageExtractorFromCurvatureField<PointT> pcie;
cloud.is_dense = true;
cloud.points.resize (cloud.width * cloud.height);
- cloud.points[0].intensity = 10.0;
- cloud.points[1].intensity = 23.3;
- cloud.points[2].intensity = 28.9;
- cloud.points[3].intensity = 40.0;
+ cloud[0].intensity = 10.0;
+ cloud[1].intensity = 23.3;
+ cloud[2].intensity = 28.9;
+ cloud[3].intensity = 40.0;
pcl::PCLImage image;
PointCloudImageExtractorFromIntensityField<PointT> pcie;
EXPECT_EQ (cloud.height, image.height);
// by default, the Intensity field extractor does not apply scaling
- for (std::size_t i = 0; i < cloud.points.size (); i++)
- EXPECT_EQ (static_cast<unsigned short> (cloud.points[i].intensity), data[i]);
+ for (std::size_t i = 0; i < cloud.size (); i++)
+ EXPECT_EQ (static_cast<unsigned short> (cloud[i].intensity), data[i]);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cloud.is_dense = false;
cloud.points.resize (cloud.width * cloud.height);
- cloud.points[0].curvature = 1.0;
- cloud.points[1].curvature = 2.0;
- cloud.points[2].curvature = 1.0;
- cloud.points[3].curvature = 2.0;
- cloud.points[3].z = std::numeric_limits<float>::quiet_NaN ();
+ cloud[0].curvature = 1.0;
+ cloud[1].curvature = 2.0;
+ cloud[2].curvature = 1.0;
+ cloud[3].curvature = 2.0;
+ cloud[3].z = std::numeric_limits<float>::quiet_NaN ();
pcl::PCLImage image;
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
+#include <algorithm>
#include <iostream> // For debug
#include <map>
-
-using namespace std;
using namespace pcl;
boost::property_tree::ptree xml_property_tree;
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
cloud.points.emplace_back(x, y, z);
- cloud.width = static_cast<std::uint32_t> (cloud.points.size ());
+ cloud.width = cloud.size ();
cloud.height = 1;
cloud_big.width = 640;
kdtree.setInputCloud (cloud.makeShared ());
MyPoint test_point(0.0f, 0.0f, 0.0f);
double max_dist = 0.15;
- set<int> brute_force_result;
- for (std::size_t i=0; i < cloud.points.size(); ++i)
- if (euclideanDistance(cloud.points[i], test_point) < max_dist)
+ std::set<int> brute_force_result;
+ for (std::size_t i=0; i < cloud.size(); ++i)
+ if (euclideanDistance(cloud[i], test_point) < max_dist)
brute_force_result.insert(i);
std::vector<int> k_indices;
std::vector<float> k_distances;
for (const int &k_index : k_indices)
{
- set<int>::iterator brute_force_result_it = brute_force_result.find (k_index);
+ std::set<int>::iterator brute_force_result_it = brute_force_result.find (k_index);
bool ok = brute_force_result_it != brute_force_result.end ();
//if (!ok) std::cerr << k_indices[i] << " is not correct...\n";
//else std::cerr << k_indices[i] << " is correct...\n";
- EXPECT_EQ (ok, true);
+ EXPECT_TRUE (ok);
if (ok)
brute_force_result.erase (brute_force_result_it);
}
bool error = !brute_force_result.empty ();
//if (error) std::cerr << "Missed too many neighbors!\n";
- EXPECT_EQ (error, false);
+ EXPECT_FALSE (error);
{
KdTreeFLANN<MyPoint> kdtree;
kdtree.setInputCloud (cloud.makeShared ());
MyPoint test_point (0.01f, 0.01f, 0.01f);
unsigned int no_of_neighbors = 20;
- multimap<float, int> sorted_brute_force_result;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ std::multimap<float, int> sorted_brute_force_result;
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- float distance = euclideanDistance (cloud.points[i], test_point);
- sorted_brute_force_result.insert (make_pair (distance, static_cast<int> (i)));
+ float distance = euclideanDistance (cloud[i], test_point);
+ sorted_brute_force_result.insert (std::make_pair (distance, static_cast<int> (i)));
}
float max_dist = 0.0f;
unsigned int counter = 0;
- for (multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end () && counter < no_of_neighbors; ++it)
+ for (std::multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end () && counter < no_of_neighbors; ++it)
{
- max_dist = max (max_dist, it->first);
+ max_dist = std::max (max_dist, it->first);
++counter;
}
// Check if all found neighbors have distance smaller than max_dist
for (const int &k_index : k_indices)
{
- const MyPoint& point = cloud.points[k_index];
+ const MyPoint& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
if (!ok)
ok = (std::abs (euclideanDistance (test_point, point)) - max_dist) <= 1e-6;
//if (!ok) std::cerr << k_index << " is not correct...\n";
//else std::cerr << k_index << " is correct...\n";
- EXPECT_EQ (ok, true);
+ EXPECT_TRUE (ok);
}
ScopeTime scopeTime ("FLANN nearestKSearch");
};
- ASSERT_EQ (keypoints.points.size (), correct_nr_keypoints);
+ ASSERT_EQ (keypoints.size (), correct_nr_keypoints);
for (std::size_t i = 0; i < correct_nr_keypoints; ++i)
{
- EXPECT_NEAR (keypoints.points[i].x, correct_keypoints[i][0], 1e-6);
- EXPECT_NEAR (keypoints.points[i].y, correct_keypoints[i][1], 1e-6);
- EXPECT_NEAR (keypoints.points[i].z, correct_keypoints[i][2], 1e-6);
+ EXPECT_NEAR (keypoints[i].x, correct_keypoints[i][0], 1e-6);
+ EXPECT_NEAR (keypoints[i].y, correct_keypoints[i][1], 1e-6);
+ EXPECT_NEAR (keypoints[i].z, correct_keypoints[i][2], 1e-6);
}
tree.reset (new search::KdTree<PointXYZ> ());
{-0.030035f, 0.066130f, 0.038942f}
};
- ASSERT_EQ (keypoints.points.size (), correct_nr_keypoints);
+ ASSERT_EQ (keypoints.size (), correct_nr_keypoints);
for (std::size_t i = 0; i < correct_nr_keypoints; ++i)
{
- EXPECT_NEAR (keypoints.points[i].x, correct_keypoints[i][0], 1e-6);
- EXPECT_NEAR (keypoints.points[i].y, correct_keypoints[i][1], 1e-6);
- EXPECT_NEAR (keypoints.points[i].z, correct_keypoints[i][2], 1e-6);
+ EXPECT_NEAR (keypoints[i].x, correct_keypoints[i][0], 1e-6);
+ EXPECT_NEAR (keypoints[i].y, correct_keypoints[i][1], 1e-6);
+ EXPECT_NEAR (keypoints[i].z, correct_keypoints[i][2], 1e-6);
}
tree.reset (new search::KdTree<PointXYZ> ());
using namespace pcl;
using namespace pcl::io;
-using namespace std;
struct KeypointT
{
sift_detector.setInputCloud (cloud_xyzi);
sift_detector.compute (keypoints);
- ASSERT_EQ (keypoints.width, keypoints.points.size ());
+ ASSERT_EQ (keypoints.width, keypoints.size ());
ASSERT_EQ (keypoints.height, 1);
- EXPECT_EQ (keypoints.points.size (), static_cast<std::size_t> (169));
+ EXPECT_EQ (keypoints.size (), static_cast<std::size_t> (169));
EXPECT_EQ (keypoints.header, cloud_xyzi->header);
EXPECT_EQ (keypoints.sensor_origin_ (0), cloud_xyzi->sensor_origin_ (0));
EXPECT_EQ (keypoints.sensor_origin_ (1), cloud_xyzi->sensor_origin_ (1));
sift_detector.setMinimumContrast (0.06f);
sift_detector.compute (keypoints);
- ASSERT_EQ (keypoints.width, keypoints.points.size ());
+ ASSERT_EQ (keypoints.width, keypoints.size ());
ASSERT_EQ (keypoints.height, 1);
// Compare to previously validated output
{-0.1002f, -0.1002f, 1.9933f, 0.3175f}
};
- ASSERT_EQ (keypoints.points.size (), correct_nr_keypoints);
+ ASSERT_EQ (keypoints.size (), correct_nr_keypoints);
for (std::size_t i = 0; i < correct_nr_keypoints; ++i)
{
- EXPECT_NEAR (keypoints.points[i].x, correct_keypoints[i][0], 1e-4);
- EXPECT_NEAR (keypoints.points[i].y, correct_keypoints[i][1], 1e-4);
- EXPECT_NEAR (keypoints.points[i].z, correct_keypoints[i][2], 1e-4);
- EXPECT_NEAR (keypoints.points[i].scale, correct_keypoints[i][3], 1e-4);
+ EXPECT_NEAR (keypoints[i].x, correct_keypoints[i][0], 1e-4);
+ EXPECT_NEAR (keypoints[i].y, correct_keypoints[i][1], 1e-4);
+ EXPECT_NEAR (keypoints[i].z, correct_keypoints[i][2], 1e-4);
+ EXPECT_NEAR (keypoints[i].scale, correct_keypoints[i][3], 1e-4);
}
}
tree.radiusSearch (i_point, max_radius, nn_indices, nn_dist);
// Are they all unique?
- set<int> unique_indices;
+ std::set<int> unique_indices;
for (const int &nn_index : nn_indices)
{
unique_indices.insert (nn_index);
#include <vector>
-#include <cstdio>
-
-using namespace std;
-
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
for (const int &i : tmpVector)
{
- ASSERT_GE (cloud->points[i].x, min_pt(0));
- ASSERT_GE (cloud->points[i].y, min_pt(1));
- ASSERT_GE (cloud->points[i].z, min_pt(2));
- ASSERT_LE (cloud->points[i].x, max_pt(0));
- ASSERT_LE (cloud->points[i].y, max_pt(1));
- ASSERT_LE (cloud->points[i].z, max_pt(2));
+ ASSERT_GE ((*cloud)[i].x, min_pt(0));
+ ASSERT_GE ((*cloud)[i].y, min_pt(1));
+ ASSERT_GE ((*cloud)[i].z, min_pt(2));
+ ASSERT_LE ((*cloud)[i].x, max_pt(0));
+ ASSERT_LE ((*cloud)[i].y, max_pt(1));
+ ASSERT_LE ((*cloud)[i].z, max_pt(2));
}
leaf_count++;
cloudB->push_back (newPoint);
}
- ASSERT_EQ (cloudA->points.size (), octreeA.getLeafCount ());
+ ASSERT_EQ (cloudA->size (), octreeA.getLeafCount ());
// checks for getVoxelDataAtPoint() and isVoxelOccupiedAtPoint() functionality
for (const auto &point : cloudA->points)
ASSERT_EQ (branch_count + leaf_count, node_count);
ASSERT_EQ (octreeB.getLeafCount (), leaf_count);
- for (std::size_t i = 0; i < cloudB->points.size (); i++)
+ for (std::size_t i = 0; i < cloudB->size (); i++)
{
std::vector<int> pointIdxVec;
- octreeB.voxelSearch (cloudB->points[i], pointIdxVec);
+ octreeB.voxelSearch ((*cloudB)[i], pointIdxVec);
bool bIdxFound = false;
std::vector<int>::const_iterator current = pointIdxVec.begin ();
for (float x = 0.05f; x < 7.0f; x += 0.1f)
cloudIn->push_back (PointXYZ (x, y, z));
- cloudIn->width = static_cast<std::uint32_t> (cloudIn->points.size ());
+ cloudIn->width = cloudIn->size ();
cloudIn->height = 1;
OctreePointCloudDensity<PointXYZ> octreeA (1.0f); // low resolution
for (float x = 0.05f; x < 7.0f; x += 0.1f)
cloudIn->push_back (PointXYZ (x, y, z));
- cloudIn->width = static_cast<std::uint32_t> (cloudIn->points.size ());
+ cloudIn->width = cloudIn->size ();
cloudIn->height = 1;
OctreePointCloud<PointXYZ> octreeA (1.0f); // low resolution
leafNodeCounter++;
}
- ASSERT_EQ (cloudIn->points.size (), indexVector.size());
+ ASSERT_EQ (cloudIn->size (), indexVector.size());
ASSERT_EQ (octreeA.getLeafCount (), leafNodeCounter);
unsigned int traversCounter = 0;
// generate point data for point cloud
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
}
// check occupancy of voxels
for (std::size_t i = 0; i < 1000; i++)
{
- ASSERT_TRUE (octree.isVoxelOccupiedAtPoint (cloudIn->points[i]));
- octree.deleteVoxelAtPoint (cloudIn->points[i]);
- ASSERT_FALSE (octree.isVoxelOccupiedAtPoint (cloudIn->points[i]));
+ ASSERT_TRUE (octree.isVoxelOccupiedAtPoint ((*cloudIn)[i]));
+ octree.deleteVoxelAtPoint ((*cloudIn)[i]);
+ ASSERT_FALSE (octree.isVoxelOccupiedAtPoint ((*cloudIn)[i]));
}
}
}
// generate point data for point cloud
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
}
for (std::size_t i = 0; i < 10; i++)
{
// these three points should always be assigned to the same voxel in the octree
- cloudIn->points[i * 3 + 0] = PointXYZ (static_cast<float> (i) + 0.2f, static_cast<float> (i) + 0.2f, static_cast<float> (i) + 0.2f);
- cloudIn->points[i * 3 + 1] = PointXYZ (static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f);
- cloudIn->points[i * 3 + 2] = PointXYZ (static_cast<float> (i) + 0.6f, static_cast<float> (i) + 0.6f, static_cast<float> (i) + 0.6f);
+ (*cloudIn)[i * 3 + 0] = PointXYZ (static_cast<float> (i) + 0.2f, static_cast<float> (i) + 0.2f, static_cast<float> (i) + 0.2f);
+ (*cloudIn)[i * 3 + 1] = PointXYZ (static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f);
+ (*cloudIn)[i * 3 + 2] = PointXYZ (static_cast<float> (i) + 0.6f, static_cast<float> (i) + 0.6f, static_cast<float> (i) + 0.6f);
}
// assign point cloud to octree
for (std::size_t i = 0; i < 10; i++)
{
// these three points should always be assigned to the same voxel in the octree
- cloudIn->points[i * 3 + 0] = PointXYZ (static_cast<float> (i) + 0.1f, static_cast<float> (i) + 0.1f, static_cast<float> (i) + 0.1f);
- cloudIn->points[i * 3 + 1] = PointXYZ (static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f);
- cloudIn->points[i * 3 + 2] = PointXYZ (static_cast<float> (i) + 0.7f, static_cast<float> (i) + 0.7f, static_cast<float> (i) + 0.7f);
+ (*cloudIn)[i * 3 + 0] = PointXYZ (static_cast<float> (i) + 0.1f, static_cast<float> (i) + 0.1f, static_cast<float> (i) + 0.1f);
+ (*cloudIn)[i * 3 + 1] = PointXYZ (static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f, static_cast<float> (i) + 0.4f);
+ (*cloudIn)[i * 3 + 2] = PointXYZ (static_cast<float> (i) + 0.7f, static_cast<float> (i) + 0.7f, static_cast<float> (i) + 0.7f);
}
// add points from new cloud to octree
cloudIn->points.resize (cloudIn->width * cloudIn->height);
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
}
k_sqr_distances_bruteforce.clear ();
// push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, static_cast<int> (i));
+ prioPointQueueEntry pointEntry ((*cloudIn)[i], pointDist, static_cast<int> (i));
pointCandidates.push (pointEntry);
}
{
bool inBox;
bool idxInResults;
- const PointXYZ& pt = cloudIn->points[i];
+ const PointXYZ& pt = (*cloudIn)[i];
inBox = (pt.x >= lowerBoxCorner (0)) && (pt.x <= upperBoxCorner (0)) &&
(pt.y >= lowerBoxCorner (1)) && (pt.y <= upperBoxCorner (1)) &&
cloudIn->points.resize (cloudIn->width * cloudIn->height);
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX));
}
double BFdistance = std::numeric_limits<double>::max ();
int BFindex = 0;
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
if (pointDist < BFdistance)
{
// generate point cloud data
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (10.0 * rand () / RAND_MAX),
static_cast<float> (5.0 * rand () / RAND_MAX));
}
// bruteforce radius search
std::vector<int> cloudSearchBruteforce;
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
pointDist = sqrt (
- (cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x)
- + (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y)
- + (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ ((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
+ + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y)
+ + ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
if (pointDist <= searchRadius)
{
while (current != cloudNWRSearch.end ())
{
pointDist = sqrt (
- (cloudIn->points[*current].x - searchPoint.x) * (cloudIn->points[*current].x - searchPoint.x)
- + (cloudIn->points[*current].y - searchPoint.y) * (cloudIn->points[*current].y - searchPoint.y)
- + (cloudIn->points[*current].z - searchPoint.z) * (cloudIn->points[*current].z - searchPoint.z));
+ ((*cloudIn)[*current].x - searchPoint.x) * ((*cloudIn)[*current].x - searchPoint.x)
+ + ((*cloudIn)[*current].y - searchPoint.y) * ((*cloudIn)[*current].y - searchPoint.y)
+ + ((*cloudIn)[*current].z - searchPoint.z) * ((*cloudIn)[*current].z - searchPoint.z));
ASSERT_TRUE (pointDist <= searchRadius);
static_cast<float> (12.0 * rand () / RAND_MAX),
static_cast<float> (12.0 * rand () / RAND_MAX));
- cloudIn->points[0] = pcl::PointXYZ (p[0], p[1], p[2]);
+ (*cloudIn)[0] = pcl::PointXYZ (p[0], p[1], p[2]);
// direction vector
Eigen::Vector3f dir (p - o);
{
tmin -= 0.25f;
Eigen::Vector3f n_p = o + (tmin * dir);
- cloudIn->points[j] = pcl::PointXYZ (n_p[0], n_p[1], n_p[2]);
+ (*cloudIn)[j] = pcl::PointXYZ (n_p[0], n_p[1], n_p[2]);
}
// insert cloud point into octree
octree_search.getIntersectedVoxelIndices (o, dir, indicesInRay);
// check if all voxels in the cloud are penetrated by the ray
- ASSERT_EQ (cloudIn->points.size (), voxelsInRay.size ());
+ ASSERT_EQ (cloudIn->size (), voxelsInRay.size ());
// check if all indices of penetrated voxels are in cloud
- ASSERT_EQ (cloudIn->points.size (), indicesInRay.size ());
+ ASSERT_EQ (cloudIn->size (), indicesInRay.size ());
octree_search.getIntersectedVoxelCenters (o, dir, voxelsInRay2, 1);
octree_search.getIntersectedVoxelIndices (o, dir, indicesInRay2, 1);
ASSERT_EQ (1u, indicesInRay2.size ());
// check if this point is the closest point to the origin
- pcl::PointXYZ pt = cloudIn->points[ indicesInRay2[0] ];
+ pcl::PointXYZ pt = (*cloudIn)[ indicesInRay2[0] ];
Eigen::Vector3f d = Eigen::Vector3f (pt.x, pt.y, pt.z) - o;
float min_dist = d.norm ();
for (unsigned int i = 0; i < cloudIn->width * cloudIn->height; i++)
{
- pt = cloudIn->points[i];
+ pt = (*cloudIn)[i];
d = Eigen::Vector3f (pt.x, pt.y, pt.z) - o;
ASSERT_GE (d.norm (), min_dist);
}
#include <pcl/octree/octree_pointcloud_adjacency.h>
#include <pcl/octree/octree_base.h>
#include <pcl/octree/octree_iterator.h>
-#include <pcl/common/projection_matrix.h>
#include <pcl/point_types.h>
#include <pcl/test/gtest.h>
#include <pcl/test/gtest.h>
#include <vector>
-#include <cstdio>
#include <iostream>
#include <random>
-using namespace std;
#include <pcl/common/time.h>
-#include <pcl/console/print.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
test_cloud->points.push_back (tmp);
}
- EXPECT_EQ (numPts, test_cloud->points.size ());
+ EXPECT_EQ (numPts, test_cloud->size ());
octree_disk pcl_cloud (4, min, max, outofcore_path, "ECEF");
pcl_cloud.addPointCloud (test_cloud);
- EXPECT_EQ (test_cloud->points.size (), pcl_cloud.getNumPointsAtDepth (pcl_cloud.getDepth ()));
+ EXPECT_EQ (test_cloud->size (), pcl_cloud.getNumPointsAtDepth (pcl_cloud.getDepth ()));
cleanUpFilesystem ();
}
octree_disk pcl_cloud (4, min, max, outofcore_path, "ECEF");
- ASSERT_EQ (test_cloud->points.size (), pcl_cloud.addPointCloud (test_cloud)) << "Points lost when adding the first cloud to the tree\n";
+ ASSERT_EQ (test_cloud->size (), pcl_cloud.addPointCloud (test_cloud)) << "Points lost when adding the first cloud to the tree\n";
ASSERT_EQ (numPts, pcl_cloud.getNumPointsAtDepth (pcl_cloud.getDepth ())) << "Book keeping of number of points at query depth does not match number of points inserted to the leaves\n";
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/common/eigen.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/search/search.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/feature.h>
#include <pcl/features/fpfh.h>
*
*/
-#include <random>
-
#include <pcl/test/gtest.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/eigen.h>
+#include <pcl/common/random.h> // NormalGenerator
#include <pcl/common/transforms.h>
#include <pcl/io/pcd_io.h>
#include <pcl/registration/correspondence_rejection_median_distance.h>
pcl::transformPointCloud (cloud, target, t, q);
// Noisify the target with a known seed and N(0, 0.005) using deterministic sampling
- std::mt19937 rng(1e6);
- std::normal_distribution<> nd(0, 0.005);
+ pcl::common::NormalGenerator<float> nd(0, 0.005, 1e6);
for (auto &point : target)
{
- point.x += static_cast<float> (nd(rng));
- point.y += static_cast<float> (nd(rng));
- point.z += static_cast<float> (nd(rng));
+ point.x += nd.run();
+ point.y += nd.run();
+ point.z += nd.run();
}
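// pcl::common::NormalGenerator (from <pcl/common/random.h>) bundles the random engine,
// the N(mean, sigma) distribution and the seed into one object, which is what lets the
// patch drop the separate std::mt19937 / std::normal_distribution pair above.
// Minimal standalone usage sketch; the mean, sigma and seed values here are illustrative:
#include <pcl/common/random.h>

inline float sample_noise ()
{
  pcl::common::NormalGenerator<float> noise (0.0f, 0.005f, 42); // mean, sigma, seed
  return noise.run ();                                          // one N(0, 0.005) draw
}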
// Ensure deterministic sampling inside the rejector
* but not too many
*/
EXPECT_GE(accepted_frac, ground_truth_frac);
- EXPECT_LE(accepted_frac, 1.5f*ground_truth_frac);
-
+ // Factor raised from 1.5 to 1.6 because the amount of noise added varies across standard library implementations
+ // See #2995 for details
+ EXPECT_LE(accepted_frac, 1.6f*ground_truth_frac);
+
/*
* Test criterion 2: expect high precision/recall. The true positives are the unscrambled correspondences
* where the query/match index are equal.
using namespace pcl;
using namespace pcl::io;
using namespace pcl::registration;
-using namespace std;
PointCloud<PointXYZ> cloud_source, cloud_target;
// align
fpcs_ia.align (source_aligned);
- EXPECT_EQ (static_cast <int> (source_aligned.points.size ()), static_cast <int> (cloud_source.points.size ()));
+ EXPECT_EQ (source_aligned.size (), cloud_source.size ());
// check for correct coarse transformation matrix
//Eigen::Matrix4f transform_res_from_fpcs = fpcs_ia.getFinalTransformation ();
using namespace pcl;
using namespace pcl::io;
using namespace pcl::registration;
-using namespace std;
PointCloud<PointXYZI> cloud_source, cloud_target;
break;
}
- EXPECT_EQ (static_cast <int> (cloud_source_aligned.points.size ()), static_cast <int> (cloud_source.points.size ()));
+ EXPECT_EQ (cloud_source_aligned.size (), cloud_source.size ());
EXPECT_NEAR (angle3d, 0.f, max_angle3d);
EXPECT_NEAR (translation3d, 0.f, max_translation3d);
}
reg.setTransformationEpsilon (1e-8);
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.001);
// Check again, for all possible caching schemes
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.001);
}
}
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ> cloud_source, cloud_target, cloud_reg;
PointCloud<PointXYZRGBA> cloud_with_color;
feature3.points.push_back (f);
}
}
- feature0.width = static_cast<std::uint32_t> (feature0.points.size ());
- feature1.width = static_cast<std::uint32_t> (feature1.points.size ());
- feature2.width = static_cast<std::uint32_t> (feature2.points.size ());
- feature3.width = static_cast<std::uint32_t> (feature3.points.size ());
+ feature0.width = feature0.size ();
+ feature1.width = feature1.size ();
+ feature2.width = feature2.size ();
+ feature3.width = feature3.size ();
KdTreeFLANN<FeatureT> tree;
// Register
reg.align (cloud_reg);
- EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (cloud_reg.size (), cloud_source.size ());
Eigen::Matrix4f transformation = reg.getFinalTransformation ();
EXPECT_NEAR (transformation (0, 0), 0.8806, 1e-3);
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
// We get different results on 32 vs 64-bit systems. To address this, we've removed the explicit output test
// on the transformation matrix. Instead, we're testing to make sure the algorithm converges to a sufficiently
// low error by checking the fitness score.
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.001);
}
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.005);
// Check again, for all possible caching schemes
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.005);
}
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.0001);
// Check again, for all possible caching schemes
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.001);
}
reg_guess.setMaximumIterations (50);
reg_guess.setTransformationEpsilon (1e-8);
reg_guess.align (output, transform.matrix ());
- EXPECT_EQ (int (output.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (output.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.0001);
}
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (src->points.size ()));
+ EXPECT_EQ (output.size (), src->size ());
EXPECT_LT (reg.getFitnessScore (), 0.003);
// Check again, for all possible caching schemes
// Register
reg.align (output);
- EXPECT_EQ (int (output.points.size ()), int (src->points.size ()));
+ EXPECT_EQ (output.size (), src->size ());
EXPECT_LT (reg.getFitnessScore (), 0.003);
}
}
ppf_estimator.compute (*ppf_signature_target);
- std::vector<pair<float, float> > dim_range_input, dim_range_target;
+ std::vector<std::pair<float, float> > dim_range_input, dim_range_target;
for (std::size_t i = 0; i < 3; ++i) dim_range_input.emplace_back(static_cast<float> (-M_PI), static_cast<float> (M_PI));
dim_range_input.emplace_back(0.0f, 1.0f);
for (std::size_t i = 0; i < 3; ++i) dim_range_target.emplace_back(static_cast<float> (-M_PI) * 10.0f, static_cast<float> (M_PI) * 10.0f);
float similarity_value = PyramidFeatureHistogram<PPFSignature>::comparePyramidFeatureHistograms (pyramid_source, pyramid_target);
EXPECT_NEAR (similarity_value, 0.74101555347442627, 1e-4);
- std::vector<pair<float, float> > dim_range_target2;
+ std::vector<std::pair<float, float> > dim_range_target2;
for (std::size_t i = 0; i < 3; ++i) dim_range_target2.emplace_back(static_cast<float> (-M_PI) * 5.0f, static_cast<float> (M_PI) * 5.0f);
dim_range_target2.emplace_back(0.0f, 20.0f);
EXPECT_NEAR (similarity_value2, 0.80097091197967529, 1e-4);
- std::vector<pair<float, float> > dim_range_target3;
+ std::vector<std::pair<float, float> > dim_range_target3;
for (std::size_t i = 0; i < 3; ++i) dim_range_target3.emplace_back(static_cast<float> (-M_PI) * 2.0f, static_cast<float> (M_PI) * 2.0f);
dim_range_target3.emplace_back(0.0f, 10.0f);
return (RUN_ALL_TESTS ());
// Transpose the cloud_model
- /*for (std::size_t i = 0; i < cloud_model.points.size (); ++i)
+ /*for (std::size_t i = 0; i < cloud_model.size (); ++i)
{
- // cloud_model.points[i].z += 1;
+ // cloud_model[i].z += 1;
}*/
}
/* ]--- */
src->points.push_back (p);
}
- src->width = static_cast<std::uint32_t> (src->points.size ());
+ src->width = src->size ();
// Create a test matrix
Eigen::Matrix4f ground_truth_tform = Eigen::Matrix4f::Identity ();
src->points.push_back (p);
}
- src->width = static_cast<std::uint32_t> (src->points.size ());
+ src->width = src->size ();
// Create a test matrix
Eigen::Matrix4f ground_truth_tform = Eigen::Matrix4f::Identity ();
src->points.push_back (p);
}
- src->width = static_cast<std::uint32_t> (src->points.size ());
+ src->width = src->size ();
// Create a test matrix
// (alpha, beta, gamma) = (-0.0180524, 0.0525268, -0.0999635)
return (RUN_ALL_TESTS ());
// Transpose the cloud_model
- /*for (std::size_t i = 0; i < cloud_model.points.size (); ++i)
+ /*for (std::size_t i = 0; i < cloud_model.size (); ++i)
{
- // cloud_model.points[i].z += 1;
+ // cloud_model[i].z += 1;
}*/
}
/* ]--- */
// Register
reg.align (cloud_reg);
- EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (cloud_reg.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.0005);
// Check again, for all possible caching schemes
// Register
reg.align (cloud_reg);
- EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
+ EXPECT_EQ (cloud_reg.size (), cloud_source.size ());
EXPECT_LT (reg.getFitnessScore (), 0.0005);
}
}
reg.align (cloud_reg);
// Check output consistency and quality of alignment
- EXPECT_EQ (static_cast<int> (cloud_reg.points.size ()), static_cast<int> (cloud_source.points.size ()));
- float inlier_fraction = static_cast<float> (reg.getInliers ().size ()) / static_cast<float> (cloud_source.points.size ());
+ EXPECT_EQ (cloud_reg.size (), cloud_source.size ());
+ float inlier_fraction = static_cast<float> (reg.getInliers ().size ()) / static_cast<float> (cloud_source.size ());
EXPECT_GT (inlier_fraction, 0.95f);
// Check again, for all possible caching schemes
reg.align (cloud_reg);
// Check output consistency and quality of alignment
- EXPECT_EQ (int (cloud_reg.points.size ()), int (cloud_source.points.size ()));
- inlier_fraction = static_cast<float> (reg.getInliers ().size ()) / static_cast<float> (cloud_source.points.size ());
+ EXPECT_EQ (cloud_reg.size (), cloud_source.size ());
+ inlier_fraction = static_cast<float> (reg.getInliers ().size ()) / static_cast<float> (cloud_source.size ());
EXPECT_GT (inlier_fraction, 0.95f);
}
}
#include <chrono>
#include <condition_variable>
-#include <memory>
#include <mutex>
#include <thread>
cloud.points.resize (point_count);
for (unsigned idx = 0; idx < point_count; ++idx)
{
- cloud.points[idx].x = static_cast<float> (idx);
- cloud.points[idx].y = 0.0;
- cloud.points[idx].z = 0.0;
+ cloud[idx].x = static_cast<float> (idx);
+ cloud[idx].y = 0.0;
+ cloud[idx].z = 0.0;
}
SampleConsensusModelSpherePtr model (new SampleConsensusModelSphere<PointXYZ> (cloud.makeShared ()));
#if defined(DEBUG) || defined(_DEBUG)
EXPECT_EQ (std::cv_status::no_timeout, cv.wait_for (lock, 15s));
#else
- EXPECT_EQ (std::cv_status::no_timeout, cv.wait_for (lock, 1s));
+ EXPECT_EQ (std::cv_status::no_timeout, cv.wait_for (lock, 2s));
#endif
+ // release lock to avoid deadlock
+ lock.unlock();
thread.join ();
}
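// The unlock () added above is the essential part of this fix: assuming the background
// thread still has to lock the same mutex before it can finish (as the "avoid deadlock"
// comment suggests), joining it while this thread holds the lock can deadlock, with each
// side waiting on the other. Minimal sketch of the safe pattern; the helper name is illustrative:
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

inline void join_without_deadlock (std::thread &worker, std::mutex &m,
                                   std::condition_variable &cv, bool &done)
{
  std::unique_lock<std::mutex> lock (m);
  cv.wait_for (lock, std::chrono::seconds (2), [&] { return done; });
  // If the wait timed out, the worker may still be blocked trying to lock `m`
  // (it needs the mutex to set `done` and notify). Release the lock first so the
  // worker can always make progress, then join.
  lock.unlock ();
  worker.join ();
}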
PointCloud<PointXYZ> cloud;
cloud.points.resize (10);
- cloud.points[0].getVector3fMap () << 1.0f, 2.00f, 3.00f;
- cloud.points[1].getVector3fMap () << 4.0f, 5.00f, 6.00f;
- cloud.points[2].getVector3fMap () << 7.0f, 8.00f, 9.00f;
- cloud.points[3].getVector3fMap () << 10.0f, 11.00f, 12.00f;
- cloud.points[4].getVector3fMap () << 13.0f, 14.00f, 15.00f;
- cloud.points[5].getVector3fMap () << 16.0f, 17.00f, 18.00f;
- cloud.points[6].getVector3fMap () << 19.0f, 20.00f, 21.00f;
- cloud.points[7].getVector3fMap () << 22.0f, 23.00f, 24.00f;
- cloud.points[8].getVector3fMap () << -5.0f, 1.57f, 0.75f;
- cloud.points[9].getVector3fMap () << 4.0f, 2.00f, 3.00f;
+ cloud[0].getVector3fMap () << 1.0f, 2.00f, 3.00f;
+ cloud[1].getVector3fMap () << 4.0f, 5.00f, 6.00f;
+ cloud[2].getVector3fMap () << 7.0f, 8.00f, 9.00f;
+ cloud[3].getVector3fMap () << 10.0f, 11.00f, 12.00f;
+ cloud[4].getVector3fMap () << 13.0f, 14.00f, 15.00f;
+ cloud[5].getVector3fMap () << 16.0f, 17.00f, 18.00f;
+ cloud[6].getVector3fMap () << 19.0f, 20.00f, 21.00f;
+ cloud[7].getVector3fMap () << 22.0f, 23.00f, 24.00f;
+ cloud[8].getVector3fMap () << -5.0f, 1.57f, 0.75f;
+ cloud[9].getVector3fMap () << 4.0f, 2.00f, 3.00f;
// Create a shared line model pointer directly
SampleConsensusModelLinePtr model (new SampleConsensusModelLine<PointXYZ> (cloud.makeShared ()));
PointCloud<PointXYZ> proj_points;
model->projectPoints (inliers, coeff_refined, proj_points);
- EXPECT_XYZ_NEAR (PointXYZ ( 7.0, 8.0, 9.0), proj_points.points[2], 1e-4);
- EXPECT_XYZ_NEAR (PointXYZ (10.0, 11.0, 12.0), proj_points.points[3], 1e-4);
- EXPECT_XYZ_NEAR (PointXYZ (16.0, 17.0, 18.0), proj_points.points[5], 1e-4);
+ EXPECT_XYZ_NEAR (PointXYZ ( 7.0, 8.0, 9.0), proj_points[2], 1e-4);
+ EXPECT_XYZ_NEAR (PointXYZ (10.0, 11.0, 12.0), proj_points[3], 1e-4);
+ EXPECT_XYZ_NEAR (PointXYZ (16.0, 17.0, 18.0), proj_points[5], 1e-4);
}
TEST (SampleConsensusModelLine, OnGroundPlane)
// All the points are on the ground plane (z=0).
// The line is parallel to the x axis, so all the inlier points have the same z and y coordinates.
- cloud.points[0].getVector3fMap () << 0.0f, 0.0f, 0.0f;
- cloud.points[1].getVector3fMap () << 1.0f, 0.0f, 0.0f;
- cloud.points[2].getVector3fMap () << 2.0f, 0.0f, 0.0f;
- cloud.points[3].getVector3fMap () << 3.0f, 0.0f, 0.0f;
- cloud.points[4].getVector3fMap () << 4.0f, 0.0f, 0.0f;
- cloud.points[5].getVector3fMap () << 5.0f, 0.0f, 0.0f;
+ cloud[0].getVector3fMap () << 0.0f, 0.0f, 0.0f;
+ cloud[1].getVector3fMap () << 1.0f, 0.0f, 0.0f;
+ cloud[2].getVector3fMap () << 2.0f, 0.0f, 0.0f;
+ cloud[3].getVector3fMap () << 3.0f, 0.0f, 0.0f;
+ cloud[4].getVector3fMap () << 4.0f, 0.0f, 0.0f;
+ cloud[5].getVector3fMap () << 5.0f, 0.0f, 0.0f;
// Outliers
- cloud.points[6].getVector3fMap () << 2.1f, 2.0f, 0.0f;
- cloud.points[7].getVector3fMap () << 5.0f, 4.1f, 0.0f;
- cloud.points[8].getVector3fMap () << 0.4f, 1.3f, 0.0f;
- cloud.points[9].getVector3fMap () << 3.3f, 0.1f, 0.0f;
+ cloud[6].getVector3fMap () << 2.1f, 2.0f, 0.0f;
+ cloud[7].getVector3fMap () << 5.0f, 4.1f, 0.0f;
+ cloud[8].getVector3fMap () << 0.4f, 1.3f, 0.0f;
+ cloud[9].getVector3fMap () << 3.3f, 0.1f, 0.0f;
// Create a shared line model pointer directly
SampleConsensusModelLinePtr model (new SampleConsensusModelLine<PointXYZ> (cloud.makeShared ()));
PointCloud<PointXYZ> cloud (16, 1);
// Line 1
- cloud.points[0].getVector3fMap () << 1.0f, 2.00f, 3.00f;
- cloud.points[1].getVector3fMap () << 4.0f, 5.00f, 6.00f;
- cloud.points[2].getVector3fMap () << 7.0f, 8.00f, 9.00f;
- cloud.points[3].getVector3fMap () << 10.0f, 11.00f, 12.00f;
- cloud.points[4].getVector3fMap () << 13.0f, 14.00f, 15.00f;
- cloud.points[5].getVector3fMap () << 16.0f, 17.00f, 18.00f;
- cloud.points[6].getVector3fMap () << 19.0f, 20.00f, 21.00f;
- cloud.points[7].getVector3fMap () << 22.0f, 23.00f, 24.00f;
+ cloud[0].getVector3fMap () << 1.0f, 2.00f, 3.00f;
+ cloud[1].getVector3fMap () << 4.0f, 5.00f, 6.00f;
+ cloud[2].getVector3fMap () << 7.0f, 8.00f, 9.00f;
+ cloud[3].getVector3fMap () << 10.0f, 11.00f, 12.00f;
+ cloud[4].getVector3fMap () << 13.0f, 14.00f, 15.00f;
+ cloud[5].getVector3fMap () << 16.0f, 17.00f, 18.00f;
+ cloud[6].getVector3fMap () << 19.0f, 20.00f, 21.00f;
+ cloud[7].getVector3fMap () << 22.0f, 23.00f, 24.00f;
// Random points
- cloud.points[8].getVector3fMap () << -5.0f, 1.57f, 0.75f;
- cloud.points[9].getVector3fMap () << 4.0f, 2.00f, 3.00f;
+ cloud[8].getVector3fMap () << -5.0f, 1.57f, 0.75f;
+ cloud[9].getVector3fMap () << 4.0f, 2.00f, 3.00f;
// Line 2 (parallel to the Z axis)
- cloud.points[10].getVector3fMap () << -1.00f, 5.00f, 0.0f;
- cloud.points[11].getVector3fMap () << -1.05f, 5.01f, 1.0f;
- cloud.points[12].getVector3fMap () << -1.01f, 5.05f, 2.0f;
- cloud.points[13].getVector3fMap () << -1.05f, 5.01f, 3.0f;
- cloud.points[14].getVector3fMap () << -1.01f, 5.05f, 4.0f;
- cloud.points[15].getVector3fMap () << -1.05f, 5.01f, 5.0f;
+ cloud[10].getVector3fMap () << -1.00f, 5.00f, 0.0f;
+ cloud[11].getVector3fMap () << -1.05f, 5.01f, 1.0f;
+ cloud[12].getVector3fMap () << -1.01f, 5.05f, 2.0f;
+ cloud[13].getVector3fMap () << -1.05f, 5.01f, 3.0f;
+ cloud[14].getVector3fMap () << -1.01f, 5.05f, 4.0f;
+ cloud[15].getVector3fMap () << -1.05f, 5.01f, 5.0f;
// Create a shared line model pointer directly
const double eps = 0.1; //angle eps in radians
PointCloud<PointXYZ> proj_points;
model->projectPoints (inliers, coeff, proj_points);
- EXPECT_XYZ_NEAR (PointXYZ (-1.05, 5.05, 3.0), proj_points.points[13], 0.1);
- EXPECT_XYZ_NEAR (PointXYZ (-1.05, 5.05, 4.0), proj_points.points[14], 0.1);
+ EXPECT_XYZ_NEAR (PointXYZ (-1.05, 5.05, 3.0), proj_points[13], 0.1);
+ EXPECT_XYZ_NEAR (PointXYZ (-1.05, 5.05, 4.0), proj_points[14], 0.1);
}
int
// Projection tests
PointCloud<PointXYZ> proj_points;
model->projectPoints (inliers, coeff_refined, proj_points);
- EXPECT_XYZ_NEAR (PointXYZ (1.1266, 0.0152, -0.0156), proj_points.points[20], proj_tol);
- EXPECT_XYZ_NEAR (PointXYZ (1.1843, -0.0635, -0.0201), proj_points.points[30], proj_tol);
- EXPECT_XYZ_NEAR (PointXYZ (1.0749, -0.0586, 0.0587), proj_points.points[50], proj_tol);
+ EXPECT_XYZ_NEAR (PointXYZ (1.1266, 0.0152, -0.0156), proj_points[20], proj_tol);
+ EXPECT_XYZ_NEAR (PointXYZ (1.1843, -0.0635, -0.0201), proj_points[30], proj_tol);
+ EXPECT_XYZ_NEAR (PointXYZ (1.0749, -0.0586, 0.0587), proj_points[50], proj_tol);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Basic tests
PointCloud<PointXYZ>::ConstPtr cloud = model->getInputCloud ();
- ASSERT_EQ (cloud_->points.size (), cloud->points.size ());
+ ASSERT_EQ (cloud_->size (), cloud->size ());
model->setInputCloud (cloud);
cloud = model->getInputCloud ();
- ASSERT_EQ (cloud_->points.size (), cloud->points.size ());
+ ASSERT_EQ (cloud_->size (), cloud->size ());
auto indices = model->getIndices ();
ASSERT_EQ (indices_.size (), indices->size ());
for (std::size_t idx = 0; idx < cloud.size (); ++idx)
{
- cloud.points[idx].x = static_cast<float> ((rand () % 200) - 100);
- cloud.points[idx].y = static_cast<float> ((rand () % 200) - 100);
- cloud.points[idx].z = 0.0f;
+ cloud[idx].x = static_cast<float> ((rand () % 200) - 100);
+ cloud[idx].y = static_cast<float> ((rand () % 200) - 100);
+ cloud[idx].z = 0.0f;
- normals.points[idx].normal_x = 0.0f;
- normals.points[idx].normal_y = 0.0f;
- normals.points[idx].normal_z = 1.0f;
+ normals[idx].normal_x = 0.0f;
+ normals[idx].normal_y = 0.0f;
+ normals[idx].normal_z = 1.0f;
}
// Create a shared plane model pointer directly
fromPCLPointCloud2 (cloud_blob, *cloud_);
fromPCLPointCloud2 (cloud_blob, *normals_);
- indices_.resize (cloud_->points.size ());
+ indices_.resize (cloud_->size ());
for (std::size_t i = 0; i < indices_.size (); ++i) { indices_[i] = int (i); }
testing::InitGoogleTest (&argc, argv);
// Use a custom point cloud for these tests until we need something better
PointCloud<PointXYZ> cloud;
cloud.points.resize (10);
- cloud.points[0].getVector3fMap () << 1.7068f, 1.0684f, 2.2147f;
- cloud.points[1].getVector3fMap () << 2.4708f, 2.3081f, 1.1736f;
- cloud.points[2].getVector3fMap () << 2.7609f, 1.9095f, 1.3574f;
- cloud.points[3].getVector3fMap () << 2.8016f, 1.6704f, 1.5009f;
- cloud.points[4].getVector3fMap () << 1.8517f, 2.0276f, 1.0112f;
- cloud.points[5].getVector3fMap () << 1.8726f, 1.3539f, 2.7523f;
- cloud.points[6].getVector3fMap () << 2.5179f, 2.3218f, 1.2074f;
- cloud.points[7].getVector3fMap () << 2.4026f, 2.5114f, 2.7588f;
- cloud.points[8].getVector3fMap () << 2.6999f, 2.5606f, 1.5571f;
- cloud.points[9].getVector3fMap () << 0.0000f, 0.0000f, 0.0000f;
+ cloud[0].getVector3fMap () << 1.7068f, 1.0684f, 2.2147f;
+ cloud[1].getVector3fMap () << 2.4708f, 2.3081f, 1.1736f;
+ cloud[2].getVector3fMap () << 2.7609f, 1.9095f, 1.3574f;
+ cloud[3].getVector3fMap () << 2.8016f, 1.6704f, 1.5009f;
+ cloud[4].getVector3fMap () << 1.8517f, 2.0276f, 1.0112f;
+ cloud[5].getVector3fMap () << 1.8726f, 1.3539f, 2.7523f;
+ cloud[6].getVector3fMap () << 2.5179f, 2.3218f, 1.2074f;
+ cloud[7].getVector3fMap () << 2.4026f, 2.5114f, 2.7588f;
+ cloud[8].getVector3fMap () << 2.6999f, 2.5606f, 1.5571f;
+ cloud[9].getVector3fMap () << 0.0000f, 0.0000f, 0.0000f;
// Create a shared sphere model pointer directly
SampleConsensusModelSpherePtr model (new SampleConsensusModelSphere<PointXYZ> (cloud.makeShared ()));
PointCloud<PointXYZ> cloud;
PointCloud<Normal> normals;
cloud.points.resize (27); normals.points.resize (27);
- cloud.points[ 0].getVector3fMap () << -0.014695f, 0.009549f, 0.954775f;
- cloud.points[ 1].getVector3fMap () << 0.014695f, 0.009549f, 0.954775f;
- cloud.points[ 2].getVector3fMap () << -0.014695f, 0.040451f, 0.954775f;
- cloud.points[ 3].getVector3fMap () << 0.014695f, 0.040451f, 0.954775f;
- cloud.points[ 4].getVector3fMap () << -0.009082f, -0.015451f, 0.972049f;
- cloud.points[ 5].getVector3fMap () << 0.009082f, -0.015451f, 0.972049f;
- cloud.points[ 6].getVector3fMap () << -0.038471f, 0.009549f, 0.972049f;
- cloud.points[ 7].getVector3fMap () << 0.038471f, 0.009549f, 0.972049f;
- cloud.points[ 8].getVector3fMap () << -0.038471f, 0.040451f, 0.972049f;
- cloud.points[ 9].getVector3fMap () << 0.038471f, 0.040451f, 0.972049f;
- cloud.points[10].getVector3fMap () << -0.009082f, 0.065451f, 0.972049f;
- cloud.points[11].getVector3fMap () << 0.009082f, 0.065451f, 0.972049f;
- cloud.points[12].getVector3fMap () << -0.023776f, -0.015451f, 0.982725f;
- cloud.points[13].getVector3fMap () << 0.023776f, -0.015451f, 0.982725f;
- cloud.points[14].getVector3fMap () << -0.023776f, 0.065451f, 0.982725f;
- cloud.points[15].getVector3fMap () << 0.023776f, 0.065451f, 0.982725f;
- cloud.points[16].getVector3fMap () << -0.000000f, -0.025000f, 1.000000f;
- cloud.points[17].getVector3fMap () << 0.000000f, -0.025000f, 1.000000f;
- cloud.points[18].getVector3fMap () << -0.029389f, -0.015451f, 1.000000f;
- cloud.points[19].getVector3fMap () << 0.029389f, -0.015451f, 1.000000f;
- cloud.points[20].getVector3fMap () << -0.047553f, 0.009549f, 1.000000f;
- cloud.points[21].getVector3fMap () << 0.047553f, 0.009549f, 1.000000f;
- cloud.points[22].getVector3fMap () << -0.047553f, 0.040451f, 1.000000f;
- cloud.points[23].getVector3fMap () << 0.047553f, 0.040451f, 1.000000f;
- cloud.points[24].getVector3fMap () << -0.029389f, 0.065451f, 1.000000f;
- cloud.points[25].getVector3fMap () << 0.029389f, 0.065451f, 1.000000f;
- cloud.points[26].getVector3fMap () << 0.000000f, 0.075000f, 1.000000f;
-
- normals.points[ 0].getNormalVector3fMap () << -0.293893f, -0.309017f, -0.904509f;
- normals.points[ 1].getNormalVector3fMap () << 0.293893f, -0.309017f, -0.904508f;
- normals.points[ 2].getNormalVector3fMap () << -0.293893f, 0.309017f, -0.904509f;
- normals.points[ 3].getNormalVector3fMap () << 0.293893f, 0.309017f, -0.904508f;
- normals.points[ 4].getNormalVector3fMap () << -0.181636f, -0.809017f, -0.559017f;
- normals.points[ 5].getNormalVector3fMap () << 0.181636f, -0.809017f, -0.559017f;
- normals.points[ 6].getNormalVector3fMap () << -0.769421f, -0.309017f, -0.559017f;
- normals.points[ 7].getNormalVector3fMap () << 0.769421f, -0.309017f, -0.559017f;
- normals.points[ 8].getNormalVector3fMap () << -0.769421f, 0.309017f, -0.559017f;
- normals.points[ 9].getNormalVector3fMap () << 0.769421f, 0.309017f, -0.559017f;
- normals.points[10].getNormalVector3fMap () << -0.181636f, 0.809017f, -0.559017f;
- normals.points[11].getNormalVector3fMap () << 0.181636f, 0.809017f, -0.559017f;
- normals.points[12].getNormalVector3fMap () << -0.475528f, -0.809017f, -0.345491f;
- normals.points[13].getNormalVector3fMap () << 0.475528f, -0.809017f, -0.345491f;
- normals.points[14].getNormalVector3fMap () << -0.475528f, 0.809017f, -0.345491f;
- normals.points[15].getNormalVector3fMap () << 0.475528f, 0.809017f, -0.345491f;
- normals.points[16].getNormalVector3fMap () << -0.000000f, -1.000000f, 0.000000f;
- normals.points[17].getNormalVector3fMap () << 0.000000f, -1.000000f, 0.000000f;
- normals.points[18].getNormalVector3fMap () << -0.587785f, -0.809017f, 0.000000f;
- normals.points[19].getNormalVector3fMap () << 0.587785f, -0.809017f, 0.000000f;
- normals.points[20].getNormalVector3fMap () << -0.951057f, -0.309017f, 0.000000f;
- normals.points[21].getNormalVector3fMap () << 0.951057f, -0.309017f, 0.000000f;
- normals.points[22].getNormalVector3fMap () << -0.951057f, 0.309017f, 0.000000f;
- normals.points[23].getNormalVector3fMap () << 0.951057f, 0.309017f, 0.000000f;
- normals.points[24].getNormalVector3fMap () << -0.587785f, 0.809017f, 0.000000f;
- normals.points[25].getNormalVector3fMap () << 0.587785f, 0.809017f, 0.000000f;
- normals.points[26].getNormalVector3fMap () << 0.000000f, 1.000000f, 0.000000f;
+ cloud[ 0].getVector3fMap () << -0.014695f, 0.009549f, 0.954775f;
+ cloud[ 1].getVector3fMap () << 0.014695f, 0.009549f, 0.954775f;
+ cloud[ 2].getVector3fMap () << -0.014695f, 0.040451f, 0.954775f;
+ cloud[ 3].getVector3fMap () << 0.014695f, 0.040451f, 0.954775f;
+ cloud[ 4].getVector3fMap () << -0.009082f, -0.015451f, 0.972049f;
+ cloud[ 5].getVector3fMap () << 0.009082f, -0.015451f, 0.972049f;
+ cloud[ 6].getVector3fMap () << -0.038471f, 0.009549f, 0.972049f;
+ cloud[ 7].getVector3fMap () << 0.038471f, 0.009549f, 0.972049f;
+ cloud[ 8].getVector3fMap () << -0.038471f, 0.040451f, 0.972049f;
+ cloud[ 9].getVector3fMap () << 0.038471f, 0.040451f, 0.972049f;
+ cloud[10].getVector3fMap () << -0.009082f, 0.065451f, 0.972049f;
+ cloud[11].getVector3fMap () << 0.009082f, 0.065451f, 0.972049f;
+ cloud[12].getVector3fMap () << -0.023776f, -0.015451f, 0.982725f;
+ cloud[13].getVector3fMap () << 0.023776f, -0.015451f, 0.982725f;
+ cloud[14].getVector3fMap () << -0.023776f, 0.065451f, 0.982725f;
+ cloud[15].getVector3fMap () << 0.023776f, 0.065451f, 0.982725f;
+ cloud[16].getVector3fMap () << -0.000000f, -0.025000f, 1.000000f;
+ cloud[17].getVector3fMap () << 0.000000f, -0.025000f, 1.000000f;
+ cloud[18].getVector3fMap () << -0.029389f, -0.015451f, 1.000000f;
+ cloud[19].getVector3fMap () << 0.029389f, -0.015451f, 1.000000f;
+ cloud[20].getVector3fMap () << -0.047553f, 0.009549f, 1.000000f;
+ cloud[21].getVector3fMap () << 0.047553f, 0.009549f, 1.000000f;
+ cloud[22].getVector3fMap () << -0.047553f, 0.040451f, 1.000000f;
+ cloud[23].getVector3fMap () << 0.047553f, 0.040451f, 1.000000f;
+ cloud[24].getVector3fMap () << -0.029389f, 0.065451f, 1.000000f;
+ cloud[25].getVector3fMap () << 0.029389f, 0.065451f, 1.000000f;
+ cloud[26].getVector3fMap () << 0.000000f, 0.075000f, 1.000000f;
+
+ normals[ 0].getNormalVector3fMap () << -0.293893f, -0.309017f, -0.904509f;
+ normals[ 1].getNormalVector3fMap () << 0.293893f, -0.309017f, -0.904508f;
+ normals[ 2].getNormalVector3fMap () << -0.293893f, 0.309017f, -0.904509f;
+ normals[ 3].getNormalVector3fMap () << 0.293893f, 0.309017f, -0.904508f;
+ normals[ 4].getNormalVector3fMap () << -0.181636f, -0.809017f, -0.559017f;
+ normals[ 5].getNormalVector3fMap () << 0.181636f, -0.809017f, -0.559017f;
+ normals[ 6].getNormalVector3fMap () << -0.769421f, -0.309017f, -0.559017f;
+ normals[ 7].getNormalVector3fMap () << 0.769421f, -0.309017f, -0.559017f;
+ normals[ 8].getNormalVector3fMap () << -0.769421f, 0.309017f, -0.559017f;
+ normals[ 9].getNormalVector3fMap () << 0.769421f, 0.309017f, -0.559017f;
+ normals[10].getNormalVector3fMap () << -0.181636f, 0.809017f, -0.559017f;
+ normals[11].getNormalVector3fMap () << 0.181636f, 0.809017f, -0.559017f;
+ normals[12].getNormalVector3fMap () << -0.475528f, -0.809017f, -0.345491f;
+ normals[13].getNormalVector3fMap () << 0.475528f, -0.809017f, -0.345491f;
+ normals[14].getNormalVector3fMap () << -0.475528f, 0.809017f, -0.345491f;
+ normals[15].getNormalVector3fMap () << 0.475528f, 0.809017f, -0.345491f;
+ normals[16].getNormalVector3fMap () << -0.000000f, -1.000000f, 0.000000f;
+ normals[17].getNormalVector3fMap () << 0.000000f, -1.000000f, 0.000000f;
+ normals[18].getNormalVector3fMap () << -0.587785f, -0.809017f, 0.000000f;
+ normals[19].getNormalVector3fMap () << 0.587785f, -0.809017f, 0.000000f;
+ normals[20].getNormalVector3fMap () << -0.951057f, -0.309017f, 0.000000f;
+ normals[21].getNormalVector3fMap () << 0.951057f, -0.309017f, 0.000000f;
+ normals[22].getNormalVector3fMap () << -0.951057f, 0.309017f, 0.000000f;
+ normals[23].getNormalVector3fMap () << 0.951057f, 0.309017f, 0.000000f;
+ normals[24].getNormalVector3fMap () << -0.587785f, 0.809017f, 0.000000f;
+ normals[25].getNormalVector3fMap () << 0.587785f, 0.809017f, 0.000000f;
+ normals[26].getNormalVector3fMap () << 0.000000f, 1.000000f, 0.000000f;
// Create a shared sphere model pointer directly
SampleConsensusModelNormalSpherePtr model (new SampleConsensusModelNormalSphere<PointXYZ, Normal> (cloud.makeShared ()));
PointCloud<Normal> normals;
cloud.points.resize (31); normals.points.resize (31);
- cloud.points[ 0].getVector3fMap () << -0.011247f, 0.200000f, 0.965384f;
- cloud.points[ 1].getVector3fMap () << 0.000000f, 0.200000f, 0.963603f;
- cloud.points[ 2].getVector3fMap () << 0.011247f, 0.200000f, 0.965384f;
- cloud.points[ 3].getVector3fMap () << -0.016045f, 0.175000f, 0.977916f;
- cloud.points[ 4].getVector3fMap () << -0.008435f, 0.175000f, 0.974038f;
- cloud.points[ 5].getVector3fMap () << 0.004218f, 0.175000f, 0.973370f;
- cloud.points[ 6].getVector3fMap () << 0.016045f, 0.175000f, 0.977916f;
- cloud.points[ 7].getVector3fMap () << -0.025420f, 0.200000f, 0.974580f;
- cloud.points[ 8].getVector3fMap () << 0.025420f, 0.200000f, 0.974580f;
- cloud.points[ 9].getVector3fMap () << -0.012710f, 0.150000f, 0.987290f;
- cloud.points[10].getVector3fMap () << -0.005624f, 0.150000f, 0.982692f;
- cloud.points[11].getVector3fMap () << 0.002812f, 0.150000f, 0.982247f;
- cloud.points[12].getVector3fMap () << 0.012710f, 0.150000f, 0.987290f;
- cloud.points[13].getVector3fMap () << -0.022084f, 0.175000f, 0.983955f;
- cloud.points[14].getVector3fMap () << 0.022084f, 0.175000f, 0.983955f;
- cloud.points[15].getVector3fMap () << -0.034616f, 0.200000f, 0.988753f;
- cloud.points[16].getVector3fMap () << 0.034616f, 0.200000f, 0.988753f;
- cloud.points[17].getVector3fMap () << -0.006044f, 0.125000f, 0.993956f;
- cloud.points[18].getVector3fMap () << 0.004835f, 0.125000f, 0.993345f;
- cloud.points[19].getVector3fMap () << -0.017308f, 0.150000f, 0.994376f;
- cloud.points[20].getVector3fMap () << 0.017308f, 0.150000f, 0.994376f;
- cloud.points[21].getVector3fMap () << -0.025962f, 0.175000f, 0.991565f;
- cloud.points[22].getVector3fMap () << 0.025962f, 0.175000f, 0.991565f;
- cloud.points[23].getVector3fMap () << -0.009099f, 0.125000f, 1.000000f;
- cloud.points[24].getVector3fMap () << 0.009099f, 0.125000f, 1.000000f;
- cloud.points[25].getVector3fMap () << -0.018199f, 0.150000f, 1.000000f;
- cloud.points[26].getVector3fMap () << 0.018199f, 0.150000f, 1.000000f;
- cloud.points[27].getVector3fMap () << -0.027298f, 0.175000f, 1.000000f;
- cloud.points[28].getVector3fMap () << 0.027298f, 0.175000f, 1.000000f;
- cloud.points[29].getVector3fMap () << -0.036397f, 0.200000f, 1.000000f;
- cloud.points[30].getVector3fMap () << 0.036397f, 0.200000f, 1.000000f;
-
- normals.points[ 0].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals.points[ 1].getNormalVector3fMap () << 0.000000f, -0.342020f, -0.939693f;
- normals.points[ 2].getNormalVector3fMap () << 0.290381f, -0.342020f, -0.893701f;
- normals.points[ 3].getNormalVector3fMap () << -0.552338f, -0.342020f, -0.760227f;
- normals.points[ 4].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals.points[ 5].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
- normals.points[ 6].getNormalVector3fMap () << 0.552337f, -0.342020f, -0.760227f;
- normals.points[ 7].getNormalVector3fMap () << -0.656282f, -0.342020f, -0.656283f;
- normals.points[ 8].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656283f;
- normals.points[ 9].getNormalVector3fMap () << -0.656283f, -0.342020f, -0.656282f;
- normals.points[10].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
- normals.points[11].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
- normals.points[12].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656282f;
- normals.points[13].getNormalVector3fMap () << -0.760228f, -0.342020f, -0.552337f;
- normals.points[14].getNormalVector3fMap () << 0.760228f, -0.342020f, -0.552337f;
- normals.points[15].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
- normals.points[16].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
- normals.points[17].getNormalVector3fMap () << -0.624162f, -0.342020f, -0.624162f;
- normals.points[18].getNormalVector3fMap () << 0.499329f, -0.342020f, -0.687268f;
- normals.points[19].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
- normals.points[20].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
- normals.points[21].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290381f;
- normals.points[22].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290381f;
- normals.points[23].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals.points[24].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals.points[25].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals.points[26].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals.points[27].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals.points[28].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
- normals.points[29].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
- normals.points[30].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
+ cloud[ 0].getVector3fMap () << -0.011247f, 0.200000f, 0.965384f;
+ cloud[ 1].getVector3fMap () << 0.000000f, 0.200000f, 0.963603f;
+ cloud[ 2].getVector3fMap () << 0.011247f, 0.200000f, 0.965384f;
+ cloud[ 3].getVector3fMap () << -0.016045f, 0.175000f, 0.977916f;
+ cloud[ 4].getVector3fMap () << -0.008435f, 0.175000f, 0.974038f;
+ cloud[ 5].getVector3fMap () << 0.004218f, 0.175000f, 0.973370f;
+ cloud[ 6].getVector3fMap () << 0.016045f, 0.175000f, 0.977916f;
+ cloud[ 7].getVector3fMap () << -0.025420f, 0.200000f, 0.974580f;
+ cloud[ 8].getVector3fMap () << 0.025420f, 0.200000f, 0.974580f;
+ cloud[ 9].getVector3fMap () << -0.012710f, 0.150000f, 0.987290f;
+ cloud[10].getVector3fMap () << -0.005624f, 0.150000f, 0.982692f;
+ cloud[11].getVector3fMap () << 0.002812f, 0.150000f, 0.982247f;
+ cloud[12].getVector3fMap () << 0.012710f, 0.150000f, 0.987290f;
+ cloud[13].getVector3fMap () << -0.022084f, 0.175000f, 0.983955f;
+ cloud[14].getVector3fMap () << 0.022084f, 0.175000f, 0.983955f;
+ cloud[15].getVector3fMap () << -0.034616f, 0.200000f, 0.988753f;
+ cloud[16].getVector3fMap () << 0.034616f, 0.200000f, 0.988753f;
+ cloud[17].getVector3fMap () << -0.006044f, 0.125000f, 0.993956f;
+ cloud[18].getVector3fMap () << 0.004835f, 0.125000f, 0.993345f;
+ cloud[19].getVector3fMap () << -0.017308f, 0.150000f, 0.994376f;
+ cloud[20].getVector3fMap () << 0.017308f, 0.150000f, 0.994376f;
+ cloud[21].getVector3fMap () << -0.025962f, 0.175000f, 0.991565f;
+ cloud[22].getVector3fMap () << 0.025962f, 0.175000f, 0.991565f;
+ cloud[23].getVector3fMap () << -0.009099f, 0.125000f, 1.000000f;
+ cloud[24].getVector3fMap () << 0.009099f, 0.125000f, 1.000000f;
+ cloud[25].getVector3fMap () << -0.018199f, 0.150000f, 1.000000f;
+ cloud[26].getVector3fMap () << 0.018199f, 0.150000f, 1.000000f;
+ cloud[27].getVector3fMap () << -0.027298f, 0.175000f, 1.000000f;
+ cloud[28].getVector3fMap () << 0.027298f, 0.175000f, 1.000000f;
+ cloud[29].getVector3fMap () << -0.036397f, 0.200000f, 1.000000f;
+ cloud[30].getVector3fMap () << 0.036397f, 0.200000f, 1.000000f;
+
+ normals[ 0].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
+ normals[ 1].getNormalVector3fMap () << 0.000000f, -0.342020f, -0.939693f;
+ normals[ 2].getNormalVector3fMap () << 0.290381f, -0.342020f, -0.893701f;
+ normals[ 3].getNormalVector3fMap () << -0.552338f, -0.342020f, -0.760227f;
+ normals[ 4].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
+ normals[ 5].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
+ normals[ 6].getNormalVector3fMap () << 0.552337f, -0.342020f, -0.760227f;
+ normals[ 7].getNormalVector3fMap () << -0.656282f, -0.342020f, -0.656283f;
+ normals[ 8].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656283f;
+ normals[ 9].getNormalVector3fMap () << -0.656283f, -0.342020f, -0.656282f;
+ normals[10].getNormalVector3fMap () << -0.290381f, -0.342020f, -0.893701f;
+ normals[11].getNormalVector3fMap () << 0.145191f, -0.342020f, -0.916697f;
+ normals[12].getNormalVector3fMap () << 0.656282f, -0.342020f, -0.656282f;
+ normals[13].getNormalVector3fMap () << -0.760228f, -0.342020f, -0.552337f;
+ normals[14].getNormalVector3fMap () << 0.760228f, -0.342020f, -0.552337f;
+ normals[15].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
+ normals[16].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
+ normals[17].getNormalVector3fMap () << -0.624162f, -0.342020f, -0.624162f;
+ normals[18].getNormalVector3fMap () << 0.499329f, -0.342020f, -0.687268f;
+ normals[19].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290380f;
+ normals[20].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290380f;
+ normals[21].getNormalVector3fMap () << -0.893701f, -0.342020f, -0.290381f;
+ normals[22].getNormalVector3fMap () << 0.893701f, -0.342020f, -0.290381f;
+ normals[23].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
+ normals[24].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
+ normals[25].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
+ normals[26].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
+ normals[27].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
+ normals[28].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
+ normals[29].getNormalVector3fMap () << -0.939693f, -0.342020f, 0.000000f;
+ normals[30].getNormalVector3fMap () << 0.939693f, -0.342020f, 0.000000f;
// Create a shared cone model pointer directly
SampleConsensusModelConePtr model (new SampleConsensusModelCone<PointXYZ, Normal> (cloud.makeShared ()));
PointCloud<Normal> normals;
cloud.points.resize (20); normals.points.resize (20);
- cloud.points[ 0].getVector3fMap () << -0.499902f, 2.199701f, 0.000008f;
- cloud.points[ 1].getVector3fMap () << -0.875397f, 2.030177f, 0.050104f;
- cloud.points[ 2].getVector3fMap () << -0.995875f, 1.635973f, 0.099846f;
- cloud.points[ 3].getVector3fMap () << -0.779523f, 1.285527f, 0.149961f;
- cloud.points[ 4].getVector3fMap () << -0.373285f, 1.216488f, 0.199959f;
- cloud.points[ 5].getVector3fMap () << -0.052893f, 1.475973f, 0.250101f;
- cloud.points[ 6].getVector3fMap () << -0.036558f, 1.887591f, 0.299839f;
- cloud.points[ 7].getVector3fMap () << -0.335048f, 2.171994f, 0.350001f;
- cloud.points[ 8].getVector3fMap () << -0.745456f, 2.135528f, 0.400072f;
- cloud.points[ 9].getVector3fMap () << -0.989282f, 1.803311f, 0.449983f;
- cloud.points[10].getVector3fMap () << -0.900651f, 1.400701f, 0.500126f;
- cloud.points[11].getVector3fMap () << -0.539658f, 1.201468f, 0.550079f;
- cloud.points[12].getVector3fMap () << -0.151875f, 1.340951f, 0.599983f;
- cloud.points[13].getVector3fMap () << -0.000724f, 1.724373f, 0.649882f;
- cloud.points[14].getVector3fMap () << -0.188573f, 2.090983f, 0.699854f;
- cloud.points[15].getVector3fMap () << -0.587925f, 2.192257f, 0.749956f;
- cloud.points[16].getVector3fMap () << -0.927724f, 1.958846f, 0.800008f;
- cloud.points[17].getVector3fMap () << -0.976888f, 1.549655f, 0.849970f;
- cloud.points[18].getVector3fMap () << -0.702003f, 1.242707f, 0.899954f;
- cloud.points[19].getVector3fMap () << -0.289916f, 1.246296f, 0.950075f;
-
- normals.points[ 0].getNormalVector3fMap () << 0.000098f, 1.000098f, 0.000008f;
- normals.points[ 1].getNormalVector3fMap () << -0.750891f, 0.660413f, 0.000104f;
- normals.points[ 2].getNormalVector3fMap () << -0.991765f, -0.127949f, -0.000154f;
- normals.points[ 3].getNormalVector3fMap () << -0.558918f, -0.829439f, -0.000039f;
- normals.points[ 4].getNormalVector3fMap () << 0.253627f, -0.967447f, -0.000041f;
- normals.points[ 5].getNormalVector3fMap () << 0.894105f, -0.447965f, 0.000101f;
- normals.points[ 6].getNormalVector3fMap () << 0.926852f, 0.375543f, -0.000161f;
- normals.points[ 7].getNormalVector3fMap () << 0.329948f, 0.943941f, 0.000001f;
- normals.points[ 8].getNormalVector3fMap () << -0.490966f, 0.871203f, 0.000072f;
- normals.points[ 9].getNormalVector3fMap () << -0.978507f, 0.206425f, -0.000017f;
- normals.points[10].getNormalVector3fMap () << -0.801227f, -0.598534f, 0.000126f;
- normals.points[11].getNormalVector3fMap () << -0.079447f, -0.996697f, 0.000079f;
- normals.points[12].getNormalVector3fMap () << 0.696154f, -0.717889f, -0.000017f;
- normals.points[13].getNormalVector3fMap () << 0.998685f, 0.048502f, -0.000118f;
- normals.points[14].getNormalVector3fMap () << 0.622933f, 0.782133f, -0.000146f;
- normals.points[15].getNormalVector3fMap () << -0.175948f, 0.984480f, -0.000044f;
- normals.points[16].getNormalVector3fMap () << -0.855476f, 0.517824f, 0.000008f;
- normals.points[17].getNormalVector3fMap () << -0.953769f, -0.300571f, -0.000030f;
- normals.points[18].getNormalVector3fMap () << -0.404035f, -0.914700f, -0.000046f;
- normals.points[19].getNormalVector3fMap () << 0.420154f, -0.907445f, 0.000075f;
+ cloud[ 0].getVector3fMap () << -0.499902f, 2.199701f, 0.000008f;
+ cloud[ 1].getVector3fMap () << -0.875397f, 2.030177f, 0.050104f;
+ cloud[ 2].getVector3fMap () << -0.995875f, 1.635973f, 0.099846f;
+ cloud[ 3].getVector3fMap () << -0.779523f, 1.285527f, 0.149961f;
+ cloud[ 4].getVector3fMap () << -0.373285f, 1.216488f, 0.199959f;
+ cloud[ 5].getVector3fMap () << -0.052893f, 1.475973f, 0.250101f;
+ cloud[ 6].getVector3fMap () << -0.036558f, 1.887591f, 0.299839f;
+ cloud[ 7].getVector3fMap () << -0.335048f, 2.171994f, 0.350001f;
+ cloud[ 8].getVector3fMap () << -0.745456f, 2.135528f, 0.400072f;
+ cloud[ 9].getVector3fMap () << -0.989282f, 1.803311f, 0.449983f;
+ cloud[10].getVector3fMap () << -0.900651f, 1.400701f, 0.500126f;
+ cloud[11].getVector3fMap () << -0.539658f, 1.201468f, 0.550079f;
+ cloud[12].getVector3fMap () << -0.151875f, 1.340951f, 0.599983f;
+ cloud[13].getVector3fMap () << -0.000724f, 1.724373f, 0.649882f;
+ cloud[14].getVector3fMap () << -0.188573f, 2.090983f, 0.699854f;
+ cloud[15].getVector3fMap () << -0.587925f, 2.192257f, 0.749956f;
+ cloud[16].getVector3fMap () << -0.927724f, 1.958846f, 0.800008f;
+ cloud[17].getVector3fMap () << -0.976888f, 1.549655f, 0.849970f;
+ cloud[18].getVector3fMap () << -0.702003f, 1.242707f, 0.899954f;
+ cloud[19].getVector3fMap () << -0.289916f, 1.246296f, 0.950075f;
+
+ normals[ 0].getNormalVector3fMap () << 0.000098f, 1.000098f, 0.000008f;
+ normals[ 1].getNormalVector3fMap () << -0.750891f, 0.660413f, 0.000104f;
+ normals[ 2].getNormalVector3fMap () << -0.991765f, -0.127949f, -0.000154f;
+ normals[ 3].getNormalVector3fMap () << -0.558918f, -0.829439f, -0.000039f;
+ normals[ 4].getNormalVector3fMap () << 0.253627f, -0.967447f, -0.000041f;
+ normals[ 5].getNormalVector3fMap () << 0.894105f, -0.447965f, 0.000101f;
+ normals[ 6].getNormalVector3fMap () << 0.926852f, 0.375543f, -0.000161f;
+ normals[ 7].getNormalVector3fMap () << 0.329948f, 0.943941f, 0.000001f;
+ normals[ 8].getNormalVector3fMap () << -0.490966f, 0.871203f, 0.000072f;
+ normals[ 9].getNormalVector3fMap () << -0.978507f, 0.206425f, -0.000017f;
+ normals[10].getNormalVector3fMap () << -0.801227f, -0.598534f, 0.000126f;
+ normals[11].getNormalVector3fMap () << -0.079447f, -0.996697f, 0.000079f;
+ normals[12].getNormalVector3fMap () << 0.696154f, -0.717889f, -0.000017f;
+ normals[13].getNormalVector3fMap () << 0.998685f, 0.048502f, -0.000118f;
+ normals[14].getNormalVector3fMap () << 0.622933f, 0.782133f, -0.000146f;
+ normals[15].getNormalVector3fMap () << -0.175948f, 0.984480f, -0.000044f;
+ normals[16].getNormalVector3fMap () << -0.855476f, 0.517824f, 0.000008f;
+ normals[17].getNormalVector3fMap () << -0.953769f, -0.300571f, -0.000030f;
+ normals[18].getNormalVector3fMap () << -0.404035f, -0.914700f, -0.000046f;
+ normals[19].getNormalVector3fMap () << 0.420154f, -0.907445f, 0.000075f;
// Create a shared cylinder model pointer directly
SampleConsensusModelCylinderPtr model (new SampleConsensusModelCylinder<PointXYZ, Normal> (cloud.makeShared ()));
PointCloud<PointXYZ> cloud;
cloud.points.resize (18);
- cloud.points[ 0].getVector3fMap () << 3.587751f, -4.190982f, 0.0f;
- cloud.points[ 1].getVector3fMap () << 3.808883f, -4.412265f, 0.0f;
- cloud.points[ 2].getVector3fMap () << 3.587525f, -5.809143f, 0.0f;
- cloud.points[ 3].getVector3fMap () << 2.999913f, -5.999980f, 0.0f;
- cloud.points[ 4].getVector3fMap () << 2.412224f, -5.809090f, 0.0f;
- cloud.points[ 5].getVector3fMap () << 2.191080f, -5.587682f, 0.0f;
- cloud.points[ 6].getVector3fMap () << 2.048941f, -5.309003f, 0.0f;
- cloud.points[ 7].getVector3fMap () << 2.000397f, -4.999944f, 0.0f;
- cloud.points[ 8].getVector3fMap () << 2.999953f, -6.000056f, 0.0f;
- cloud.points[ 9].getVector3fMap () << 2.691127f, -5.951136f, 0.0f;
- cloud.points[10].getVector3fMap () << 2.190892f, -5.587838f, 0.0f;
- cloud.points[11].getVector3fMap () << 2.048874f, -5.309052f, 0.0f;
- cloud.points[12].getVector3fMap () << 1.999990f, -5.000147f, 0.0f;
- cloud.points[13].getVector3fMap () << 2.049026f, -4.690918f, 0.0f;
- cloud.points[14].getVector3fMap () << 2.190956f, -4.412162f, 0.0f;
- cloud.points[15].getVector3fMap () << 2.412231f, -4.190918f, 0.0f;
- cloud.points[16].getVector3fMap () << 2.691027f, -4.049060f, 0.0f;
- cloud.points[17].getVector3fMap () << 2.000000f, -3.000000f, 0.0f;
+ cloud[ 0].getVector3fMap () << 3.587751f, -4.190982f, 0.0f;
+ cloud[ 1].getVector3fMap () << 3.808883f, -4.412265f, 0.0f;
+ cloud[ 2].getVector3fMap () << 3.587525f, -5.809143f, 0.0f;
+ cloud[ 3].getVector3fMap () << 2.999913f, -5.999980f, 0.0f;
+ cloud[ 4].getVector3fMap () << 2.412224f, -5.809090f, 0.0f;
+ cloud[ 5].getVector3fMap () << 2.191080f, -5.587682f, 0.0f;
+ cloud[ 6].getVector3fMap () << 2.048941f, -5.309003f, 0.0f;
+ cloud[ 7].getVector3fMap () << 2.000397f, -4.999944f, 0.0f;
+ cloud[ 8].getVector3fMap () << 2.999953f, -6.000056f, 0.0f;
+ cloud[ 9].getVector3fMap () << 2.691127f, -5.951136f, 0.0f;
+ cloud[10].getVector3fMap () << 2.190892f, -5.587838f, 0.0f;
+ cloud[11].getVector3fMap () << 2.048874f, -5.309052f, 0.0f;
+ cloud[12].getVector3fMap () << 1.999990f, -5.000147f, 0.0f;
+ cloud[13].getVector3fMap () << 2.049026f, -4.690918f, 0.0f;
+ cloud[14].getVector3fMap () << 2.190956f, -4.412162f, 0.0f;
+ cloud[15].getVector3fMap () << 2.412231f, -4.190918f, 0.0f;
+ cloud[16].getVector3fMap () << 2.691027f, -4.049060f, 0.0f;
+ cloud[17].getVector3fMap () << 2.000000f, -3.000000f, 0.0f;
// Create a shared 2d circle model pointer directly
SampleConsensusModelCircle2DPtr model (new SampleConsensusModelCircle2D<PointXYZ> (cloud.makeShared ()));
PointCloud<PointXYZ> cloud;
cloud.points.resize (20);
- cloud.points[ 0].getVector3fMap () << 1.00000000f, 5.0000000f, -2.9000001f;
- cloud.points[ 1].getVector3fMap () << 1.03420200f, 5.0000000f, -2.9060307f;
- cloud.points[ 2].getVector3fMap () << 1.06427870f, 5.0000000f, -2.9233956f;
- cloud.points[ 3].getVector3fMap () << 1.08660260f, 5.0000000f, -2.9500000f;
- cloud.points[ 4].getVector3fMap () << 1.09848080f, 5.0000000f, -2.9826353f;
- cloud.points[ 5].getVector3fMap () << 1.09848080f, 5.0000000f, -3.0173647f;
- cloud.points[ 6].getVector3fMap () << 1.08660260f, 5.0000000f, -3.0500000f;
- cloud.points[ 7].getVector3fMap () << 1.06427870f, 5.0000000f, -3.0766044f;
- cloud.points[ 8].getVector3fMap () << 1.03420200f, 5.0000000f, -3.0939693f;
- cloud.points[ 9].getVector3fMap () << 1.00000000f, 5.0000000f, -3.0999999f;
- cloud.points[10].getVector3fMap () << 0.96579796f, 5.0000000f, -3.0939693f;
- cloud.points[11].getVector3fMap () << 0.93572122f, 5.0000000f, -3.0766044f;
- cloud.points[12].getVector3fMap () << 0.91339743f, 5.0000000f, -3.0500000f;
- cloud.points[13].getVector3fMap () << 0.90151924f, 5.0000000f, -3.0173647f;
- cloud.points[14].getVector3fMap () << 0.90151924f, 5.0000000f, -2.9826353f;
- cloud.points[15].getVector3fMap () << 0.91339743f, 5.0000000f, -2.9500000f;
- cloud.points[16].getVector3fMap () << 0.93572122f, 5.0000000f, -2.9233956f;
- cloud.points[17].getVector3fMap () << 0.96579796f, 5.0000000f, -2.9060307f;
- cloud.points[18].getVector3fMap () << 0.85000002f, 4.8499999f, -3.1500001f;
- cloud.points[19].getVector3fMap () << 1.15000000f, 5.1500001f, -2.8499999f;
+ cloud[ 0].getVector3fMap () << 1.00000000f, 5.0000000f, -2.9000001f;
+ cloud[ 1].getVector3fMap () << 1.03420200f, 5.0000000f, -2.9060307f;
+ cloud[ 2].getVector3fMap () << 1.06427870f, 5.0000000f, -2.9233956f;
+ cloud[ 3].getVector3fMap () << 1.08660260f, 5.0000000f, -2.9500000f;
+ cloud[ 4].getVector3fMap () << 1.09848080f, 5.0000000f, -2.9826353f;
+ cloud[ 5].getVector3fMap () << 1.09848080f, 5.0000000f, -3.0173647f;
+ cloud[ 6].getVector3fMap () << 1.08660260f, 5.0000000f, -3.0500000f;
+ cloud[ 7].getVector3fMap () << 1.06427870f, 5.0000000f, -3.0766044f;
+ cloud[ 8].getVector3fMap () << 1.03420200f, 5.0000000f, -3.0939693f;
+ cloud[ 9].getVector3fMap () << 1.00000000f, 5.0000000f, -3.0999999f;
+ cloud[10].getVector3fMap () << 0.96579796f, 5.0000000f, -3.0939693f;
+ cloud[11].getVector3fMap () << 0.93572122f, 5.0000000f, -3.0766044f;
+ cloud[12].getVector3fMap () << 0.91339743f, 5.0000000f, -3.0500000f;
+ cloud[13].getVector3fMap () << 0.90151924f, 5.0000000f, -3.0173647f;
+ cloud[14].getVector3fMap () << 0.90151924f, 5.0000000f, -2.9826353f;
+ cloud[15].getVector3fMap () << 0.91339743f, 5.0000000f, -2.9500000f;
+ cloud[16].getVector3fMap () << 0.93572122f, 5.0000000f, -2.9233956f;
+ cloud[17].getVector3fMap () << 0.96579796f, 5.0000000f, -2.9060307f;
+ cloud[18].getVector3fMap () << 0.85000002f, 4.8499999f, -3.1500001f;
+ cloud[19].getVector3fMap () << 1.15000000f, 5.1500001f, -2.8499999f;
// Create a shared 3d circle model pointer directly
SampleConsensusModelCircle3DPtr model (new SampleConsensusModelCircle3D<PointXYZ> (cloud.makeShared ()));
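The hunks above only build the fixture clouds and construct the model objects; for context, here is a minimal sketch of how such a model is typically consumed (not part of the patch — the sphere model choice and the distance threshold are illustrative assumptions):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_sphere.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  cloud->resize (10);
  // ... fill the cloud, e.g. with the sphere fixture values listed above ...

  // Wrap the cloud in a sphere model and fit it with RANSAC.
  pcl::SampleConsensusModelSphere<pcl::PointXYZ>::Ptr model (
      new pcl::SampleConsensusModelSphere<pcl::PointXYZ> (cloud));
  pcl::RandomSampleConsensus<pcl::PointXYZ> sac (model, 0.03);  // illustrative threshold

  if (sac.computeModel ())
  {
    Eigen::VectorXf coefficients;  // centre (x, y, z) and radius for the sphere model
    sac.getModelCoefficients (coefficients);
  }
  return 0;
}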
FILES test_search.cpp
LINK_WITH pcl_gtest pcl_search pcl_io pcl_kdtree
ARGUMENTS "${PCL_SOURCE_DIR}/test/table_scene_mug_stereo_textured.pcd")
+
+ PCL_ADD_TEST(organized_index test_organized_index
+ FILES test_organized_index.cpp
+ LINK_WITH pcl_gtest pcl_search pcl_common pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/office1.pcd")
endif()
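For context on what the new organized_index target exercises, a minimal sketch of OrganizedNeighbor driven by the office1.pcd cloud passed via ARGUMENTS (an assumption for illustration, not a copy of test_organized_index.cpp):

#include <vector>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/organized.h>  // for OrganizedNeighbor

int main (int argc, char** argv)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  // The test receives the PCD path through ARGUMENTS; office1.pcd is an organized scan.
  if (argc < 2 || pcl::io::loadPCDFile (argv[1], *cloud) < 0)
    return 1;

  pcl::search::OrganizedNeighbor<pcl::PointXYZ> search;
  search.setInputCloud (cloud);

  pcl::PointXYZ query (0.0f, 0.0f, 1.0f);
  std::vector<int> k_indices;
  std::vector<float> k_sqr_distances;
  search.nearestKSearch (query, 10, k_indices, k_sqr_distances);
  return 0;
}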
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2011, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: test_feature.cpp 2177 2011-08-23 13:58:35Z shapovalov $
- *
- */
-#include <iostream>
-#include <pcl/test/gtest.h>
-#include <pcl/common/time.h>
-#include <pcl/search/pcl_search.h>
-#include <pcl/point_cloud.h>
-#include <pcl/point_types.h>
-#include <pcl/io/pcd_io.h>
-
-using namespace std;
-using namespace pcl;
-
-PointCloud<PointXYZ> cloud, cloud_big;
-
-void
-init ()
-{
- float resolution = 0.1;
- for (float z = -0.5f; z <= 0.5f; z += resolution)
- for (float y = -0.5f; y <= 0.5f; y += resolution)
- for (float x = -0.5f; x <= 0.5f; x += resolution)
- cloud.points.push_back (PointXYZ (x, y, z));
- cloud.width = cloud.points.size ();
- cloud.height = 1;
-
- cloud_big.width = 640;
- cloud_big.height = 480;
- srand (time (NULL));
- // Randomly create a new point cloud
- for (std::size_t i = 0; i < cloud_big.width * cloud_big.height; ++i)
- cloud_big.points.push_back (PointXYZ (1024 * rand () / (RAND_MAX + 1.0),
- 1024 * rand () / (RAND_MAX + 1.0),
- 1024 * rand () / (RAND_MAX + 1.0)));
-}
-
-// helper class for priority queue
-class prioPointQueueEntry
-{
- public:
- prioPointQueueEntry ()
- {
- }
- prioPointQueueEntry (PointXYZ& point_arg, double pointDistance_arg, int pointIdx_arg)
- {
- point_ = point_arg;
- pointDistance_ = pointDistance_arg;
- pointIdx_ = pointIdx_arg;
- }
-
- bool
- operator< (const prioPointQueueEntry& rhs_arg) const
- {
- return (this->pointDistance_ < rhs_arg.pointDistance_);
- }
-
- PointXYZ point_;
- double pointDistance_;int pointIdx_;
-};
-
-TEST (PCL, Octree_Pointcloud_Nearest_K_Neighbour_Search)
-{
- constexpr unsigned int test_runs = 1;
-
- // instantiate point cloud
- PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
-
- srand (time (NULL));
-
- std::priority_queue<prioPointQueueEntry, std::vector<prioPointQueueEntry, Eigen::aligned_allocator<prioPointQueueEntry> > > pointCandidates;
-
- // create octree
- pcl::search::Search<PointXYZ> octree(pcl::search::OCTREE);
-
- std::vector<int> k_indices;
- std::vector<float> k_sqr_distances;
-
- std::vector<int> k_indices_bruteforce;
- std::vector<float> k_sqr_distances_bruteforce;
-
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- // define a random search point
-
- PointXYZ searchPoint (10.0 * ((double)rand () / (double)RAND_MAX), 10.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX));
-
- const unsigned int K = rand () % 10;
-
- // generate point cloud
- cloudIn->width = 1000;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
- for (std::size_t i = 0; i < 1000; i++)
- {
- cloudIn->points[i] = PointXYZ (5.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX));
- }
-
- k_indices.clear();
- k_sqr_distances.clear();
-
- k_indices_bruteforce.clear();
- k_sqr_distances_bruteforce.clear();
-
- // push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
- {
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, i);
-
- pointCandidates.push (pointEntry);
- }
-
- // pop priority queue until we have the nearest K elements
- while (pointCandidates.size () > K)
- pointCandidates.pop ();
-
- // copy results into vectors
- while (pointCandidates.size ())
- {
- k_indices_bruteforce.push_back (pointCandidates.top ().pointIdx_);
- k_sqr_distances_bruteforce.push_back (pointCandidates.top ().pointDistance_);
-
- pointCandidates.pop ();
- }
-
- // octree nearest neighbor search
- octree.setInputCloud (cloudIn);
- octree.nearestKSearch (searchPoint, (int)K, k_indices, k_sqr_distances);
-
- ASSERT_EQ ( k_indices.size() , k_indices_bruteforce.size() );
-
- // compare nearest neighbor results of octree with bruteforce search
- while (k_indices_bruteforce.size ())
- {
- ASSERT_EQ ( k_indices.back() , k_indices_bruteforce.back() );
- EXPECT_NEAR (k_sqr_distances.back(), k_sqr_distances_bruteforce.back(), 1e-4);
-
- k_indices_bruteforce.pop_back();
- k_indices.pop_back();
-
- k_sqr_distances_bruteforce.pop_back();
- k_sqr_distances.pop_back();
- }
- }
-}
-
-TEST (PCL, Octree_Pointcloud_Approx_Nearest_Neighbour_Search)
-{
- constexpr unsigned int test_runs = 100;
-
- unsigned int bestMatchCount = 0;
-
- // instantiate point cloud
- PointCloud<PointXYZ>::Ptr cloudIn (new PointCloud<PointXYZ> ());
-
- srand (time (NULL));
-
- // create octree
- pcl::search::Search<PointXYZ>* octree = new pcl::search::AutotunedSearch<PointXYZ>(pcl::search::OCTREE);
-
-
- for (unsigned int test_id = 0; test_id < test_runs; test_id++)
- {
- // define a random search point
-
- PointXYZ searchPoint (10.0 * ((double)rand () / (double)RAND_MAX), 10.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX));
-
- // generate point cloud
- cloudIn->width = 1000;
- cloudIn->height = 1;
- cloudIn->points.resize (cloudIn->width * cloudIn->height);
- for (std::size_t i = 0; i < 1000; i++)
- {
- cloudIn->points[i] = PointXYZ (5.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX),
- 10.0 * ((double)rand () / (double)RAND_MAX));
- }
-
-
- // brute force search
- double BFdistance = std::numeric_limits<double>::max ();
- int BFindex = 0;
-
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
- {
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- if (pointDist<BFdistance)
- {
- BFindex = i;
- BFdistance = pointDist;
- }
-
- }
-
- int ANNindex;
- float ANNdistance;
-
- // octree approx. nearest neighbor search
- octree->setInputCloud (cloudIn);
- octree->approxNearestSearch (searchPoint, ANNindex, ANNdistance);
-
- if (BFindex==ANNindex)
- {
- EXPECT_NEAR (ANNdistance, BFdistance, 1e-4);
- bestMatchCount++;
- }
- }
-
- // we should have found the absolute nearest neighbor at least once
- ASSERT_EQ ( (bestMatchCount > 0) , true);
-}
-
-
-TEST (PCL, KdTreeWrapper_nearestKSearch)
-{
-
- pcl::search::Search<PointXYZ> kdtree(pcl::search::KDTREE);
- kdtree.setInputCloud (cloud.makeShared ());
- PointXYZ test_point (0.01f, 0.01f, 0.01f);
- unsigned int no_of_neighbors = 20;
- multimap<float, int> sorted_brute_force_result;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
- {
- float distance = euclideanDistance (cloud.points[i], test_point);
- sorted_brute_force_result.insert (make_pair (distance, (int) i));
- }
- float max_dist = 0.0f;
- unsigned int counter = 0;
- for (multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end () && counter < no_of_neighbors; ++it)
- {
- max_dist = max (max_dist, it->first);
- ++counter;
- }
-
- std::vector<int> k_indices;
- k_indices.resize (no_of_neighbors);
- std::vector<float> k_distances;
- k_distances.resize (no_of_neighbors);
-
- kdtree.nearestKSearch (test_point, no_of_neighbors, k_indices, k_distances);
-
- EXPECT_EQ (k_indices.size (), no_of_neighbors);
-
- // Check if all found neighbors have distance smaller than max_dist
- for (std::size_t i = 0; i < k_indices.size (); ++i)
- {
- const PointXYZ& point = cloud.points[k_indices[i]];
- bool ok = euclideanDistance (test_point, point) <= max_dist;
- if (!ok)
- ok = (std::abs (euclideanDistance (test_point, point)) - max_dist) <= 1e-6;
- //if (!ok) std::cerr << k_indices[i] << " is not correct...\n";
- //else std::cerr << k_indices[i] << " is correct...\n";
- EXPECT_EQ (ok, true);
- }
-
- ScopeTime scopeTime ("FLANN nearestKSearch");
- {
- pcl::search::AutotunedSearch<PointXYZ> kdtree (pcl::search::KDTREE);
- //kdtree.initSearchDS ();
- kdtree.setInputCloud (cloud_big.makeShared ());
- for (std::size_t i = 0; i < cloud_big.points.size (); ++i)
- kdtree.nearestKSearch (cloud_big.points[i], no_of_neighbors, k_indices, k_distances);
- }
-}
-
-
-/* Function to auto evaluate the best search structure for the given dataset */
-TEST (PCL, AutoTunedSearch_Evaluate)
-{
- pcl::search::AutotunedSearch<PointXYZ> search (pcl::search::AUTO_TUNED);
-
- pcl::PCDReader pcd;
- pcl::PointCloud<PointXYZ>::Ptr cloudIn (new pcl::PointCloud<PointXYZ>);
-
- if (pcd.read ("office1.pcd", *cloudIn) == -1)
- {
- std::cout <<"Couldn't read input cloud" << std::endl;
- return;
- }
-
- search.evaluateSearchMethods (cloudIn, pcl::search::NEAREST_K_SEARCH);
- search.evaluateSearchMethods (cloudIn, pcl::search::NEAREST_RADIUS_SEARCH);
-}
-
-
-
-int
-main(int argc, char** argv)
-{
- testing::InitGoogleTest (&argc, argv);
- init ();
-
- // Testing using explicit instantiation of inherited class
- pcl::search::AutotunedSearch<PointXYZ> kdtree(pcl::search::KDTREE);
- kdtree.setInputCloud (cloud.makeShared ());
-
- return (RUN_ALL_TESTS ());
-};
-/* ]--- */
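The modernization hunks below repeatedly replace cloud.points[i] and cloud.points.size () with the PointCloud's own operator[] and size (), which forward to the underlying points vector; a minimal sketch of that interface (assuming PointXYZ):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ> cloud;
  cloud.resize (3);                                  // resizes the points vector and keeps width/height consistent
  cloud[0].getVector3fMap () << 1.0f, 2.0f, 3.0f;    // operator[] forwards to cloud.points[0]
  const std::size_t n = cloud.size ();               // forwards to cloud.points.size ()
  return n == 3 ? 0 : 1;
}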
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
-using namespace std;
using namespace pcl;
PointCloud<PointXYZ> cloud, cloud_big;
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
cloud.points.emplace_back(x, y, z);
- cloud.width = int (cloud.points.size ());
+ cloud.width = cloud.size ();
cloud.height = 1;
cloud_big.width = 640;
FlannSearch.setInputCloud (cloud.makeShared ());
PointXYZ test_point (0.01f, 0.01f, 0.01f);
unsigned int no_of_neighbors = 20;
- multimap<float, int> sorted_brute_force_result;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ std::multimap<float, int> sorted_brute_force_result;
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- float distance = euclideanDistance (cloud.points[i], test_point);
- sorted_brute_force_result.insert (make_pair (distance, int (i)));
+ float distance = euclideanDistance (cloud[i], test_point);
+ sorted_brute_force_result.insert (std::make_pair (distance, int (i)));
}
float max_dist = 0.0f;
unsigned int counter = 0;
- for (multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end ()
+ for (std::multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end ()
&& counter < no_of_neighbors; ++it)
{
- max_dist = max (max_dist, it->first);
+ max_dist = std::max (max_dist, it->first);
++counter;
}
// Check if all found neighbors have distance smaller than max_dist
for (const int &k_index : k_indices)
{
- const PointXYZ& point = cloud.points[k_index];
+ const PointXYZ& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
if (!ok)
ok = (std::abs (euclideanDistance (test_point, point)) - max_dist) <= 1e-6;
//if (!ok) std::cerr << k_indices[i] << " is not correct...\n";
//else std::cerr << k_indices[i] << " is correct...\n";
- EXPECT_EQ (ok, true);
+ EXPECT_TRUE (ok);
}
ScopeTime scopeTime ("FLANN nearestKSearch");
//vector<float> k_distances_t;
//k_distances_t.resize (no_of_neighbors);
- for (std::size_t i = 0; i < cloud_rgb.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_rgb.size (); ++i)
{
- //FlannSearch.nearestKSearchT (cloud_rgb.points[i], no_of_neighbors, k_indices_t, k_distances_t);
- FlannSearch.nearestKSearch (cloud_big.points[i], no_of_neighbors, k_indices, k_distances);
+ //FlannSearch.nearestKSearchT (cloud_rgb[i], no_of_neighbors, k_indices_t, k_distances_t);
+ FlannSearch.nearestKSearch (cloud_big[i], no_of_neighbors, k_indices, k_distances);
EXPECT_EQ (k_indices.size (), indices[i].size ());
EXPECT_EQ (k_distances.size (), dists[i].size ());
for (std::size_t j = 0; j< no_of_neighbors; j++)
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
- for (std::size_t i = 0; i < cloud_big.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_big.size (); ++i)
{
- FlannSearch.nearestKSearch (cloud_big.points[i], no_of_neighbors, k_indices, k_distances);
+ FlannSearch.nearestKSearch (cloud_big[i], no_of_neighbors, k_indices, k_distances);
EXPECT_EQ (k_indices.size (), indices[i].size ());
EXPECT_EQ (k_distances.size (), dists[i].size ());
for (std::size_t j = 0; j< no_of_neighbors; j++ )
kdtree_search->nearestKSearch (point, no_of_neighbors, k_indices, k_distances);
}
- std::vector<vector<int> > indices_flann;
- std::vector<vector<float> > dists_flann;
- std::vector<vector<int> > indices_tree;
- std::vector<vector<float> > dists_tree;
+ std::vector<std::vector<int> > indices_flann;
+ std::vector<std::vector<float> > dists_flann;
+ std::vector<std::vector<int> > indices_tree;
+ std::vector<std::vector<float> > dists_tree;
indices_flann.resize (cloud_big.size ());
dists_flann.resize (cloud_big.size ());
indices_tree.resize (cloud_big.size ());
#include <iostream>
#include <pcl/test/gtest.h>
#include <pcl/common/time.h>
-#include <pcl/search/pcl_search.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/distances.h>
+#include <pcl/search/kdtree.h> // for KdTree
-using namespace std;
using namespace pcl;
PointCloud<PointXYZ> cloud, cloud_big;
for (float y = -0.5f; y <= 0.5f; y += resolution)
for (float x = -0.5f; x <= 0.5f; x += resolution)
cloud.points.emplace_back(x, y, z);
- cloud.width = static_cast<std::uint32_t> (cloud.points.size ());
+ cloud.width = cloud.size ();
cloud.height = 1;
cloud_big.width = 640;
kdtree.setInputCloud (cloud.makeShared ());
PointXYZ test_point (0.01f, 0.01f, 0.01f);
unsigned int no_of_neighbors = 20;
- multimap<float, int> sorted_brute_force_result;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ std::multimap<float, int> sorted_brute_force_result;
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- float distance = euclideanDistance (cloud.points[i], test_point);
- sorted_brute_force_result.insert (make_pair (distance, static_cast<int> (i)));
+ float distance = euclideanDistance (cloud[i], test_point);
+ sorted_brute_force_result.insert (std::make_pair (distance, static_cast<int> (i)));
}
float max_dist = 0.0f;
unsigned int counter = 0;
- for (multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end ()
+ for (std::multimap<float, int>::iterator it = sorted_brute_force_result.begin (); it != sorted_brute_force_result.end ()
&& counter < no_of_neighbors; ++it)
{
- max_dist = max (max_dist, it->first);
+ max_dist = std::max (max_dist, it->first);
++counter;
}
// Check if all found neighbors have distance smaller than max_dist
for (const int &k_index : k_indices)
{
- const PointXYZ& point = cloud.points[k_index];
+ const PointXYZ& point = cloud[k_index];
bool ok = euclideanDistance (test_point, point) <= max_dist;
if (!ok)
ok = (std::abs (euclideanDistance (test_point, point)) - max_dist) <= 1e-6;
//if (!ok) std::cerr << k_indices[i] << " is not correct...\n";
//else std::cerr << k_indices[i] << " is correct...\n";
- EXPECT_EQ (ok, true);
+ EXPECT_TRUE (ok);
}
ScopeTime scopeTime ("FLANN nearestKSearch");
std::vector<float> k_distances_t;
k_distances_t.resize (no_of_neighbors);
- for (std::size_t i = 0; i < cloud_rgb.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_rgb.size (); ++i)
{
- kdtree.nearestKSearchT<pcl::PointXYZRGB> (cloud_rgb.points[i], no_of_neighbors, k_indices_t, k_distances_t);
- kdtree.nearestKSearch (cloud_big.points[i], no_of_neighbors, k_indices, k_distances);
+ kdtree.nearestKSearchT<pcl::PointXYZRGB> (cloud_rgb[i], no_of_neighbors, k_indices_t, k_distances_t);
+ kdtree.nearestKSearch (cloud_big[i], no_of_neighbors, k_indices, k_distances);
EXPECT_EQ (k_indices.size (), indices[i].size ());
EXPECT_EQ (k_distances.size (), dists[i].size ());
for (std::size_t j=0; j< no_of_neighbors; j++)
std::vector<float> k_distances;
k_distances.resize (no_of_neighbors);
- for (std::size_t i = 0; i < cloud_big.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud_big.size (); ++i)
{
- kdtree.nearestKSearch (cloud_big.points[i], no_of_neighbors, k_indices, k_distances);
+ kdtree.nearestKSearch (cloud_big[i], no_of_neighbors, k_indices, k_distances);
EXPECT_EQ (k_indices.size (), indices[i].size ());
EXPECT_EQ (k_distances.size (), dists[i].size ());
for (std::size_t j=0; j< no_of_neighbors; j++)
*/
#include <pcl/test/gtest.h>
#include <vector>
-#include <cstdio>
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/pcl_search.h>
-using namespace std;
using namespace pcl;
using namespace octree;
cloudIn->points.resize (cloudIn->width * cloudIn->height);
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (5.0 * (rand () / static_cast<double> (RAND_MAX))),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (5.0 * (rand () / static_cast<double> (RAND_MAX))),
static_cast<float> (10.0 * (rand () / static_cast<double> (RAND_MAX))),
static_cast<float> (10.0 * (rand () / static_cast<double> (RAND_MAX))));
}
k_sqr_distances_bruteforce.clear ();
// push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, static_cast<int> (i));
+ prioPointQueueEntry pointEntry ((*cloudIn)[i], pointDist, static_cast<int> (i));
pointCandidates.push (pointEntry);
}
cloudIn->height = 1;
cloudIn->points.resize (cloudIn->width * cloudIn->height);
for (std::size_t i = 0; i < 1000; i++)
- cloudIn->points[i] = PointXYZ (5.0 * ((double)rand () / (double)RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (5.0 * ((double)rand () / (double)RAND_MAX),
10.0 * ((double)rand () / (double)RAND_MAX),
10.0 * ((double)rand () / (double)RAND_MAX));
// brute force search
double BFdistance = std::numeric_limits<double>::max ();
int BFindex = 0;
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x)
- + (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) + (cloudIn->points[i].z
- - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
+ + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) + ((*cloudIn)[i].z
+ - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
if (pointDist < BFdistance)
{
for (int i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (10.0 * ((double)rand () / (double)RAND_MAX),
+ (*cloudIn)[i] = PointXYZ (10.0 * ((double)rand () / (double)RAND_MAX),
10.0 * ((double)rand () / (double)RAND_MAX),
5.0 * ((double)rand () / (double)RAND_MAX));
}
// generate point cloud data
for (std::size_t i = 0; i < 1000; i++)
{
- cloudIn->points[i] = PointXYZ (static_cast<float> (10.0 * (rand () / static_cast<double> (RAND_MAX))),
+ (*cloudIn)[i] = PointXYZ (static_cast<float> (10.0 * (rand () / static_cast<double> (RAND_MAX))),
static_cast<float> (10.0 * (rand () / static_cast<double> (RAND_MAX))),
static_cast<float> (5.0 * (rand () / static_cast<double> (RAND_MAX))));
}
// bruteforce radius search
std::vector<int> cloudSearchBruteforce;
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
pointDist = sqrt (
- (cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x)
- + (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y)
- + (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ ((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
+ + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y)
+ + ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
if (pointDist <= searchRadius)
{
while (current != cloudNWRSearch.end())
{
pointDist = sqrt (
- (cloudIn->points[*current].x-searchPoint.x) * (cloudIn->points[*current].x-searchPoint.x) +
- (cloudIn->points[*current].y-searchPoint.y) * (cloudIn->points[*current].y-searchPoint.y) +
- (cloudIn->points[*current].z-searchPoint.z) * (cloudIn->points[*current].z-searchPoint.z)
+ ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
+ ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
+ ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
);
ASSERT_EQ ( (pointDist<=searchRadius) , true);
#include <vector>
-#include <cstdio>
-
-using namespace std;
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
using namespace pcl;
-#include <pcl/search/pcl_search.h>
-
-
// helper class for priority queue
class prioPointQueueEntry
{
}
unsigned int searchIdx = rand()%(cloudIn->width * cloudIn->height);
- const PointXYZ& searchPoint = cloudIn->points[searchIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[searchIdx];
k_indices.clear();
k_sqr_distances.clear();
// push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ double pointDist = (((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x) +
+ ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y) +
+ ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, int (i));
+ prioPointQueueEntry pointEntry ((*cloudIn)[i], pointDist, int (i));
pointCandidates.push (pointEntry);
}
double y = ypos*oneOverFocalLength*z;
double x = xpos*oneOverFocalLength*z;
- cloudIn->points[idx++]= PointXYZ (float (x), float (y), float (z));
+ (*cloudIn)[idx++]= PointXYZ (float (x), float (y), float (z));
}
unsigned int randomIdx = rand()%(cloudIn->width * cloudIn->height);
- const PointXYZ& searchPoint = cloudIn->points[randomIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[randomIdx];
double pointDist;
double searchRadius = 1.0 * (double (rand ()) / double (RAND_MAX));
std::vector<int> cloudSearchBruteforce;
cloudSearchBruteforce.clear();
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (std::size_t i = 0; i < cloudIn->size (); i++)
{
pointDist = sqrt (
- (cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x)
- + (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y)
- + (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ ((*cloudIn)[i].x - searchPoint.x) * ((*cloudIn)[i].x - searchPoint.x)
+ + ((*cloudIn)[i].y - searchPoint.y) * ((*cloudIn)[i].y - searchPoint.y)
+ + ((*cloudIn)[i].z - searchPoint.z) * ((*cloudIn)[i].z - searchPoint.z));
if (pointDist <= searchRadius)
{
while (current != cloudNWRSearch.end())
{
pointDist = sqrt (
- (cloudIn->points[*current].x-searchPoint.x) * (cloudIn->points[*current].x-searchPoint.x) +
- (cloudIn->points[*current].y-searchPoint.y) * (cloudIn->points[*current].y-searchPoint.y) +
- (cloudIn->points[*current].z-searchPoint.z) * (cloudIn->points[*current].z-searchPoint.z)
+ ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
+ ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
+ ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
);
ASSERT_EQ ( (pointDist<=searchRadius) , true);
{
pointDist = sqrt (
- (cloudIn->points[*current].x-searchPoint.x) * (cloudIn->points[*current].x-searchPoint.x) +
- (cloudIn->points[*current].y-searchPoint.y) * (cloudIn->points[*current].y-searchPoint.y) +
- (cloudIn->points[*current].z-searchPoint.z) * (cloudIn->points[*current].z-searchPoint.z)
+ ((*cloudIn)[*current].x-searchPoint.x) * ((*cloudIn)[*current].x-searchPoint.x) +
+ ((*cloudIn)[*current].y-searchPoint.y) * ((*cloudIn)[*current].y-searchPoint.y) +
+ ((*cloudIn)[*current].z-searchPoint.z) * ((*cloudIn)[*current].z-searchPoint.z)
);
ASSERT_EQ ( (pointDist<=searchRadius) , true);
++current;
}
- for (std::size_t i = 0; i < cloudSearchBruteforce.size (); i++)
- for (std::size_t j = 0; j < cloudNWRSearch.size (); j++)
- if (cloudNWRSearch[i]== cloudSearchBruteforce[j])
- break;
-
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
*/
#include <pcl/test/gtest.h>
#include <vector>
-#include <stdio.h>
+#include <cstdio>
#include <pcl/common/time.h>
+#include <pcl/common/geometry.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
-#include <pcl/search/pcl_search.h>
+#include <pcl/search/organized.h> // for OrganizedNeighbor
-using namespace std;
using namespace pcl;
+std::string pcd_filename;
+
// helper class for priority queue
class prioPointQueueEntry
{
public:
prioPointQueueEntry ()
- {
- }
+ = default;
prioPointQueueEntry (PointXYZ& point_arg, double pointDistance_arg, int pointIdx_arg)
{
point_ = point_arg;
srand (time (NULL));
// create organized search
- pcl::search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
+ search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
std::vector<int> k_indices;
std::vector<float> k_sqr_distances;
double y = (double)ypos*oneOverFocalLength*(double)z;
double x = (double)xpos*oneOverFocalLength*(double)z;
- cloudIn->points.push_back(PointXYZ (x, y, z));
+ cloudIn->points.emplace_back(x, y, z);
}
unsigned int searchIdx = rand()%(cloudIn->width * cloudIn->height);
- const PointXYZ& searchPoint = cloudIn->points[searchIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[searchIdx];
k_indices.clear();
k_sqr_distances.clear();
std::priority_queue<prioPointQueueEntry> pointCandidates;
-
- // push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (auto it = cloudIn->begin(); it != cloudIn->end(); ++it)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, i);
-
+ const auto pointDist = geometry::distance(*it, searchPoint);
+ prioPointQueueEntry pointEntry (*it, pointDist, std::distance(cloudIn->begin(), it));
pointCandidates.push (pointEntry);
}
pointCandidates.pop ();
// copy results into vectors
- while (pointCandidates.size ())
+ while (!pointCandidates.empty())
{
k_indices_bruteforce.push_back (pointCandidates.top ().pointIdx_);
k_sqr_distances_bruteforce.push_back (pointCandidates.top ().pointDistance_);
srand (time (NULL));
// create organized search
- pcl::search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
+ search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
std::vector<int> k_indices;
std::vector<float> k_sqr_distances;
unsigned int searchIdx;
- if (pcd.read ("office1.pcd", *cloudIn) == -1)
- {
- std::cout <<"Couldn't read input cloud" << std::endl;
- return;
- }
+ ASSERT_EQ (pcd.read (pcd_filename, *cloudIn), 0) << "Couldn't read input cloud" << std::endl;
- while(1)
+
+ while(true)
{
searchIdx = rand()%(cloudIn->width * cloudIn->height);
- if(cloudIn->points[searchIdx].z < 100)
+ if((*cloudIn)[searchIdx].z < 100)
break;
}
const unsigned int K = (rand () % 10)+1;
// K = 16;
// generate point cloud
- const PointXYZ& searchPoint = cloudIn->points[searchIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[searchIdx];
k_indices.clear();
k_sqr_distances.clear();
// organized nearest neighbor search
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.nearestKSearch (searchIdx, (int)K, k_indices, k_sqr_distances);
-
+ organizedNeighborSearch.nearestKSearch (searchPoint, (int)K, k_indices, k_sqr_distances);
+
k_indices_bruteforce.clear();
k_sqr_distances_bruteforce.clear();
// push all points and their distance to the search point into a priority queue - bruteforce approach.
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (auto it = cloudIn->begin(); it != cloudIn->end(); ++it)
{
- double pointDist = ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- prioPointQueueEntry pointEntry (cloudIn->points[i], pointDist, i);
-
+ const auto pointDist = geometry::distance(*it, searchPoint);
+ prioPointQueueEntry pointEntry (*it, pointDist, std::distance(cloudIn->begin(), it));
pointCandidates.push (pointEntry);
}
pointCandidates.pop ();
// copy results into vectors
- while (pointCandidates.size ())
+ while (!pointCandidates.empty())
{
k_indices_bruteforce.push_back (pointCandidates.top ().pointIdx_);
k_sqr_distances_bruteforce.push_back (pointCandidates.top ().pointDistance_);
constexpr unsigned int test_runs = 10;
srand (time (NULL));
-
- pcl::search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
+ search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
std::vector<int> k_indices;
std::vector<float> k_sqr_distances;
double z = 5.0 * ( ((double)rand () / (double)RAND_MAX))+5;
double y = ypos*oneOverFocalLength*z;
double x = xpos*oneOverFocalLength*z;
-
- cloudIn->points[idx++]= PointXYZ (x, y, z);
+ (*cloudIn)[idx++]= PointXYZ (x, y, z);
}
unsigned int randomIdx = rand()%(cloudIn->width * cloudIn->height);
- const PointXYZ& searchPoint = cloudIn->points[randomIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[randomIdx];
const double searchRadius = 1.0 * ((double)rand () / (double)RAND_MAX);
-// double searchRadius = 1/10;
+ // double searchRadius = 1/10;
// bruteforce radius search
std::vector<int> cloudSearchBruteforce;
cloudSearchBruteforce.clear();
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- double pointDist = sqrt ((cloudIn->points[*current].x - searchPoint.x) * (cloudIn->points[*current].x - searchPoint.x) +
- (cloudIn->points[*current].y - searchPoint.y) * (cloudIn->points[*current].y - searchPoint.y) +
- (cloudIn->points[*current].z - searchPoint.z) * (cloudIn->points[*current].z - searchPoint.z));
+ const auto pointDist = geometry::distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
// add point candidates to vector list
- cloudSearchBruteforce.push_back ((int)i);
+ cloudSearchBruteforce.push_back (std::distance(cloudIn->points.cbegin(), it));
}
}
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.radiusSearch (randomIdx, searchRadius, cloudNWRSearch, cloudNWRRadius, INT_MAX); //,INT_MAX);
-
-
- // check if result from organized radius search can be also found in bruteforce search
- std::vector<int>::const_iterator current = cloudNWRSearch.begin();
- while (current != cloudNWRSearch.end())
- {
- double pointDist = sqrt ((cloudIn->points[*current].x - searchPoint.x) * (cloudIn->points[*current].x - searchPoint.x) +
- (cloudIn->points[*current].y - searchPoint.y) * (cloudIn->points[*current].y - searchPoint.y) +
- (cloudIn->points[*current].z - searchPoint.z) * (cloudIn->points[*current].z - searchPoint.z));
+ organizedNeighborSearch.radiusSearch ((*cloudIn)[randomIdx], searchRadius, cloudNWRSearch, cloudNWRRadius, std::numeric_limits<unsigned int>::max());
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
- ++current;
+ // check if results from the organized radius search can also be found in the brute-force search
+ for (const auto it : cloudNWRSearch)
+ {
+ const auto pointDist = geometry::distance((*cloudIn)[it], searchPoint);
+ ASSERT_EQ ( (pointDist <= searchRadius) , true);
}
// check if bruteforce result from organized radius search can be also found in bruteforce search
- current = cloudSearchBruteforce.begin();
- while (current != cloudSearchBruteforce.end())
+ for (const auto it : cloudSearchBruteforce)
{
-
- double pointDist = sqrt ((cloudIn->points[*current].x - searchPoint.x) * (cloudIn->points[*current].x - searchPoint.x) +
- (cloudIn->points[*current].y - searchPoint.y) * (cloudIn->points[*current].y - searchPoint.y) +
- (cloudIn->points[*current].z - searchPoint.z) * (cloudIn->points[*current].z - searchPoint.z));
-
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ const auto pointDist = geometry::distance((*cloudIn)[it], searchPoint);
+ ASSERT_EQ ( (pointDist <= searchRadius) , true);
}
- for (std::size_t i = 0; i < cloudSearchBruteforce.size (); i++)
- for (std::size_t j = 0; j < cloudNWRSearch.size (); j++)
- if (cloudNWRSearch[i]== cloudSearchBruteforce[j])
- break;
-
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
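For clarity on the pattern used throughout these hunks: pcl::geometry::distance (declared in pcl/common/geometry.h, added to the includes earlier) returns the plain Euclidean distance, i.e. the same value as the hand-rolled sqrt of summed squared differences it replaces. A minimal, self-contained sketch of that equivalence (the coordinates below are illustrative, not taken from the test):

#include <pcl/common/geometry.h>
#include <pcl/point_types.h>
#include <cassert>
#include <cmath>

int main ()
{
  pcl::PointXYZ a (0.0f, 1.0f, 2.0f), b (3.0f, 5.0f, 2.0f);
  // Hand-rolled Euclidean distance, as written in the removed lines
  const float d_manual = std::sqrt ((a.x - b.x) * (a.x - b.x) +
                                    (a.y - b.y) * (a.y - b.y) +
                                    (a.z - b.z) * (a.z - b.z));
  // Helper now used by the test code; returns the same (non-squared) distance
  const float d_helper = pcl::geometry::distance (a, b);
  assert (std::abs (d_manual - d_helper) < 1e-6f);
  return 0;
}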
constexpr unsigned int test_runs = 10;
srand (time (NULL));
- pcl::search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch();
+ search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
std::vector<int> k_indices;
std::vector<float> k_sqr_distances;
// typical focal length from kinect
constexpr double oneOverFocalLength = 0.0018;
-
+
double radiusSearchTime = 0, radiusSearchLPTime = 0;
for (unsigned int test_id = 0; test_id < test_runs; test_id++)
double y = ypos*oneOverFocalLength*z;
double x = xpos*oneOverFocalLength*z;
- cloudIn->points[idx++]= PointXYZ (x, y, z);
+ (*cloudIn)[idx++]= PointXYZ (x, y, z);
}
const unsigned int randomIdx = rand() % (cloudIn->width * cloudIn->height);
- const PointXYZ& searchPoint = cloudIn->points[randomIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[randomIdx];
const double searchRadius = 1.0 * ((double)rand () / (double)RAND_MAX);
std::vector<int> cloudSearchBruteforce;
cloudSearchBruteforce.clear();
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- double pointDist = sqrt ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ const auto pointDist = geometry::distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
// add point candidates to vector list
- cloudSearchBruteforce.push_back ((int)i);
+ cloudSearchBruteforce.push_back (std::distance(cloudIn->points.cbegin(), it));
}
}
-
std::vector<int> cloudNWRSearch;
std::vector<float> cloudNWRRadius;
-
+
double check_time = getTime();
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.radiusSearch (randomIdx, searchRadius, cloudNWRSearch, cloudNWRRadius, INT_MAX); //,INT_MAX);
-
+ organizedNeighborSearch.radiusSearch ((*cloudIn)[randomIdx], searchRadius, cloudNWRSearch, cloudNWRRadius, std::numeric_limits<unsigned int>::max());
+
double check_time2 = getTime();
-
+
radiusSearchLPTime += check_time2 - check_time;
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.radiusSearch (randomIdx, searchRadius, cloudNWRSearch, cloudNWRRadius, INT_MAX); //,INT_MAX);
+ organizedNeighborSearch.radiusSearch ((*cloudIn)[randomIdx], searchRadius, cloudNWRSearch, cloudNWRRadius, std::numeric_limits<unsigned int>::max());
radiusSearchTime += getTime() - check_time2;
}
pcl::PointCloud<PointXYZ>::Ptr cloudIn (new pcl::PointCloud<PointXYZ>);
pcl::PCDReader pcd;
- if (pcd.read ("office1.pcd", *cloudIn) == -1)
+ if (pcd.read (pcd_filename, *cloudIn) == -1)
{
- std::cout <<"Couldn't read input cloud" << std::endl;
+ std::cout <<"Couldn't read input cloud" << std::endl;
return;
}
constexpr unsigned int test_runs = 10;
srand (time (NULL));
- pcl::search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch();
+ search::OrganizedNeighbor<PointXYZ> organizedNeighborSearch;
// typical focal length from kinect
unsigned int randomIdx;
- while (1)
+ while (true)
{
randomIdx = rand()%(cloudIn->width * cloudIn->height);
- if(cloudIn->points[randomIdx].z <100)break;
+ if ((*cloudIn)[randomIdx].z < 100) break;
}
- std::cout << "**Search Point:** " << cloudIn->points[randomIdx].x << " " << cloudIn->points[randomIdx].y << " " << cloudIn->points[randomIdx].z << std::endl;
+ std::cout << "**Search Point:** " << (*cloudIn)[randomIdx].x << " " << (*cloudIn)[randomIdx].y << " " << (*cloudIn)[randomIdx].z << std::endl;
std::cout << "+--------+-----------------+----------------+-------------------+" << std::endl;
std::cout << "| Search | Organized | Organized | Number of |" << std::endl;
double check_time = getTime();
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.radiusSearch (randomIdx, searchRadius, cloudNWRSearch2, cloudNWRRadius2, INT_MAX); //,INT_MAX);
+ organizedNeighborSearch.radiusSearch ((*cloudIn)[randomIdx], searchRadius, cloudNWRSearch2, cloudNWRRadius2, std::numeric_limits<unsigned int>::max());
double check_time2 = getTime();
sum_time+= check_time2 - check_time;
cloudNWRRadius.clear();
double check_time = getTime();
organizedNeighborSearch.setInputCloud (cloudIn);
- organizedNeighborSearch.approxRadiusSearch (cloudIn, randomIdx, searchRadius, cloudNWRSearch, cloudNWRRadius, INT_MAX); //,INT_MAX);
+ organizedNeighborSearch.radiusSearch ((*cloudIn)[randomIdx], searchRadius, cloudNWRSearch, cloudNWRRadius, std::numeric_limits<unsigned int>::max());
double check_time2 = getTime();
sum_time2+= check_time2 - check_time;
// ASSERT_EQ(cloudNWRRadius.size(), cloudNWRRadius2.size());
// ASSERT_EQ(cloudNWRSearch.size(), cloudNWRSearch2.size());
-
- printf("| %.3lf | %0.5lf | %0.5lf | %6lu |\n",searchRadius, sum_time/100, sum_time2/100, cloudNWRSearch.size());
+
+ printf("| %.3lf | %0.5lf | %0.5lf | %6zu |\n",searchRadius, sum_time/100, sum_time2/100, cloudNWRSearch.size());
std::cout << "+--------+-----------------+----------------+-------------------+" << std::endl;
- const PointXYZ& searchPoint = cloudIn->points[randomIdx];
+ const PointXYZ& searchPoint = (*cloudIn)[randomIdx];
// bruteforce radius search
std::vector<int> cloudSearchBruteforce;
cloudSearchBruteforce.clear();
- for (std::size_t i = 0; i < cloudIn->points.size (); i++)
+ for (auto it = cloudIn->points.cbegin(); it != cloudIn->points.cend(); ++it)
{
- double pointDist = sqrt ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
+ const auto pointDist = geometry::distance(*it, searchPoint);
if (pointDist <= searchRadius)
{
// add point candidates to vector list
- cloudSearchBruteforce.push_back ((int)i);
+ cloudSearchBruteforce.push_back (std::distance(cloudIn->points.cbegin(), it));
}
}
// check if result from organized radius search can be also found in bruteforce search
- std::vector<int>::const_iterator current = cloudNWRSearch.begin();
- while (current != cloudNWRSearch.end())
+ for (const auto it : cloudNWRSearch)
{
- double pointDist = sqrt ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ double pointDist = geometry::distance((*cloudIn)[it], searchPoint);
+ ASSERT_EQ ( (pointDist <= searchRadius) , true);
}
// check if bruteforce result from organized radius search can be also found in bruteforce search
- current = cloudSearchBruteforce.begin();
- while (current != cloudSearchBruteforce.end())
+ for (const auto it : cloudSearchBruteforce)
{
- double pointDist = sqrt ((cloudIn->points[i].x - searchPoint.x) * (cloudIn->points[i].x - searchPoint.x) +
- (cloudIn->points[i].y - searchPoint.y) * (cloudIn->points[i].y - searchPoint.y) +
- (cloudIn->points[i].z - searchPoint.z) * (cloudIn->points[i].z - searchPoint.z));
-
- ASSERT_EQ ( (pointDist<=searchRadius) , true);
-
- ++current;
+ double pointDist = geometry::distance((*cloudIn)[it], searchPoint);
+ ASSERT_EQ ( (pointDist <= searchRadius) , true);
}
+
ASSERT_EQ (cloudNWRRadius.size() , cloudSearchBruteforce.size ());
// check if result limitation works
int
main (int argc, char** argv)
{
-
testing::InitGoogleTest (&argc, argv);
+
+ if (argc < 2)
+ {
+ std::cout << "need path to office1.pcd file\n";
+ return (-1);
+ }
+
+ pcd_filename = std::string(argv[1]);
+
return (RUN_ALL_TESTS ());
}
/* ]--- */
#include <pcl/search/octree.h>
#include <pcl/io/pcd_io.h>
#include <pcl/common/point_tests.h> // for pcl::isFinite
-#include <pcl/common/time.h>
using namespace pcl;
-using namespace std;
/** \brief if set to value other than 0 -> fine grained output */
#define DEBUG_OUT 1
* @param name name of the search method that returned these distances
* @return true if indices are unique, false otherwise
*/
-bool testUniqueness (const std::vector<int>& indices, const string& name)
+bool testUniqueness (const std::vector<int>& indices, const std::string& name)
{
bool uniqueness = true;
for (unsigned idx1 = 1; idx1 < indices.size () && uniqueness; ++idx1)
* \param name name of the search method that returned these distances
* \return true if distances in weak ascending order, false otherwise
*/
-bool testOrder (const std::vector<float>& distances, const string& name)
+bool testOrder (const std::vector<float>& distances, const std::string& name)
{
bool ordered = true;
for (std::size_t idx1 = 1; idx1 < distances.size (); ++idx1)
* @return true if result is valid, false otherwise
*/
template<typename PointT> bool
-testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const std::vector<bool>& indices_mask, const std::vector<bool>& nan_mask, const std::vector<int>& indices, const std::vector<int>& /*input_indices*/, const string& name)
+testResultValidity (const typename PointCloud<PointT>::ConstPtr point_cloud, const std::vector<bool>& indices_mask, const std::vector<bool>& nan_mask, const std::vector<int>& indices, const std::vector<int>& /*input_indices*/, const std::string& name)
{
bool validness = true;
for (const int &index : indices)
* \param eps threshold for comparing the distances
* \return true if both sets are the same, false otherwise
*/
-bool compareResults (const std::vector<int>& indices1, const::vector<float>& distances1, const std::string& name1,
- const std::vector<int>& indices2, const::vector<float>& distances2, const std::string& name2, float eps)
+bool compareResults (const std::vector<int>& indices1, const std::vector<float>& distances1, const std::string& name1,
+ const std::vector<int>& indices2, const std::vector<float>& distances2, const std::string& name2, float eps)
{
bool equal = true;
if (indices1.size () != indices2.size ())
default(none)
for (int sIdx = 0; sIdx < int (search_methods.size ()); ++sIdx)
{
- search_methods [sIdx]->nearestKSearch (point_cloud->points[query_index], knn, indices [sIdx], distances [sIdx]);
+ search_methods [sIdx]->nearestKSearch ((*point_cloud)[query_index], knn, indices [sIdx], distances [sIdx]);
passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
shared(distances, indices, indices_mask, input_indices, nan_mask, passed, point_cloud, radius, query_index, search_methods)
for (int sIdx = 0; sIdx < static_cast<int> (search_methods.size ()); ++sIdx)
{
- search_methods [sIdx]->radiusSearch (point_cloud->points[query_index], radius, indices [sIdx], distances [sIdx], 0);
+ search_methods [sIdx]->radiusSearch ((*point_cloud)[query_index], radius, indices [sIdx], distances [sIdx], 0);
passed [sIdx] = passed [sIdx] && testUniqueness (indices [sIdx], search_methods [sIdx]->getName ());
passed [sIdx] = passed [sIdx] && testOrder (distances [sIdx], search_methods [sIdx]->getName ());
passed [sIdx] = passed [sIdx] && testResultValidity<PointT>(point_cloud, indices_mask, nan_mask, indices [sIdx], input_indices, search_methods [sIdx]->getName ());
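For reference, the radiusSearch overload exercised here and in the organized-neighbor tests takes the query point, the search radius, output vectors for indices and squared distances, and an optional cap on the number of neighbors, where 0 means no limit (hence the trailing 0 above and the std::numeric_limits<unsigned int>::max() in the organized tests). A minimal sketch against pcl::search::KdTree with illustrative data:

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>
#include <vector>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
  for (float x = 0.0f; x < 1.0f; x += 0.1f)
    cloud->points.emplace_back (x, 0.0f, 0.0f);
  cloud->width = cloud->size ();
  cloud->height = 1;

  pcl::search::KdTree<pcl::PointXYZ> tree;
  tree.setInputCloud (cloud);

  std::vector<int> indices;
  std::vector<float> sqr_distances;  // radiusSearch reports squared distances
  // Last argument caps the number of returned neighbors; 0 means "return everything in range"
  tree.radiusSearch ((*cloud)[0], 0.35, indices, sqr_distances, 0);
  return 0;
}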
#include <fstream>
#include <sstream>
-#include <boost/format.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/info_parser.hpp>
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_NE (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_NE (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
rg.setInputCloud (another_cloud_);
rg.setInputNormals (normals_);
- int first_cloud_size = static_cast<int> (cloud_->points.size ());
- int second_cloud_size = static_cast<int> (another_cloud_->points.size ());
+ const auto first_cloud_size = cloud_->size ();
+ const auto second_cloud_size = another_cloud_->size ();
ASSERT_NE (first_cloud_size, second_cloud_size);
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
rg.setInputCloud (cloud_);
rg.setInputNormals (another_normals_);
rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
+ num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
rg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
rg.setNumberOfNeighbours (30);
rg.setResidualThreshold (-10.0);
rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
+ num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
rg.setCurvatureTestFlag (true);
rg.setCurvatureThreshold (-10.0f);
rg.extract (clusters);
- num_of_segments = static_cast<int> (clusters.size ());
+ num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (2, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (0, num_of_segments);
}
std::vector <pcl::PointIndices> clusters;
mcSeg.extract (clusters);
- int num_of_segments = static_cast<int> (clusters.size ());
+ const auto num_of_segments = clusters.size ();
EXPECT_EQ (2, num_of_segments);
}
PointCloud<PointXYZ> output;
sd.segment (output);
- EXPECT_EQ (static_cast<int> (output.points.size ()), 0);
+ EXPECT_EQ (output.size (), 0);
// Set a different target
sd.setTargetCloud (cloud_t_);
sd.segment (output);
- EXPECT_EQ (static_cast<int> (output.points.size ()), 126);
+ EXPECT_EQ (output.size (), 126);
//savePCDFile ("./test/0-t.pcd", output);
// Reverse
sd.setInputCloud (cloud_t_);
sd.setTargetCloud (cloud_);
sd.segment (output);
- EXPECT_EQ (static_cast<int> (output.points.size ()), 127);
+ EXPECT_EQ (output.size (), 127);
//savePCDFile ("./test/t-0.pcd", output);
}
PointCloud<PointXYZ>::Ptr hull (new PointCloud<PointXYZ>);
hull->points.resize (5);
- for (std::size_t i = 0; i < hull->points.size (); ++i)
+ for (std::size_t i = 0; i < hull->size (); ++i)
{
- hull->points[i].x = hull->points[i].y = static_cast<float> (i);
- hull->points[i].z = 0.0f;
+ (*hull)[i].x = (*hull)[i].y = static_cast<float> (i);
+ (*hull)[i].z = 0.0f;
}
ExtractPolygonalPrismData<PointXYZ> ex;
PointIndices output;
ex.segment (output);
- EXPECT_EQ (static_cast<int> (output.indices.size ()), 0);
+ EXPECT_EQ (output.indices.size (), 0);
}
/* ---[ */
// Tranpose the cloud
cloud_t = cloud;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
- cloud_t.points[i].x += 0.01f;
+ for (auto& point: cloud_t)
+ point.x += 0.01f;
cloud_ = cloud.makeShared ();
cloud_t_ = cloud_t.makeShared ();
FILES test_ear_clipping.cpp
LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features pcl_search
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+PCL_ADD_TEST(surface_poisson test_poisson
+ FILES test_poisson.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_kdtree pcl_surface pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
if(QHULL_FOUND)
PCL_ADD_TEST(surface_convex_hull test_convex_hull
#include <pcl/io/vtk_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/concave_hull.h>
-#include <pcl/common/common.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/sample_consensus/model_types.h> // for SACMODEL_PLANE
#include <pcl/filters/project_inliers.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
concave_hull.setVoronoiCenters (voronoi_centers);
concave_hull.reconstruct (alpha_shape, polygons_alpha);
- EXPECT_EQ (alpha_shape.points.size (), 21);
+ EXPECT_EQ (alpha_shape.size (), 21);
pcl::PointCloud<pcl::PointXYZ> alpha_shape1;
pcl::PointCloud<pcl::PointXYZ>::Ptr voronoi_centers1 (new pcl::PointCloud<pcl::PointXYZ>);
concave_hull1.setVoronoiCenters (voronoi_centers1);
concave_hull1.reconstruct (alpha_shape1, polygons_alpha1);
- EXPECT_EQ (alpha_shape1.points.size (), 20);
+ EXPECT_EQ (alpha_shape1.size (), 20);
pcl::PointCloud<pcl::PointXYZ> alpha_shape2;
pcl::PointCloud<pcl::PointXYZ>::Ptr voronoi_centers2 (new pcl::PointCloud<pcl::PointXYZ>);
concave_hull2.setVoronoiCenters (voronoi_centers2);
concave_hull2.reconstruct (alpha_shape2, polygons_alpha2);
- EXPECT_EQ (alpha_shape2.points.size (), 81);
+ EXPECT_EQ (alpha_shape2.size (), 81);
//PolygonMesh concave;
//toPCLPointCloud2 (alpha_shape2, concave.cloud);
cloud_4->push_back (p);
cloud_4->height = 1;
- cloud_4->width = std::uint32_t (cloud_4->size ());
+ cloud_4->width = cloud_4->size ();
ConcaveHull<PointXYZ> concave_hull;
concave_hull.setInputCloud (cloud_4);
{
for (std::size_t j = 0; j <= 2; j++)
{
- cloud_out_ltable.points[npoints].x = float (i) * 0.5f;
- cloud_out_ltable.points[npoints].y = -float (j) * 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = float (i) * 0.5f;
+ cloud_out_ltable[npoints].y = -float (j) * 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
}
}
{
for(std::size_t j = 3; j < 8; j++)
{
- cloud_out_ltable.points[npoints].x = float (i) * 0.5f;
- cloud_out_ltable.points[npoints].y = -float (j) * 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = float (i) * 0.5f;
+ cloud_out_ltable[npoints].y = -float (j) * 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
}
}
concave_hull.setVoronoiCenters (voronoi_centers);
concave_hull.reconstruct (alpha_shape, polygons_alpha);
- EXPECT_EQ (alpha_shape.points.size (), 27);
+ EXPECT_EQ (alpha_shape.size (), 27);
pcl::PointCloud<pcl::PointXYZ> alpha_shape1;
pcl::PointCloud<pcl::PointXYZ>::Ptr voronoi_centers1 (new pcl::PointCloud<pcl::PointXYZ>);
concave_hull1.setVoronoiCenters (voronoi_centers1);
concave_hull1.reconstruct (alpha_shape1, polygons_alpha1);
- EXPECT_EQ (alpha_shape1.points.size (), 23);
+ EXPECT_EQ (alpha_shape1.size (), 23);
pcl::PointCloud<pcl::PointXYZ> alpha_shape2;
pcl::PointCloud<pcl::PointXYZ>::Ptr voronoi_centers2 (new pcl::PointCloud<pcl::PointXYZ>);
concave_hull2.setVoronoiCenters (voronoi_centers2);
concave_hull2.reconstruct (alpha_shape2, polygons_alpha2);
- EXPECT_EQ (alpha_shape2.points.size (), 19);
+ EXPECT_EQ (alpha_shape2.size (), 19);
}
/* ---[ */
#include <pcl/features/normal_3d.h>
#include <pcl/surface/convex_hull.h>
#include <pcl/common/common.h>
-#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/sample_consensus/model_types.h> // for SACMODEL_PLANE
#include <pcl/filters/project_inliers.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
pcl::fromPCLPointCloud2 (mesh.cloud, hull2);
// compare the PointCloud (hull2) to the output from the original test --- they should be identical
- ASSERT_EQ (hull.points.size (), hull2.points.size ());
- for (std::size_t i = 0; i < hull.points.size (); ++i)
+ ASSERT_EQ (hull.size (), hull2.size ());
+ for (std::size_t i = 0; i < hull.size (); ++i)
{
- const PointXYZ & p1 = hull.points[i];
- const PointXYZ & p2 = hull2.points[i];
+ const PointXYZ & p1 = hull[i];
+ const PointXYZ & p2 = hull2[i];
ASSERT_EQ (p1.x, p2.x);
ASSERT_EQ (p1.y, p2.y);
ASSERT_EQ (p1.z, p2.z);
{
for (std::size_t j = 0; j <= 2; j++)
{
- cloud_out_ltable.points[npoints].x = float (i) * 0.5f;
- cloud_out_ltable.points[npoints].y = -float (j) * 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = float (i) * 0.5f;
+ cloud_out_ltable[npoints].y = -float (j) * 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
}
}
{
for (std::size_t j = 3; j < 8; j++)
{
- cloud_out_ltable.points[npoints].x = float (i) * 0.5f;
- cloud_out_ltable.points[npoints].y = -float (j) * 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = float (i) * 0.5f;
+ cloud_out_ltable[npoints].y = -float (j) * 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
}
}
// add the five points on the hull
- cloud_out_ltable.points[npoints].x = -0.5f;
- cloud_out_ltable.points[npoints].y = 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = -0.5f;
+ cloud_out_ltable[npoints].y = 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
- cloud_out_ltable.points[npoints].x = 4.5f;
- cloud_out_ltable.points[npoints].y = 0.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = 4.5f;
+ cloud_out_ltable[npoints].y = 0.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
- cloud_out_ltable.points[npoints].x = 4.5f;
- cloud_out_ltable.points[npoints].y = -1.0f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = 4.5f;
+ cloud_out_ltable[npoints].y = -1.0f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
- cloud_out_ltable.points[npoints].x = 1.0f;
- cloud_out_ltable.points[npoints].y = -4.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = 1.0f;
+ cloud_out_ltable[npoints].y = -4.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
- cloud_out_ltable.points[npoints].x = -0.5f;
- cloud_out_ltable.points[npoints].y = -4.5f;
- cloud_out_ltable.points[npoints].z = 0.f;
+ cloud_out_ltable[npoints].x = -0.5f;
+ cloud_out_ltable[npoints].y = -4.5f;
+ cloud_out_ltable[npoints].z = 0.f;
npoints++;
cloud_out_ltable.points.resize (npoints);
chull.reconstruct (hull, polygons);
EXPECT_EQ (polygons.size (), 1);
- EXPECT_EQ (hull.points.size (), 5);
+ EXPECT_EQ (hull.size (), 5);
//
pcl::fromPCLPointCloud2 (mesh.cloud, hull2);
// compare the PointCloud (hull2) to the output from the original test --- they should be identical
- ASSERT_EQ (hull.points.size (), hull2.points.size ());
- for (std::size_t i = 0; i < hull.points.size (); ++i)
+ ASSERT_EQ (hull.size (), hull2.size ());
+ for (std::size_t i = 0; i < hull.size (); ++i)
{
- const PointXYZ & p1 = hull.points[i];
- const PointXYZ & p2 = hull2.points[i];
+ const PointXYZ & p1 = hull[i];
+ const PointXYZ & p2 = hull2[i];
ASSERT_EQ (p1.x, p2.x);
ASSERT_EQ (p1.y, p2.y);
ASSERT_EQ (p1.z, p2.z);
cloud_4->push_back (p);
cloud_4->height = 1;
- cloud_4->width = std::uint32_t (cloud_4->size ());
+ cloud_4->width = cloud_4->size ();
ConvexHull<PointXYZ> convex_hull;
convex_hull.setComputeAreaVolume (true);
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
cloud->points.emplace_back( 4.f, 7.f, 0.5f);
cloud->points.emplace_back( 2.f, 5.f, 0.5f);
cloud->points.emplace_back(-1.f, 8.f, 0.5f);
- cloud->width = static_cast<std::uint32_t> (cloud->points.size ());
+ cloud->width = cloud->size ();
Vertices vertices;
- vertices.vertices.resize (cloud->points.size ());
+ vertices.vertices.resize (cloud->size ());
for (int i = 0; i < static_cast<int> (vertices.vertices.size ()); ++i)
vertices.vertices[i] = i;
cloud->points.emplace_back( 1.f, 0.f, 1.f);
cloud->points.emplace_back( 1.f, 1.f, 1.f);
cloud->points.emplace_back( 0.f, 1.f, 1.f);
- cloud->width = static_cast<std::uint32_t> (cloud->points.size ());
+ cloud->width = cloud->size ();
Vertices vertices;
vertices.vertices.resize(4);
#include <pcl/surface/texture_mapping.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
#include <pcl/io/vtk_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/grid_projection.h>
-#include <pcl/common/common.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
std::vector<Vertices> vertices;
hoppe.reconstruct (points, vertices);
- EXPECT_NEAR (points.points[points.size()/2].x, -0.037143, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].y, 0.098213, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].z, -0.044911, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].x, -0.037143, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].y, 0.098213, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].z, -0.044911, 1e-3);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 11202);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 11203);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 11204);
rbf.setOffSurfaceDisplacement (0.02f);
rbf.reconstruct (points, vertices);
- EXPECT_NEAR (points.points[points.size()/2].x, -0.025630, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].y, 0.135228, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].z, 0.035766, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].x, -0.025630, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].y, 0.135228, 1e-3);
+ EXPECT_NEAR (points[points.size()/2].z, 0.035766, 1e-3);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 4275);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 4276);
EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 4277);
#include <pcl/io/vtk_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/mls.h>
-#include <pcl/common/common.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
// Reconstruct
mls.process (*mls_normals);
- EXPECT_NEAR (mls_normals->points[0].x, 0.005417, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].y, 0.113463, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].z, 0.040715, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[0]), 0.111894, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[1]), 0.594906, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[2]), 0.795969, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].curvature, 0.012019, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].x, 0.005417, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].y, 0.113463, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].z, 0.040715, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[0]), 0.111894, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[1]), 0.594906, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[2]), 0.795969, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].curvature, 0.012019, 1e-3);
// Testing upsampling
MovingLeastSquares<PointXYZ, PointNormal> mls_upsampling;
mls_normals->clear ();
mls_upsampling.process (*mls_normals);
- EXPECT_NEAR (mls_normals->points[10].x, -0.000538, 1e-3);
- EXPECT_NEAR (mls_normals->points[10].y, 0.110080, 1e-3);
- EXPECT_NEAR (mls_normals->points[10].z, 0.043602, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[10].normal[0]), 0.022678, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[10].normal[1]), 0.554978, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[10].normal[2]), 0.831556, 1e-3);
- EXPECT_NEAR (mls_normals->points[10].curvature, 0.012019, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[10].x, -0.000538, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[10].y, 0.110080, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[10].z, 0.043602, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[0]), 0.022678, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[1]), 0.554978, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[2]), 0.831556, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[10].curvature, 0.012019, 1e-3);
EXPECT_EQ (mls_normals->size (), 6352);
// mls_normals->clear ();
// mls_upsampling.process (*mls_normals);
//
-// EXPECT_NEAR (mls_normals->points[10].x, 0.018806, 1e-3);
-// EXPECT_NEAR (mls_normals->points[10].y, 0.114685, 1e-3);
-// EXPECT_NEAR (mls_normals->points[10].z, 0.037500, 1e-3);
-// EXPECT_NEAR (std::abs (mls_normals->points[10].normal[0]), 0.351352, 1e-3);
-// EXPECT_NEAR (std::abs (mls_normals->points[10].normal[1]), 0.537741, 1e-3);
-// EXPECT_NEAR (std::abs (mls_normals->points[10].normal[2]), 0.766411, 1e-3);
-// EXPECT_NEAR (mls_normals->points[10].curvature, 0.019003, 1e-3);
+// EXPECT_NEAR ((*mls_normals)[10].x, 0.018806, 1e-3);
+// EXPECT_NEAR ((*mls_normals)[10].y, 0.114685, 1e-3);
+// EXPECT_NEAR ((*mls_normals)[10].z, 0.037500, 1e-3);
+// EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[0]), 0.351352, 1e-3);
+// EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[1]), 0.537741, 1e-3);
+// EXPECT_NEAR (std::abs ((*mls_normals)[10].normal[2]), 0.766411, 1e-3);
+// EXPECT_NEAR ((*mls_normals)[10].curvature, 0.019003, 1e-3);
// EXPECT_EQ (mls_normals->size (), 457);
mls_upsampling.setDilationVoxelSize (0.005f);
mls_normals->clear ();
mls_upsampling.process (*mls_normals);
- EXPECT_NEAR (mls_normals->points[10].x, -0.070005938410758972, 2e-3);
- EXPECT_NEAR (mls_normals->points[10].y, 0.028887597844004631, 2e-3);
- EXPECT_NEAR (mls_normals->points[10].z, 0.01788550429046154, 2e-3);
- EXPECT_NEAR (mls_normals->points[10].curvature, 0.107273, 1e-1);
+ EXPECT_NEAR ((*mls_normals)[10].x, -0.070005938410758972, 2e-3);
+ EXPECT_NEAR ((*mls_normals)[10].y, 0.028887597844004631, 2e-3);
+ EXPECT_NEAR ((*mls_normals)[10].z, 0.01788550429046154, 2e-3);
+ EXPECT_NEAR ((*mls_normals)[10].curvature, 0.107273, 1e-1);
EXPECT_NEAR (double (mls_normals->size ()), 29394, 2);
}
// Reconstruct
mls_omp.process (*mls_normals);
- EXPECT_NEAR (mls_normals->points[0].x, 0.005417, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].y, 0.113463, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].z, 0.040715, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[0]), 0.111894, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[1]), 0.594906, 1e-3);
- EXPECT_NEAR (std::abs (mls_normals->points[0].normal[2]), 0.795969, 1e-3);
- EXPECT_NEAR (mls_normals->points[0].curvature, 0.012019, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].x, 0.005417, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].y, 0.113463, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].z, 0.040715, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[0]), 0.111894, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[1]), 0.594906, 1e-3);
+ EXPECT_NEAR (std::abs ((*mls_normals)[0].normal[2]), 0.795969, 1e-3);
+ EXPECT_NEAR ((*mls_normals)[0].curvature, 0.012019, 1e-3);
}
#endif
#include <pcl/io/vtk_io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/surface/organized_fast_mesh.h>
-#include <pcl/common/common.h>
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
{
for (std::size_t j = 0; j < cloud_organized->width; j++)
{
- cloud_organized->points[npoints].x = static_cast<float> (i);
- cloud_organized->points[npoints].y = static_cast<float> (j);
- cloud_organized->points[npoints].z = static_cast<float> (cloud_organized->points.size ()); // to avoid shadowing
+ (*cloud_organized)[npoints].x = static_cast<float> (i);
+ (*cloud_organized)[npoints].y = static_cast<float> (j);
+ (*cloud_organized)[npoints].z = static_cast<float> (cloud_organized->size ()); // to avoid shadowing
npoints++;
}
}
int nan_idx = cloud_organized->width*cloud_organized->height - 2*cloud_organized->width + 1;
- cloud_organized->points[nan_idx].x = std::numeric_limits<float>::quiet_NaN ();
- cloud_organized->points[nan_idx].y = std::numeric_limits<float>::quiet_NaN ();
- cloud_organized->points[nan_idx].z = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud_organized)[nan_idx].x = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud_organized)[nan_idx].y = std::numeric_limits<float>::quiet_NaN ();
+ (*cloud_organized)[nan_idx].z = std::numeric_limits<float>::quiet_NaN ();
// Init objects
PolygonMesh triangles;
using namespace pcl;
using namespace pcl::io;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, Poisson)
{
+ // NOTE: The test checks for implementation changes and not the validity of the implementation itself
+
Poisson<PointNormal> poisson;
poisson.setInputCloud (cloud_with_normals);
PolygonMesh mesh;
// io::saveVTKFile ("bunny_poisson.vtk", mesh);
- ASSERT_EQ (mesh.polygons.size (), 1051);
+ ASSERT_EQ (mesh.polygons.size (), 4828);
// All polygons should be triangles
for (std::size_t i = 0; i < mesh.polygons.size (); ++i)
EXPECT_EQ (mesh.polygons[i].vertices.size (), 3);
- EXPECT_EQ (mesh.polygons[10].vertices[0], 121);
- EXPECT_EQ (mesh.polygons[10].vertices[1], 120);
- EXPECT_EQ (mesh.polygons[10].vertices[2], 23);
+ EXPECT_EQ (mesh.polygons[10].vertices[0], 197);
+ EXPECT_EQ (mesh.polygons[10].vertices[1], 198);
+ EXPECT_EQ (mesh.polygons[10].vertices[2], 201);
- EXPECT_EQ (mesh.polygons[200].vertices[0], 130);
- EXPECT_EQ (mesh.polygons[200].vertices[1], 119);
- EXPECT_EQ (mesh.polygons[200].vertices[2], 131);
+ EXPECT_EQ (mesh.polygons[200].vertices[0], 302);
+ EXPECT_EQ (mesh.polygons[200].vertices[1], 313);
+ EXPECT_EQ (mesh.polygons[200].vertices[2], 310);
- EXPECT_EQ (mesh.polygons[1000].vertices[0], 521);
- EXPECT_EQ (mesh.polygons[1000].vertices[1], 516);
- EXPECT_EQ (mesh.polygons[1000].vertices[2], 517);
+ EXPECT_EQ (mesh.polygons[1000].vertices[0], 705);
+ EXPECT_EQ (mesh.polygons[1000].vertices[1], 706);
+ EXPECT_EQ (mesh.polygons[1000].vertices[2], 715);
}
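A minimal sketch of the reconstruction call this test exercises, assuming a PointNormal cloud is already available (the helper name and signature below are illustrative, not part of the test):

#include <pcl/PolygonMesh.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/surface/poisson.h>

// Reconstruct a mesh from a cloud that already carries normals.
void reconstructPoisson (const pcl::PointCloud<pcl::PointNormal>::ConstPtr& cloud_with_normals,
                         pcl::PolygonMesh& mesh)
{
  pcl::Poisson<pcl::PointNormal> poisson;
  poisson.setInputCloud (cloud_with_normals);
  poisson.reconstruct (mesh);  // the exact polygon/vertex counts depend on the bundled
                               // Poisson implementation, as the NOTE above points out
}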
/* ---[ */
#include <pcl/recognition/ransac_based/model_library.h>
#include <pcl/features/normal_3d.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::recognition;
// Compute the features
ne.compute (*cloud_normals);
- // cloud_normals->points.size () should have the same size as the input cloud->points.size ()*
- return cloud_normals->points.size();
+ // the number of estimated normals should match the number of input points (cloud->size ())
+ return cloud_normals->size();
}
return (-1);
}
- if (!estimateNormals(model_cloud, model_cloud_normals) == model_cloud->points.size())
+ if (estimateNormals(model_cloud, model_cloud_normals) != model_cloud->size())
{
std::cerr << "Failed to estimate normals" << std::endl;
return (-1);
using namespace pcl;
using namespace pcl::io;
using namespace pcl::visualization;
-using namespace std;
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
PointCloud<PointNormal>::Ptr cloud_with_normals (new PointCloud<PointNormal>);
pcl::common::CloudGenerator<pcl::PointXYZRGB, pcl::common::UniformGenerator<float> > generator;
pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
- int result = generator.fill(3, 1, *cloud);
+ generator.fill(3, 1, *cloud);
// Setup a basic viewport window
pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-#include "boost.h"
using namespace pcl;
using namespace pcl::io;
fromPCLPointCloud2 (*input, *xyz_cloud);
PointCloud<PointXYZ>::Ptr xyz_cloud_filtered (new PointCloud<PointXYZ> ());
- xyz_cloud_filtered->points.resize (xyz_cloud->points.size ());
+ xyz_cloud_filtered->points.resize (xyz_cloud->size ());
xyz_cloud_filtered->header = xyz_cloud->header;
xyz_cloud_filtered->width = xyz_cloud->width;
xyz_cloud_filtered->height = xyz_cloud->height;
std::mt19937 rng(rd());
std::normal_distribution<float> nd (0.0f, standard_deviation);
- for (std::size_t point_i = 0; point_i < xyz_cloud->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < xyz_cloud->size (); ++point_i)
{
- xyz_cloud_filtered->points[point_i].x = xyz_cloud->points[point_i].x + nd (rng);
- xyz_cloud_filtered->points[point_i].y = xyz_cloud->points[point_i].y + nd (rng);
- xyz_cloud_filtered->points[point_i].z = xyz_cloud->points[point_i].z + nd (rng);
+ (*xyz_cloud_filtered)[point_i].x = (*xyz_cloud)[point_i].x + nd (rng);
+ (*xyz_cloud_filtered)[point_i].y = (*xyz_cloud)[point_i].y + nd (rng);
+ (*xyz_cloud_filtered)[point_i].z = (*xyz_cloud)[point_i].z + nd (rng);
}
pcl::PCLPointCloud2 input_xyz_filtered;
fromPCLPointCloud2 (*cloud_target, *xyz_target);
PointCloud<PointXYZI>::Ptr output_xyzi (new PointCloud<PointXYZI> ());
- output_xyzi->points.resize (xyz_source->points.size ());
+ output_xyzi->points.resize (xyz_source->size ());
output_xyzi->height = cloud_source->height;
output_xyzi->width = cloud_source->width;
{
// print_highlight (stderr, "Computing using the equal indices correspondence heuristic.\n");
- if (xyz_source->points.size () != xyz_target->points.size ())
+ if (xyz_source->size () != xyz_target->size ())
{
print_error ("Source and target clouds do not have the same number of points.\n");
return;
}
- for (std::size_t point_i = 0; point_i < xyz_source->points.size (); ++point_i)
+ for (std::size_t point_i = 0; point_i < xyz_source->size (); ++point_i)
{
- if (!std::isfinite (xyz_source->points[point_i].x) || !std::isfinite (xyz_source->points[point_i].y) || !std::isfinite (xyz_source->points[point_i].z))
+ if (!std::isfinite ((*xyz_source)[point_i].x) || !std::isfinite ((*xyz_source)[point_i].y) || !std::isfinite ((*xyz_source)[point_i].z))
continue;
- if (!std::isfinite (xyz_target->points[point_i].x) || !std::isfinite (xyz_target->points[point_i].y) || !std::isfinite (xyz_target->points[point_i].z))
+ if (!std::isfinite ((*xyz_target)[point_i].x) || !std::isfinite ((*xyz_target)[point_i].y) || !std::isfinite ((*xyz_target)[point_i].z))
continue;
- float dist = squaredEuclideanDistance (xyz_source->points[point_i], xyz_target->points[point_i]);
+ float dist = squaredEuclideanDistance ((*xyz_source)[point_i], (*xyz_target)[point_i]);
rmse += dist;
- output_xyzi->points[point_i].x = xyz_source->points[point_i].x;
- output_xyzi->points[point_i].y = xyz_source->points[point_i].y;
- output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
- output_xyzi->points[point_i].intensity = dist;
+ (*output_xyzi)[point_i].x = (*xyz_source)[point_i].x;
+ (*output_xyzi)[point_i].y = (*xyz_source)[point_i].y;
+ (*output_xyzi)[point_i].z = (*xyz_source)[point_i].z;
+ (*output_xyzi)[point_i].intensity = dist;
}
- rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->size ()));
}
else if (correspondence_type == "nn")
{
KdTreeFLANN<PointXYZ>::Ptr tree (new KdTreeFLANN<PointXYZ> ());
tree->setInputCloud (xyz_target);
- for (std::size_t point_i = 0; point_i < xyz_source->points.size (); ++ point_i)
+ for (std::size_t point_i = 0; point_i < xyz_source->size (); ++ point_i)
{
- if (!std::isfinite (xyz_source->points[point_i].x) || !std::isfinite (xyz_source->points[point_i].y) || !std::isfinite (xyz_source->points[point_i].z))
+ if (!std::isfinite ((*xyz_source)[point_i].x) || !std::isfinite ((*xyz_source)[point_i].y) || !std::isfinite ((*xyz_source)[point_i].z))
continue;
std::vector<int> nn_indices (1);
std::vector<float> nn_distances (1);
- if (!tree->nearestKSearch (xyz_source->points[point_i], 1, nn_indices, nn_distances))
+ if (!tree->nearestKSearch ((*xyz_source)[point_i], 1, nn_indices, nn_distances))
continue;
std::size_t point_nn_i = nn_indices.front();
- float dist = squaredEuclideanDistance (xyz_source->points[point_i], xyz_target->points[point_nn_i]);
+ float dist = squaredEuclideanDistance ((*xyz_source)[point_i], (*xyz_target)[point_nn_i]);
rmse += dist;
- output_xyzi->points[point_i].x = xyz_source->points[point_i].x;
- output_xyzi->points[point_i].y = xyz_source->points[point_i].y;
- output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
- output_xyzi->points[point_i].intensity = dist;
+ (*output_xyzi)[point_i].x = (*xyz_source)[point_i].x;
+ (*output_xyzi)[point_i].y = (*xyz_source)[point_i].y;
+ (*output_xyzi)[point_i].z = (*xyz_source)[point_i].z;
+ (*output_xyzi)[point_i].intensity = dist;
}
- rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->size ()));
}
else if (correspondence_type == "nnplane")
KdTreeFLANN<PointXYZ>::Ptr tree (new KdTreeFLANN<PointXYZ> ());
tree->setInputCloud (xyz_target);
- for (std::size_t point_i = 0; point_i < xyz_source->points.size (); ++ point_i)
+ for (std::size_t point_i = 0; point_i < xyz_source->size (); ++ point_i)
{
- if (!std::isfinite (xyz_source->points[point_i].x) || !std::isfinite (xyz_source->points[point_i].y) || !std::isfinite (xyz_source->points[point_i].z))
+ if (!std::isfinite ((*xyz_source)[point_i].x) || !std::isfinite ((*xyz_source)[point_i].y) || !std::isfinite ((*xyz_source)[point_i].z))
continue;
std::vector<int> nn_indices (1);
std::vector<float> nn_distances (1);
- if (!tree->nearestKSearch (xyz_source->points[point_i], 1, nn_indices, nn_distances))
+ if (!tree->nearestKSearch ((*xyz_source)[point_i], 1, nn_indices, nn_distances))
continue;
std::size_t point_nn_i = nn_indices.front();
- Eigen::Vector3f normal_target = normals_target->points[point_nn_i].getNormalVector3fMap (),
- point_source = xyz_source->points[point_i].getVector3fMap (),
- point_target = xyz_target->points[point_nn_i].getVector3fMap ();
+ Eigen::Vector3f normal_target = (*normals_target)[point_nn_i].getNormalVector3fMap (),
+ point_source = (*xyz_source)[point_i].getVector3fMap (),
+ point_target = (*xyz_target)[point_nn_i].getVector3fMap ();
float dist = normal_target.dot (point_source - point_target);
rmse += dist * dist;
- output_xyzi->points[point_i].x = xyz_source->points[point_i].x;
- output_xyzi->points[point_i].y = xyz_source->points[point_i].y;
- output_xyzi->points[point_i].z = xyz_source->points[point_i].z;
- output_xyzi->points[point_i].intensity = dist * dist;
+ (*output_xyzi)[point_i].x = (*xyz_source)[point_i].x;
+ (*output_xyzi)[point_i].y = (*xyz_source)[point_i].y;
+ (*output_xyzi)[point_i].z = (*xyz_source)[point_i].z;
+ (*output_xyzi)[point_i].intensity = dist * dist;
}
- rmse = std::sqrt (rmse / static_cast<float> (xyz_source->points.size ()));
+ rmse = std::sqrt (rmse / static_cast<float> (xyz_source->size ()));
}
else
{
#include <pcl/console/time.h>
#include <pcl/search/kdtree.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
pcl::PointCloud<pcl::PointNormal>::Ptr cloud_normals (new pcl::PointCloud<pcl::PointNormal>);
cloud_normals->width = cloud->width;
cloud_normals->height = cloud->height;
- cloud_normals->points.resize (cloud->points.size ());
- for (std::size_t i = 0; i < cloud->points.size (); i++)
+ cloud_normals->points.resize (cloud->size ());
+ for (std::size_t i = 0; i < cloud->size (); i++)
{
- cloud_normals->points[i].x = cloud->points[i].x;
- cloud_normals->points[i].y = cloud->points[i].y;
- cloud_normals->points[i].z = cloud->points[i].z;
+ (*cloud_normals)[i].x = (*cloud)[i].x;
+ (*cloud_normals)[i].y = (*cloud)[i].y;
+ (*cloud_normals)[i].z = (*cloud)[i].z;
}
// estimate surface normals
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
bool
-loadCloud (const string &filename, pcl::PCLPointCloud2 &cloud,
+loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud,
Eigen::Vector4f &translation, Eigen::Quaternionf &orientation)
{
if (loadPCDFile (filename, cloud, translation, orientation) < 0)
}
void
-saveCloud (const string &filename, const pcl::PCLPointCloud2 &output,
+saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output,
const Eigen::Vector4f &translation, const Eigen::Quaternionf &orientation)
{
PCDWriter w;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir, float sigma_s, float sigma_r)
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir, float sigma_s, float sigma_r)
{
#pragma omp parallel for \
default(none) \
compute (cloud, output, sigma_s, sigma_r);
// Prepare output file name
- string filename = pcd_files[i];
+ std::string filename = pcd_files[i];
boost::trim (filename);
- std::vector<string> st;
+ std::vector<std::string> st;
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output, translation, rotation);
}
float sigma_r = default_sigma_r;
parse_argument (argc, argv, "-sigma_s", sigma_s);
parse_argument (argc, argv, "-sigma_r", sigma_r);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::common;
PointCloud<PointNormal>::Ptr cloud (new PointCloud<PointNormal> ());
for (std::size_t i = 0; i < input->size (); ++i)
- if (std::isfinite (input->points[i].x))
- cloud->push_back (input->points[i]);
+ if (std::isfinite ((*input)[i].x))
+ cloud->push_back ((*input)[i]);
- cloud->width = static_cast<std::uint32_t> (cloud->size ());
+ cloud->width = cloud->size ();
cloud->height = 1;
cloud->is_dense = true;
#include <pcl/console/time.h>
#include <pcl/filters/grid_minimum.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir,
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir,
float resolution)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, resolution);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
// Command line parsing
float resolution = default_resolution;
parse_argument (argc, argv, "-resolution", resolution);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <typeinfo>
#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace pcl::console;
FPS_CALC ("cloud callback");
std::lock_guard<std::mutex> lock (cloud_mutex_);
cloud_ = cloud;
- //std::cout << cloud->points[0] << " " << cloud->size () << std::endl;
+ //std::cout << (*cloud)[0] << " " << cloud->size () << std::endl;
}
void
#include <pcl/visualization/image_viewer.h>
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/parse.h>
-#include <pcl/visualization/mouse_event.h>
#include <vector>
-#include <string>
-
-using namespace std;
int
main (int, char ** argv)
pcl::visualization::ImageViewer depth_image_viewer_;
float* img = new float[cloud.width * cloud.height];
- for (int i = 0; i < static_cast<int> (xyz.points.size ()); ++i)
- img[i] = xyz.points[i].z;
+ for (int i = 0; i < static_cast<int> (xyz.size ()); ++i)
+ img[i] = xyz[i].z;
depth_image_viewer_.showFloatImage (img,
cloud.width, cloud.height,
#include <pcl/console/time.h>
#include <pcl/filters/local_maximum.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir,
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir,
float radius)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, radius);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
// Command line parsing
float radius = default_radius;
parse_argument (argc, argv, "-radius", radius);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
{
for (std::size_t x = 0; x < image.get_width (); ++x)
{
- const pcl::PointXYZRGBA & p = input->points[i++];
+ const pcl::PointXYZRGBA & p = (*input)[i++];
image[y][x] = png::rgb_pixel(p.r, p.g, p.b);
}
}
Eigen::Vector3f n (0, 0, 0);
Eigen::Vector3f c (0, 0, 0);
randPSurface (polydata, &cumulativeAreas, totalArea, p, calc_normal, n, calc_color, c);
- cloud_out.points[i].x = p[0];
- cloud_out.points[i].y = p[1];
- cloud_out.points[i].z = p[2];
+ cloud_out[i].x = p[0];
+ cloud_out[i].y = p[1];
+ cloud_out[i].z = p[2];
if (calc_normal)
{
- cloud_out.points[i].normal_x = n[0];
- cloud_out.points[i].normal_y = n[1];
- cloud_out.points[i].normal_z = n[2];
+ cloud_out[i].normal_x = n[0];
+ cloud_out[i].normal_y = n[1];
+ cloud_out[i].normal_z = n[2];
}
if (calc_color)
{
- cloud_out.points[i].r = static_cast<std::uint8_t>(c[0]);
- cloud_out.points[i].g = static_cast<std::uint8_t>(c[1]);
- cloud_out.points[i].b = static_cast<std::uint8_t>(c[2]);
+ cloud_out[i].r = static_cast<std::uint8_t>(c[0]);
+ cloud_out[i].g = static_cast<std::uint8_t>(c[1]);
+ cloud_out[i].b = static_cast<std::uint8_t>(c[2]);
}
}
}
#include <pcl/console/time.h>
#include <pcl/surface/mls.h>
#include <pcl/filters/voxel_grid.h>
+#include <pcl/search/kdtree.h> // for KdTree
using namespace pcl;
using namespace pcl::io;
// Filter the NaNs from the cloud
for (std::size_t i = 0; i < xyz_cloud_pre->size (); ++i)
- if (std::isfinite (xyz_cloud_pre->points[i].x))
- xyz_cloud->push_back (xyz_cloud_pre->points[i]);
+ if (std::isfinite ((*xyz_cloud_pre)[i].x))
+ xyz_cloud->push_back ((*xyz_cloud_pre)[i]);
xyz_cloud->header = xyz_cloud_pre->header;
xyz_cloud->height = 1;
- xyz_cloud->width = static_cast<std::uint32_t> (xyz_cloud->size ());
+ xyz_cloud->width = xyz_cloud->size ();
xyz_cloud->is_dense = false;
#include <pcl/console/time.h>
#include <pcl/filters/morphological_filter.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir,
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir,
float resolution, const std::string &method)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, resolution, method);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
float resolution = default_resolution;
parse_argument (argc, argv, "-method", method);
parse_argument (argc, argv, "-resolution", resolution);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
bool
-loadCloud (const string &filename, pcl::PCLPointCloud2 &cloud,
+loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud,
Eigen::Vector4f &translation, Eigen::Quaternionf &orientation)
{
if (loadPCDFile (filename, cloud, translation, orientation) < 0)
}
void
-saveCloud (const string &filename, const pcl::PCLPointCloud2 &output,
+saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output,
const Eigen::Vector4f &translation, const Eigen::Quaternionf &orientation)
{
PCDWriter w;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir, int k, double radius)
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir, int k, double radius)
{
#pragma omp parallel for \
default(none) \
compute (cloud, output, k, radius);
// Prepare output file name
- string filename = pcd_files[i];
+ std::string filename = pcd_files[i];
boost::trim (filename);
- std::vector<string> st;
+ std::vector<std::string> st;
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output, translation, rotation);
}
double radius = default_radius;
parse_argument (argc, argv, "-k", k);
parse_argument (argc, argv, "-radius", radius);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <thread>
#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace io;
PointCloud<PointXYZ>& points_;
PointCloud<Normal>& normals_;
int num_hypotheses_to_show_;
- list<vtkActor*> actors_, model_actors_;
+ std::list<vtkActor*> actors_, model_actors_;
bool show_models_;
};
//===============================================================================================================================
void
-showHypothesisAsCoordinateFrame (Hypothesis& hypo, CallbackParameters* parameters, const string &frame_name)
+showHypothesisAsCoordinateFrame (Hypothesis& hypo, CallbackParameters* parameters, const std::string &frame_name)
{
float rot_col[3], x_dir[3], y_dir[3], z_dir[3], origin[3], scale = 2.0f*parameters->objrec_.getPairWidth ();
pcl::ModelCoefficients coeffs; coeffs.values.resize (6);
void
update (CallbackParameters* params)
{
- list<ObjRecRANSAC::Output> dummy_output;
+ std::list<ObjRecRANSAC::Output> dummy_output;
// Run the recognition method
params->objrec_.recognize (params->points_, params->normals_, dummy_output);
vtk_hh->Update ();
// The lines
- string lines_str_id = "opps";
+ std::string lines_str_id = "opps";
params->viz_.addModelFromPolyData (vtk_opps, lines_str_id);
params->viz_.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR, 0.0, 0.0, 1.0, lines_str_id);
// The normals
- string normals_str_id = "opps normals";
+ std::string normals_str_id = "opps normals";
params->viz_.addModelFromPolyData (vtk_hh->GetOutput (), normals_str_id);
params->viz_.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 1.0, 0.0, normals_str_id);
#endif
// Switch models visibility
params->show_models_ = !params->show_models_;
- for ( list<vtkActor*>::iterator it = params->model_actors_.begin () ; it != params->model_actors_.end () ; ++it )
+ for ( std::list<vtkActor*>::iterator it = params->model_actors_.begin () ; it != params->model_actors_.end () ; ++it )
(*it)->SetVisibility (static_cast<int> (params->show_models_));
params->viz_.getRenderWindow ()->Render ();
const int num_params = 4;
float parameters[num_params] = {40.0f/*pair width*/, 5.0f/*voxel size*/, 15.0f/*max co-planarity angle*/, 1/*n_hypotheses_to_show*/};
- string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle", "n_hypotheses_to_show"};
+ std::string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle", "n_hypotheses_to_show"};
// Read the user input if any
for ( int i = 0 ; i < argc-1 && i < num_params ; ++i )
#include <vtkGlyph3D.h>
#include <cstdio>
#include <thread>
-#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace io;
#include <thread>
#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace io;
const int num_params = 3;
float parameters[num_params] = {10.0f/*pair width*/, 5.0f/*voxel size*/, 5.0f/*max co-planarity angle*/};
- string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
+ std::string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
// Read the user input if any
for ( int i = 0 ; i < argc-1 && i < num_params ; ++i )
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <list>
#include <thread>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace io;
PCLVisualizer& viz_;
PointCloud<PointXYZ>& scene_points_;
PointCloud<Normal>& scene_normals_;
- list<vtkActor*> actors_;
+ std::list<vtkActor*> actors_;
};
//===========================================================================================================================================
const int num_params = 3;
float parameters[num_params] = {40.0f/*pair width*/, 5.0f/*voxel size*/, 15.0f/*max co-planarity angle*/};
- string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
+ std::string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
// Read the user input if any
for ( int i = 0 ; i < argc-1 && i < num_params ; ++i )
objrec.setMaxCoplanarityAngleDegrees (max_coplanarity_angle);
// The models to be loaded
- list<string> model_names;
+ std::list<std::string> model_names;
model_names.emplace_back("tum_amicelli_box");
model_names.emplace_back("tum_rusk_box");
model_names.emplace_back("tum_soda_bottle");
- list<PointCloud<PointXYZ>::Ptr> model_points_list;
- list<PointCloud<Normal>::Ptr> model_normals_list;
- list<vtkSmartPointer<vtkPolyData> > vtk_models_list;
+ std::list<PointCloud<PointXYZ>::Ptr> model_points_list;
+ std::list<PointCloud<Normal>::Ptr> model_normals_list;
+ std::list<vtkSmartPointer<vtkPolyData> > vtk_models_list;
// Load the models and add them to the recognizer
for (const auto &model_name : model_names)
vtk_models_list.push_back (vtk_model);
// Compose the file
- string file_name = string("../../test/") + model_name + string (".vtk");
+ std::string file_name = std::string("../../test/") + model_name + std::string (".vtk");
// Get the points and normals from the input model
if ( !vtk2PointCloud (file_name.c_str (), *model_points, *model_normals, vtk_model) )
params->actors_.clear ();
// This will be the output of the recognition
- list<ObjRecRANSAC::Output> rec_output;
+ std::list<ObjRecRANSAC::Output> rec_output;
// For convenience
ObjRecRANSAC& objrec = params->objrec_;
int i = 0;
// Show the hypotheses
- for ( list<ObjRecRANSAC::Output>::iterator it = rec_output.begin () ; it != rec_output.end () ; ++it, ++i )
+ for ( std::list<ObjRecRANSAC::Output>::iterator it = rec_output.begin () ; it != rec_output.end () ; ++it, ++i )
{
std::cout << it->object_name_ << " has a confidence value of " << it->match_confidence_ << std::endl;
{
if ( static_cast<int> (id) == inliers->indices[i] )
{
- plane_points.points[i] = all_points->points[id];
+ plane_points[i] = (*all_points)[id];
++i;
}
else
{
- non_plane_points.points[j] = all_points->points[id];
- non_plane_normals.points[j] = all_normals->points[id];
+ non_plane_points[j] = (*all_points)[id];
+ non_plane_normals[j] = (*all_normals)[id];
++j;
}
++id;
// Just copy the rest of the non-plane points
for ( std::size_t id = inliers->indices.size (); id < all_points->size () ; ++id, ++j )
{
- non_plane_points.points[j] = all_points->points[id];
- non_plane_normals.points[j] = all_normals->points[id];
+ non_plane_points[j] = (*all_points)[id];
+ non_plane_normals[j] = (*all_normals)[id];
}
return true;
#include <pcl/recognition/ransac_based/obj_rec_ransac.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/print.h>
-#include <pcl/console/parse.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <vtkVersion.h>
#include <vtkHedgeHog.h>
#include <cstdio>
#include <thread>
-#include <vector>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace io;
const int num_params = 3;
float parameters[num_params] = {40.0f/*pair width*/, 5.0f/*voxel size*/, 15.0f/*max co-planarity angle*/};
- string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
+ std::string parameter_names[num_params] = {"pair_width", "voxel_size", "max_coplanarity_angle"};
// Read the user input if any
for ( int i = 0 ; i < argc-1 && i < num_params ; ++i )
void update (CallbackParameters* params)
{
- list<ObjRecRANSAC::Output> dummy_output;
+ std::list<ObjRecRANSAC::Output> dummy_output;
// Run the recognition method
params->objrec_.recognize (params->points_, params->normals_, dummy_output);
// Build the vtk objects visualizing the lines between the opps
- const list<ObjRecRANSAC::OrientedPointPair>& opps = params->objrec_.getSampledOrientedPointPairs ();
+ const std::list<ObjRecRANSAC::OrientedPointPair>& opps = params->objrec_.getSampledOrientedPointPairs ();
std::cout << "There is (are) " << opps.size () << " oriented point pair(s).\n";
// The opps points
vtkSmartPointer<vtkPolyData> vtk_opps = vtkSmartPointer<vtkPolyData>::New ();
vtk_hh->Update ();
// The lines
- string lines_str_id = "opps";
+ std::string lines_str_id = "opps";
params->viz_.removeShape(lines_str_id);
params->viz_.addModelFromPolyData (vtk_opps, lines_str_id);
params->viz_.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR, 0.0, 0.0, 1.0, lines_str_id);
// The normals
- string normals_str_id = "opps normals";
+ std::string normals_str_id = "opps normals";
params->viz_.removeShape(normals_str_id);
params->viz_.addModelFromPolyData (vtk_hh->GetOutput (), normals_str_id);
params->viz_.setShapeRenderingProperties(pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 1.0, 0.0, normals_str_id);
#include <pcl/common/centroid.h>
#include <pcl/filters/filter.h>
-#include "boost.h"
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
//remove NaN Points
std::vector<int> nanIndexes;
pcl::removeNaNFromPointCloud(*cloud, *cloud, nanIndexes);
- std::cout << "Loaded " << cloud->points.size() << " points" << std::endl;
+ std::cout << "Loaded " << cloud->size() << " points" << std::endl;
//create octree structure
octree.setInputCloud(cloud);
viz.addText (level, 0, 30, 1.0, 0.0, 0.0, "level_t1");
viz.removeShape ("level_t2");
- sprintf (level, "Voxel size: %.4fm [%lu voxels]", std::sqrt (octree.getVoxelSquaredSideLen (displayedDepth)),
- cloudVoxel->points.size ());
+ sprintf(level,
+ "Voxel size: %.4fm [%zu voxels]",
+ std::sqrt(octree.getVoxelSquaredSideLen(displayedDepth)),
+ static_cast<std::size_t>(cloudVoxel->size()));
viz.addText (level, 0, 15, 1.0, 0.0, 0.0, "level_t2");
}
}
double end = pcl::getTime ();
- printf("%lu pts, %.4gs. %.4gs./pt. =====\n", displayCloud->points.size (), end - start,
- (end - start) / static_cast<double> (displayCloud->points.size ()));
+ printf("%zu pts, %.4gs. %.4gs./pt. =====\n",
+ static_cast<std::size_t>(displayCloud->size()),
+ end - start,
+ (end - start) / static_cast<double>(displayCloud->size()));
update();
}
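// The two printf/sprintf hunks above switch from "%lu" to "%zu" with an
// explicit static_cast<std::size_t>: %zu is the length modifier that matches
// std::size_t on every platform, while %lu is only correct where std::size_t
// happens to be unsigned long. A minimal sketch:
#include <cstdio>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ> cloud;
  cloud.resize (42);
  // The cast keeps the argument type explicit even if size()'s return type changes.
  std::printf ("%zu pts\n", static_cast<std::size_t> (cloud.size ()));
  return 0;
}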
#include <pcl/io/pcd_io.h>
#include <vector>
-using namespace std;
using namespace pcl;
using namespace pcl::console;
*
*/
-#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/time.h> //fps calculations
#include <pcl/io/openni_grabber.h>
#include <thread>
#include <memory>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace pcl::console;
FPS_CALC_WRITER ("data write ", buf_);
nr_frames_total++;
- stringstream ss1, ss2, ss3;
+ std::stringstream ss1, ss2, ss3;
- string time_string = boost::posix_time::to_iso_string (frame->time);
+ std::string time_string = boost::posix_time::to_iso_string (frame->time);
// Save RGB data
ss1 << "frame_" << time_string << "_rgb.pclzf";
switch (frame->image->getEncoding ())
void
receiveAndView ()
{
- string mouseMsg2D ("Mouse coordinates in image viewer");
- string keyMsg2D ("Key event for image viewer");
+ std::string mouseMsg2D ("Mouse coordinates in image viewer");
+ std::string keyMsg2D ("Key event for image viewer");
image_viewer_->registerMouseCallback (&Viewer::mouse_callback, *this, static_cast<void*> (&mouseMsg2D));
image_viewer_->registerKeyboardCallback(&Viewer::keyboard_callback, *this, static_cast<void*> (&keyMsg2D));
else
print_highlight ("Using default buffer size of %d frames.\n", buff_size);
- string device_id;
+ std::string device_id;
OpenNIGrabber::Mode image_mode = OpenNIGrabber::OpenNI_Default_Mode;
OpenNIGrabber::Mode depth_mode = OpenNIGrabber::OpenNI_Default_Mode;
{
OpenNIGrabber grabber (argv[2]);
auto device = grabber.getDevice ();
- std::vector<pair<int, XnMapOutputMode> > modes;
+ std::vector<std::pair<int, XnMapOutputMode> > modes;
if (device->hasImageStream ())
{
std::cout << std::endl << "Supported image modes for device: " << device->getVendorName () << " , " << device->getProductName () << std::endl;
modes = grabber.getAvailableImageModes ();
- for (vector<pair<int, XnMapOutputMode> >::const_iterator it = modes.begin (); it != modes.end (); ++it)
+ for (std::vector<std::pair<int, XnMapOutputMode> >::const_iterator it = modes.begin (); it != modes.end (); ++it)
{
std::cout << it->first << " = " << it->second.nXRes << " x " << it->second.nYRes << " @ " << it->second.nFPS << std::endl;
}
{
std::cout << std::endl << "Supported depth modes for device: " << device->getVendorName () << " , " << device->getProductName () << std::endl;
modes = grabber.getAvailableDepthModes ();
- for (vector<pair<int, XnMapOutputMode> >::const_iterator it = modes.begin (); it != modes.end (); ++it)
+ for (std::vector<std::pair<int, XnMapOutputMode> >::const_iterator it = modes.begin (); it != modes.end (); ++it)
{
std::cout << it->first << " = " << it->second.nXRes << " x " << it->second.nYRes << " @ " << it->second.nFPS << std::endl;
}
#include <vector>
#include <string>
-using namespace std;
-
#define SHOW_FPS 1
#if SHOW_FPS
#define FPS_CALC(_WHAT_) \
{
//pcl::Grabber* interface = new pcl::OpenNIGrabber(device_id_, pcl::OpenNIGrabber::OpenNI_QQVGA_30Hz, pcl::OpenNIGrabber::OpenNI_VGA_30Hz);
- string mouseMsg3D("Mouse coordinates in PCL Visualizer");
- string keyMsg3D("Key event for PCL Visualizer");
+ std::string mouseMsg3D("Mouse coordinates in PCL Visualizer");
+ std::string keyMsg3D("Key event for PCL Visualizer");
cloud_viewer_.registerMouseCallback (&SimpleOpenNIViewer::mouse_callback, *this, (void*)(&mouseMsg3D));
cloud_viewer_.registerKeyboardCallback(&SimpleOpenNIViewer::keyboard_callback, *this, (void*)(&keyMsg3D));
std::function<void (const CloudConstPtr&)> cloud_cb = [this] (const CloudConstPtr& cloud) { cloud_callback (cloud); };
boost::signals2::connection image_connection;
if (grabber_.providesCallback<void (const openni_wrapper::Image::Ptr&)>())
{
- string mouseMsg2D("Mouse coordinates in image viewer");
- string keyMsg2D("Key event for image viewer");
+ std::string mouseMsg2D("Mouse coordinates in image viewer");
+ std::string keyMsg2D("Key event for image viewer");
image_viewer_.registerMouseCallback (&SimpleOpenNIViewer::mouse_callback, *this, (void*)(&mouseMsg2D));
image_viewer_.registerKeyboardCallback(&SimpleOpenNIViewer::keyboard_callback, *this, (void*)(&keyMsg2D));
std::function<void (const openni_wrapper::Image::Ptr&)> image_cb = [this] (const openni_wrapper::Image::Ptr& img) { image_callback (img); };
#include <pcl/console/time.h>
#include <pcl/filters/passthrough.h>
-
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir,
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir,
const std::string &field_name, float min, float max, bool inside, bool keep_organized)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, field_name, min, max, inside, keep_organized);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
parse_argument (argc, argv, "-inside", inside);
parse_argument (argc, argv, "-field", field_name);
parse_argument (argc, argv, "-keep", keep_organized);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
projected_cloud_pcl->sensor_origin_ = xyz->sensor_origin_;
projected_cloud_pcl->sensor_orientation_ = xyz->sensor_orientation_;
- for(std::size_t i = 0; i < xyz->points.size(); ++i)
+ for (const auto& point: *xyz)
{
pcl::PointXYZ projection;
- pcl::projectPoint<PointXYZ> (xyz->points[i], coeffs, projection);
+ pcl::projectPoint<PointXYZ> (point, coeffs, projection);
projected_cloud_pcl->points.push_back(projection);
}
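// The hunk above replaces index-based iteration over xyz->points with a
// range-based for over the cloud itself; pcl::PointCloud exposes begin()/end(),
// so any per-point transformation can be written this way. A minimal sketch
// (the shiftZ transformation is illustrative only, not from the tool):
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

pcl::PointCloud<pcl::PointXYZ>
shiftZ (const pcl::PointCloud<pcl::PointXYZ> &in, float dz)
{
  pcl::PointCloud<pcl::PointXYZ> out;
  out.header = in.header;
  for (const auto &point : in)    // same range-based pattern as above
  {
    pcl::PointXYZ p = point;
    p.z += dz;
    out.push_back (p);            // push_back keeps width/height consistent
  }
  return out;
}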
#include <pcl/segmentation/approximate_progressive_morphological_filter.h>
#include <pcl/segmentation/progressive_morphological_filter.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
if (approximate)
{
- PCL_DEBUG ("approx with %d points\n", input->points.size ());
+ PCL_DEBUG("approx with %zu points\n", static_cast<std::size_t>(input->size()));
ApproximateProgressiveMorphologicalFilter<PointType> pmf;
pmf.setInputCloud (input);
pmf.setMaxWindowSize (max_window_size);
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir, int max_window_size, float slope, float max_distance, float initial_distance, float cell_size, float base, bool exponential, bool approximate)
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir, int max_window_size, float slope, float max_distance, float initial_distance, float cell_size, float base, bool exponential, bool approximate)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, max_window_size, slope, max_distance, initial_distance, cell_size, base, exponential, approximate);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
parse_argument (argc, argv, "-exponential", exponential);
approximate = find_switch (argc, argv, "-approximate");
parse_argument (argc, argv, "-verbosity", verbosity_level);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
bool
-loadCloud (const string &filename, pcl::PCLPointCloud2 &cloud)
+loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud)
{
TicToc tt;
print_highlight ("Loading "); print_value ("%s ", filename.c_str ());
}
void
-saveCloud (const string &filename, const pcl::PCLPointCloud2 &output)
+saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output)
{
TicToc tt;
tt.tic ();
}
int
-batchProcess (const std::vector<string> &pcd_files, string &output_dir, int max_it, double thresh, bool negative)
+batchProcess (const std::vector<std::string> &pcd_files, std::string &output_dir, int max_it, double thresh, bool negative)
{
- std::vector<string> st;
+ std::vector<std::string> st;
for (const auto &pcd_file : pcd_files)
{
// Load the first file
compute (cloud, output, max_it, thresh, negative);
// Prepare output file name
- string filename = pcd_file;
+ std::string filename = pcd_file;
boost::trim (filename);
boost::split (st, filename, boost::is_any_of ("/\\"), boost::token_compress_on);
// Save into the second file
- stringstream ss;
+ std::stringstream ss;
ss << output_dir << "/" << st.at (st.size () - 1);
saveCloud (ss.str (), output);
}
parse_argument (argc, argv, "-max_it", max_it);
parse_argument (argc, argv, "-thresh", thresh);
parse_argument (argc, argv, "-neg", negative);
- string input_dir, output_dir;
+ std::string input_dir, output_dir;
if (parse_argument (argc, argv, "-input_dir", input_dir) != -1)
{
PCL_INFO ("Input directory given as %s. Batch process mode on.\n", input_dir.c_str ());
{
if (!input_dir.empty() && boost::filesystem::exists (input_dir))
{
- std::vector<string> pcd_files;
+ std::vector<std::string> pcd_files;
boost::filesystem::directory_iterator end_itr;
for (boost::filesystem::directory_iterator itr (input_dir); itr != end_itr; ++itr)
{
#include <pcl/common/time.h>
#include <pcl/visualization/boost.h>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
pcl::IndicesPtr indices (new std::vector<int>);
for (std::size_t i = 0; i < input->size (); ++i)
{
- const float z = input->points[i].z;
+ const float z = (*input)[i].z;
if (min_depth < z && z < max_depth)
{
foreground_mask[i] = true;
{
if (foreground_mask[i])
{
- const pcl::PointXYZRGBA & p = input->points[i];
+ const pcl::PointXYZRGBA & p = (*input)[i];
float d = std::abs (c[0]*p.x + c[1]*p.y + c[2]*p.z + c[3]);
foreground_mask[i] = (d < max_height);
}
{
if (!foreground_mask[i])
{
- pcl::PointXYZRGBA & p = template_cloud.points[i];
+ pcl::PointXYZRGBA & p = template_cloud[i];
p.x = p.y = p.z = std::numeric_limits<float>::quiet_NaN ();
}
}
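// The hunk above invalidates masked-out points in place by writing NaN into
// their coordinates, which keeps the cloud organized; such a cloud should
// advertise is_dense = false. A minimal sketch of the same masking idea
// (maskOut is an illustrative helper, not part of the tool):
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <limits>
#include <vector>

void
maskOut (pcl::PointCloud<pcl::PointXYZRGBA> &cloud, const std::vector<bool> &keep)
{
  const float nan = std::numeric_limits<float>::quiet_NaN ();
  for (std::size_t i = 0; i < cloud.size () && i < keep.size (); ++i)
    if (!keep[i])
      cloud[i].x = cloud[i].y = cloud[i].z = nan;
  cloud.is_dense = false;
}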
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
#include <pcl/common/transforms.h>
-#include <cmath>
using namespace pcl;
using namespace pcl::io;
#include <string>
#include <pcl/io/vtk_io.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
bool
-loadCloud (const string &filename, pcl::PCLPointCloud2 &cloud)
+loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud)
{
TicToc tt;
print_highlight ("Loading "); print_value ("%s ", filename.c_str ());
}
void
-saveCloud (const string &filename, const pcl::PCLPointCloud2 &output)
+saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output)
{
TicToc tt;
tt.tic ();
}
else
{
- cloud.width = static_cast<std::uint32_t> (cloud.points.size ());
+ cloud.width = cloud.size ();
cloud.height = 1;
}
pcl::PCDWriter writer;
- PCL_INFO ("Wrote %lu points (%d x %d) to %s\n", cloud.points.size (), cloud.width, cloud.height, fname.c_str ());
+ PCL_INFO("Wrote %zu points (%d x %d) to %s\n",
+ static_cast<std::size_t>(cloud.size()),
+ cloud.width,
+ cloud.height,
+ fname.c_str());
writer.writeBinaryCompressed (fname, cloud);
} // sphere
return (0);
#include <pcl/io/openni_camera/openni_driver.h>
#include <pcl/console/parse.h>
#include <pcl/visualization/boost.h>
-#include <pcl/visualization/mouse_event.h>
#include <boost/algorithm/string.hpp>
#include <mutex>
-#include <vector>
#include <string>
-#include <typeinfo>
-using namespace std;
using namespace std::chrono_literals;
using namespace pcl;
using namespace pcl::console;
print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", (int)occluded_voxels.size ()); print_info (" occluded voxels]\n");
CloudT::Ptr occ_centroids (new CloudT);
- occ_centroids->width = static_cast<int> (occluded_voxels.size ());
+ occ_centroids->width = occluded_voxels.size ();
occ_centroids->height = 1;
occ_centroids->is_dense = false;
occ_centroids->points.resize (occluded_voxels.size ());
point.x = xyz[0];
point.y = xyz[1];
point.z = xyz[2];
- occ_centroids->points[i] = point;
+ (*occ_centroids)[i] = point;
}
CloudT::Ptr cloud_centroids (new CloudT);
- cloud_centroids->width = static_cast<int> (input_cloud->points.size ());
+ cloud_centroids->width = input_cloud->size ();
cloud_centroids->height = 1;
cloud_centroids->is_dense = false;
- cloud_centroids->points.resize (input_cloud->points.size ());
+ cloud_centroids->points.resize (input_cloud->size ());
- for (std::size_t i = 0; i < input_cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < input_cloud->size (); ++i)
{
- float x = input_cloud->points[i].x;
- float y = input_cloud->points[i].y;
- float z = input_cloud->points[i].z;
+ float x = (*input_cloud)[i].x;
+ float y = (*input_cloud)[i].y;
+ float z = (*input_cloud)[i].z;
Eigen::Vector3i c = vg.getGridCoordinates (x, y, z);
Eigen::Vector4f xyz = vg.getCentroidCoordinate (c);
PointT point;
point.x = xyz[0];
point.y = xyz[1];
point.z = xyz[2];
- cloud_centroids->points[i] = point;
+ (*cloud_centroids)[i] = point;
}
// visualization
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
-using namespace std;
using namespace pcl;
using namespace pcl::io;
using namespace pcl::console;
}
bool
-loadCloud (const string &filename, PointCloud<PointXYZ> &cloud)
+loadCloud (const std::string &filename, PointCloud<PointXYZ> &cloud)
{
- ifstream fs;
- fs.open (filename.c_str (), ios::binary);
+ std::ifstream fs;
+ fs.open (filename.c_str (), std::ios::binary);
if (!fs.is_open () || fs.fail ())
{
PCL_ERROR ("Could not open file '%s'! Error : %s\n", filename.c_str (), strerror (errno));
return (false);
}
- string line;
- std::vector<string> st;
+ std::string line;
+ std::vector<std::string> st;
while (!fs.eof ())
{
- getline (fs, line);
+ std::getline (fs, line);
// Ignore empty lines
if (line.empty())
continue;
}
fs.close ();
- cloud.width = std::uint32_t (cloud.size ()); cloud.height = 1; cloud.is_dense = true;
+ cloud.width = cloud.size (); cloud.height = 1; cloud.is_dense = true;
return (true);
}
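// The loader above reads an ASCII file line by line with std::getline and
// splits each non-empty line into fields (the field parsing is elided from
// this hunk). A self-contained sketch of the same idea, assuming one
// whitespace-separated "x y z" triple per line; illustration only, not the
// tool's exact parser:
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <fstream>
#include <sstream>
#include <string>

bool
loadXYZ (const std::string &filename, pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  std::ifstream fs (filename.c_str (), std::ios::binary);
  if (!fs.is_open () || fs.fail ())
    return false;
  std::string line;
  while (std::getline (fs, line))
  {
    if (line.empty ())
      continue;
    std::istringstream iss (line);
    pcl::PointXYZ p;
    if (iss >> p.x >> p.y >> p.z)
      cloud.push_back (p);
  }
  cloud.width = cloud.size ();
  cloud.height = 1;
  cloud.is_dense = true;
  return true;
}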
#pragma once
-#include <pcl/search/search.h>
#include <pcl/search/octree.h>
+#include <pcl/search/search.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
-namespace pcl
-{
- namespace tracking
- {
- /** \brief @b ApproxNearestPairPointCloudCoherence computes coherence between two pointclouds using the
- approximate nearest point pairs.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class ApproxNearestPairPointCloudCoherence: public NearestPairPointCloudCoherence<PointInT>
- {
- public:
- using PointCoherencePtr = typename NearestPairPointCloudCoherence<PointInT>::PointCoherencePtr;
- using PointCloudInConstPtr = typename NearestPairPointCloudCoherence<PointInT>::PointCloudInConstPtr;
- //using NearestPairPointCloudCoherence<PointInT>::search_;
- using NearestPairPointCloudCoherence<PointInT>::maximum_distance_;
- using NearestPairPointCloudCoherence<PointInT>::target_input_;
- using NearestPairPointCloudCoherence<PointInT>::point_coherences_;
- using NearestPairPointCloudCoherence<PointInT>::coherence_name_;
- using NearestPairPointCloudCoherence<PointInT>::new_target_;
- using NearestPairPointCloudCoherence<PointInT>::getClassName;
-
- /** \brief empty constructor */
- ApproxNearestPairPointCloudCoherence () :
- NearestPairPointCloudCoherence<PointInT> (), search_ ()
- {
- coherence_name_ = "ApproxNearestPairPointCloudCoherence";
- }
-
- protected:
- /** \brief This method should get called before starting the actual computation. */
- bool initCompute () override;
-
- /** \brief compute the nearest pairs and compute coherence using point_coherences_ */
- void
- computeCoherence (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices, float &w_j) override;
- typename pcl::search::Octree<PointInT>::Ptr search_;
- };
+namespace pcl {
+namespace tracking {
+/** \brief @b ApproxNearestPairPointCloudCoherence computes coherence between
+ * two pointclouds using the approximate nearest point pairs.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class ApproxNearestPairPointCloudCoherence
+: public NearestPairPointCloudCoherence<PointInT> {
+public:
+ using PointCoherencePtr =
+ typename NearestPairPointCloudCoherence<PointInT>::PointCoherencePtr;
+ using PointCloudInConstPtr =
+ typename NearestPairPointCloudCoherence<PointInT>::PointCloudInConstPtr;
+ // using NearestPairPointCloudCoherence<PointInT>::search_;
+ using NearestPairPointCloudCoherence<PointInT>::maximum_distance_;
+ using NearestPairPointCloudCoherence<PointInT>::target_input_;
+ using NearestPairPointCloudCoherence<PointInT>::point_coherences_;
+ using NearestPairPointCloudCoherence<PointInT>::coherence_name_;
+ using NearestPairPointCloudCoherence<PointInT>::new_target_;
+ using NearestPairPointCloudCoherence<PointInT>::getClassName;
+
+ /** \brief empty constructor */
+ ApproxNearestPairPointCloudCoherence()
+ : NearestPairPointCloudCoherence<PointInT>(), search_()
+ {
+ coherence_name_ = "ApproxNearestPairPointCloudCoherence";
}
-}
+
+protected:
+ /** \brief This method should get called before starting the actual
+ * computation.
+ */
+ bool
+ initCompute() override;
+
+ /** \brief compute the nearest pairs and compute coherence using
+ * point_coherences_
+ */
+ void
+ computeCoherence(const PointCloudInConstPtr& cloud,
+ const IndicesConstPtr& indices,
+ float& w_j) override;
+
+ typename pcl::search::Octree<PointInT>::Ptr search_;
+};
+} // namespace tracking
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/tracking/impl/approx_nearest_pair_point_cloud_coherence.hpp>
#include <pcl/pcl_base.h>
-namespace pcl
-{
+namespace pcl {
+
+namespace tracking {
+
+/** \brief @b PointCoherence is a base class to compute coherence between the
+ * two points.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class PointCoherence {
+public:
+ using Ptr = shared_ptr<PointCoherence<PointInT>>;
+ using ConstPtr = shared_ptr<const PointCoherence<PointInT>>;
+
+public:
+ /** \brief empty constructor */
+ PointCoherence() {}
+
+  /** \brief empty destructor */
+ virtual ~PointCoherence() {}
+
+ /** \brief compute coherence from the source point to the target point.
+ * \param source instance of source point.
+ * \param target instance of target point.
+ */
+ inline double
+ compute(PointInT& source, PointInT& target);
+
+protected:
+ /** \brief The coherence name. */
+ std::string coherence_name_;
+
+ /** \brief abstract method to calculate coherence.
+ * \param[in] source instance of source point.
+ * \param[in] target instance of target point.
+ */
+ virtual double
+ computeCoherence(PointInT& source, PointInT& target) = 0;
+
+ /** \brief Get a string representation of the name of this class. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (coherence_name_);
+ }
+};
+
+/** \brief @b PointCloudCoherence is a base class to compute coherence between
+ * the two PointClouds.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class PointCloudCoherence {
+public:
+ using Ptr = shared_ptr<PointCloudCoherence<PointInT>>;
+ using ConstPtr = shared_ptr<const PointCloudCoherence<PointInT>>;
+
+ using PointCloudIn = pcl::PointCloud<PointInT>;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+
+ using PointCoherencePtr = typename PointCoherence<PointInT>::Ptr;
+ /** \brief Constructor. */
+ PointCloudCoherence() : target_input_(), point_coherences_() {}
+
+ /** \brief Destructor. */
+ virtual ~PointCloudCoherence() {}
+
+ /** \brief compute coherence between two pointclouds. */
+ inline void
+ compute(const PointCloudInConstPtr& cloud,
+ const IndicesConstPtr& indices,
+ float& w_i);
+
+ /** \brief get a list of pcl::tracking::PointCoherence.*/
+ inline std::vector<PointCoherencePtr>
+ getPointCoherences()
+ {
+ return point_coherences_;
+ }
+
+ /** \brief set a list of pcl::tracking::PointCoherence.
+ * \param coherences a list of pcl::tracking::PointCoherence.
+ */
+ inline void
+ setPointCoherences(std::vector<PointCoherencePtr> coherences)
+ {
+ point_coherences_ = coherences;
+ }
+
+ /** \brief This method should get called before starting the actual
+ * computation. */
+ virtual bool
+ initCompute();
- namespace tracking
+ /** \brief add a PointCoherence to the PointCloudCoherence.
+ * \param coherence a pointer to PointCoherence.
+ */
+ inline void
+ addPointCoherence(PointCoherencePtr coherence)
{
+ point_coherences_.push_back(coherence);
+ }
- /** \brief @b PointCoherence is a base class to compute coherence between the two points.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class PointCoherence
- {
- public:
- using Ptr = shared_ptr<PointCoherence<PointInT> >;
- using ConstPtr = shared_ptr<const PointCoherence<PointInT> >;
-
- public:
- /** \brief empty constructor */
- PointCoherence () {}
-
- /** \brief empty distructor */
- virtual ~PointCoherence () {}
-
- /** \brief compute coherence from the source point to the target point.
- * \param source instance of source point.
- * \param target instance of target point.
- */
- inline double
- compute (PointInT &source, PointInT &target);
-
- protected:
-
- /** \brief The coherence name. */
- std::string coherence_name_;
-
- /** \brief abstract method to calculate coherence.
- * \param[in] source instance of source point.
- * \param[in] target instance of target point.
- */
- virtual double
- computeCoherence (PointInT &source, PointInT &target) = 0;
-
- /** \brief Get a string representation of the name of this class. */
- inline const std::string&
- getClassName () const { return (coherence_name_); }
-
- };
-
- /** \brief @b PointCloudCoherence is a base class to compute coherence between the two PointClouds.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class PointCloudCoherence
- {
- public:
- using Ptr = shared_ptr<PointCloudCoherence<PointInT> >;
- using ConstPtr = shared_ptr<const PointCloudCoherence<PointInT> >;
-
- using PointCloudIn = pcl::PointCloud<PointInT>;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
-
- using PointCoherencePtr = typename PointCoherence<PointInT>::Ptr;
- /** \brief Constructor. */
- PointCloudCoherence () : target_input_ (), point_coherences_ () {}
-
- /** \brief Destructor. */
- virtual ~PointCloudCoherence () {}
-
- /** \brief compute coherence between two pointclouds. */
- inline void
- compute (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices,
- float &w_i);
-
- /** \brief get a list of pcl::tracking::PointCoherence.*/
- inline std::vector<PointCoherencePtr>
- getPointCoherences () { return point_coherences_; }
-
- /** \brief set a list of pcl::tracking::PointCoherence.
- * \param coherences a list of pcl::tracking::PointCoherence.
- */
- inline void
- setPointCoherences (std::vector<PointCoherencePtr> coherences) { point_coherences_ = coherences; }
-
- /** \brief This method should get called before starting the actual computation. */
- virtual bool initCompute ();
-
- /** \brief add a PointCoherence to the PointCloudCoherence.
- * \param coherence a pointer to PointCoherence.
- */
- inline void
- addPointCoherence (PointCoherencePtr coherence) { point_coherences_.push_back (coherence); }
-
- /** \brief add a PointCoherence to the PointCloudCoherence.
- * \param cloud a pointer to PointCoherence.
- */
- virtual inline void
- setTargetCloud (const PointCloudInConstPtr &cloud) { target_input_ = cloud; }
-
- protected:
- /** \brief Abstract method to compute coherence. */
- virtual void
- computeCoherence (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices, float &w_j) = 0;
-
- inline double calcPointCoherence (PointInT &source, PointInT &target);
-
- /** \brief Get a string representation of the name of this class. */
- inline const std::string&
- getClassName () const { return (coherence_name_); }
-
-
- /** \brief The coherence name. */
- std::string coherence_name_;
-
- /** \brief a pointer to target point cloud*/
- PointCloudInConstPtr target_input_;
-
- /** \brief a list of pointers to PointCoherence.*/
- std::vector<PointCoherencePtr> point_coherences_;
- };
-
+  /** \brief set the target point cloud of the PointCloudCoherence.
+   * \param cloud a pointer to the target point cloud.
+ */
+ virtual inline void
+ setTargetCloud(const PointCloudInConstPtr& cloud)
+ {
+ target_input_ = cloud;
}
-}
+
+protected:
+ /** \brief Abstract method to compute coherence. */
+ virtual void
+ computeCoherence(const PointCloudInConstPtr& cloud,
+ const IndicesConstPtr& indices,
+ float& w_j) = 0;
+
+ inline double
+ calcPointCoherence(PointInT& source, PointInT& target);
+
+ /** \brief Get a string representation of the name of this class. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (coherence_name_);
+ }
+
+ /** \brief The coherence name. */
+ std::string coherence_name_;
+
+ /** \brief a pointer to target point cloud*/
+ PointCloudInConstPtr target_input_;
+
+ /** \brief a list of pointers to PointCoherence.*/
+ std::vector<PointCoherencePtr> point_coherences_;
+};
+
+} // namespace tracking
+} // namespace pcl
#include <pcl/tracking/impl/coherence.hpp>
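// PointCoherence is the extension point of this header: a concrete coherence
// only needs to override the protected computeCoherence(). A minimal,
// hypothetical subclass (not part of PCL) that scores a pair by how close
// their z coordinates are:
#include <pcl/tracking/coherence.h>
#include <cmath>

template <typename PointInT>
class ZCoherence : public pcl::tracking::PointCoherence<PointInT>
{
public:
  ZCoherence () { this->coherence_name_ = "ZCoherence"; }

protected:
  double
  computeCoherence (PointInT &source, PointInT &target) override
  {
    const double dz = std::abs (source.z - target.z);
    return 1.0 / (1.0 + dz * dz);   // 1 for identical z, decaying with distance
  }
};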
#pragma once
-#include <pcl/memory.h>
#include <pcl/tracking/coherence.h>
+#include <pcl/memory.h>
+namespace pcl {
+namespace tracking {
+/** \brief @b DistanceCoherence computes coherence between two points from the distance
+ * between them. The coherence is calculated by 1 / (1 + weight * d^2).
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class DistanceCoherence : public PointCoherence<PointInT> {
+public:
+ using Ptr = shared_ptr<DistanceCoherence<PointInT>>;
+ using ConstPtr = shared_ptr<const DistanceCoherence<PointInT>>;
+
+ /** \brief initialize the weight to 1.0. */
+ DistanceCoherence() : PointCoherence<PointInT>(), weight_(1.0) {}
+
+ /** \brief set the weight of coherence.
+   * \param weight the value of the weight.
+ */
+ inline void
+ setWeight(double weight)
+ {
+ weight_ = weight;
+ }
-namespace pcl
-{
- namespace tracking
+ /** \brief get the weight of coherence.*/
+ inline double
+ getWeight()
{
- /** \brief @b DistanceCoherence computes coherence between two points from the distance
- between them. the coherence is calculated by 1 / (1 + weight * d^2 ).
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class DistanceCoherence: public PointCoherence<PointInT>
- {
- public:
-
- using Ptr = shared_ptr<DistanceCoherence<PointInT> >;
- using ConstPtr = shared_ptr<const DistanceCoherence<PointInT>>;
-
- /** \brief initialize the weight to 1.0. */
- DistanceCoherence ()
- : PointCoherence<PointInT> ()
- , weight_ (1.0)
- {}
-
- /** \brief set the weight of coherence.
- * \param weight the value of the wehgit.
- */
- inline void setWeight (double weight) { weight_ = weight; }
-
- /** \brief get the weight of coherence.*/
- inline double getWeight () { return weight_; }
-
- protected:
-
- /** \brief return the distance coherence between the two points.
- * \param source instance of source point.
- * \param target instance of target point.
- */
- double computeCoherence (PointInT &source, PointInT &target) override;
-
- /** \brief the weight of coherence.*/
- double weight_;
- };
+ return weight_;
}
-}
+
+protected:
+ /** \brief return the distance coherence between the two points.
+ * \param source instance of source point.
+ * \param target instance of target point.
+ */
+ double
+ computeCoherence(PointInT& source, PointInT& target) override;
+
+ /** \brief the weight of coherence.*/
+ double weight_;
+};
+} // namespace tracking
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/tracking/impl/distance_coherence.hpp>
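// DistanceCoherence scores a point pair as 1 / (1 + weight * d^2): with
// weight_ = 1.0, two points 0.1 m apart score 1 / 1.01 ~ 0.99. A minimal
// usage sketch of the interface declared above (assumes the pcl_tracking
// module is linked so the template instantiation is available):
#include <pcl/point_types.h>
#include <pcl/tracking/distance_coherence.h>

int main ()
{
  pcl::tracking::DistanceCoherence<pcl::PointXYZ> coherence;
  coherence.setWeight (1.0);
  pcl::PointXYZ a (0.0f, 0.0f, 0.0f);
  pcl::PointXYZ b (0.1f, 0.0f, 0.0f);
  double w = coherence.compute (a, b);   // compute() forwards to computeCoherence()
  return w > 0.0 ? 0 : 1;
}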
#include <pcl/tracking/coherence.h>
-namespace pcl
-{
- namespace tracking
+namespace pcl {
+namespace tracking {
+/** \brief @b HSVColorCoherence computes coherence between the two points from the color
+ * difference between them. The color difference is calculated in HSV color space. The
+ * coherence is calculated by 1 / (1 + w * (w_h^2 * h_diff^2 + w_s^2 * s_diff^2 + w_v^2
+ * * v_diff^2))
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class HSVColorCoherence : public PointCoherence<PointInT> {
+public:
+ using Ptr = shared_ptr<HSVColorCoherence<PointInT>>;
+ using ConstPtr = shared_ptr<const HSVColorCoherence<PointInT>>;
+
+ /** \brief initialize the weights of the computation. weight_, h_weight_, s_weight_
+ * default to 1.0 and v_weight_ defaults to 0.0.
+ */
+ HSVColorCoherence()
+ : PointCoherence<PointInT>()
+ , weight_(1.0)
+ , h_weight_(1.0)
+ , s_weight_(1.0)
+ , v_weight_(0.0)
+ {}
+
+ /** \brief set the weight of coherence
+ * \param[in] weight the weight of coherence.
+ */
+ inline void
+ setWeight(double weight)
+ {
+ weight_ = weight;
+ }
+
+ /** \brief get the weight (w) of coherence */
+ inline double
+ getWeight()
+ {
+ return weight_;
+ }
+
+ /** \brief set the hue weight (w_h) of coherence
+ * \param[in] weight the hue weight (w_h) of coherence.
+ */
+ inline void
+ setHWeight(double weight)
+ {
+ h_weight_ = weight;
+ }
+
+ /** \brief get the hue weight (w_h) of coherence */
+ inline double
+ getHWeight()
+ {
+ return h_weight_;
+ }
+
+ /** \brief set the saturation weight (w_s) of coherence
+ * \param[in] weight the saturation weight (w_s) of coherence.
+ */
+ inline void
+ setSWeight(double weight)
{
- /** \brief @b HSVColorCoherence computes coherence between the two points from
- the color difference between them. the color difference is calculated in HSV color space.
- the coherence is calculated by 1 / ( 1 + w * (w_h^2 * h_diff^2 + w_s^2 * s_diff^2 + w_v^2 * v_diff^2))
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class HSVColorCoherence: public PointCoherence<PointInT>
- {
- public:
- using Ptr = shared_ptr<HSVColorCoherence<PointInT> >;
- using ConstPtr = shared_ptr<const HSVColorCoherence<PointInT> >;
-
- /** \brief initialize the weights of the computation.
- weight_, h_weight_, s_weight_ default to 1.0 and
- v_weight_ defaults to 0.0.
- */
- HSVColorCoherence ()
- : PointCoherence<PointInT> ()
- , weight_ (1.0)
- , h_weight_ (1.0)
- , s_weight_ (1.0)
- , v_weight_ (0.0)
- {}
-
- /** \brief set the weight of coherence
- * \param[in] weight the weight of coherence.
- */
- inline void
- setWeight (double weight) { weight_ = weight; }
-
- /** \brief get the weight (w) of coherence */
- inline double
- getWeight () { return weight_; }
-
- /** \brief set the hue weight (w_h) of coherence
- * \param[in] weight the hue weight (w_h) of coherence.
- */
- inline void
- setHWeight (double weight) { h_weight_ = weight; }
-
- /** \brief get the hue weight (w_h) of coherence */
- inline double
- getHWeight () { return h_weight_; }
-
- /** \brief set the saturation weight (w_s) of coherence
- * \param[in] weight the saturation weight (w_s) of coherence.
- */
- inline void
- setSWeight (double weight) { s_weight_ = weight; }
-
- /** \brief get the saturation weight (w_s) of coherence */
- inline double
- getSWeight () { return s_weight_; }
-
- /** \brief set the value weight (w_v) of coherence
- * \param[in] weight the value weight (w_v) of coherence.
- */
- inline void
- setVWeight (double weight) { v_weight_ = weight; }
-
- /** \brief get the value weight (w_v) of coherence */
- inline double
- getVWeight () { return v_weight_; }
-
- protected:
-
- /** \brief return the color coherence between the two points.
- * \param[in] source instance of source point.
- * \param[in] target instance of target point.
- */
- double
- computeCoherence (PointInT &source, PointInT &target) override;
-
- /** \brief the weight of coherence (w) */
- double weight_;
-
- /** \brief the hue weight (w_h) */
- double h_weight_;
-
- /** \brief the saturation weight (w_s) */
- double s_weight_;
-
- /** \brief the value weight (w_v) */
- double v_weight_;
-
- };
+ s_weight_ = weight;
}
-}
+
+ /** \brief get the saturation weight (w_s) of coherence */
+ inline double
+ getSWeight()
+ {
+ return s_weight_;
+ }
+
+ /** \brief set the value weight (w_v) of coherence
+ * \param[in] weight the value weight (w_v) of coherence.
+ */
+ inline void
+ setVWeight(double weight)
+ {
+ v_weight_ = weight;
+ }
+
+ /** \brief get the value weight (w_v) of coherence */
+ inline double
+ getVWeight()
+ {
+ return v_weight_;
+ }
+
+protected:
+ /** \brief return the color coherence between the two points.
+ * \param[in] source instance of source point.
+ * \param[in] target instance of target point.
+ */
+ double
+ computeCoherence(PointInT& source, PointInT& target) override;
+
+ /** \brief the weight of coherence (w) */
+ double weight_;
+
+ /** \brief the hue weight (w_h) */
+ double h_weight_;
+
+ /** \brief the saturation weight (w_s) */
+ double s_weight_;
+
+ /** \brief the value weight (w_v) */
+ double v_weight_;
+};
+} // namespace tracking
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/tracking/impl/hsv_color_coherence.hpp>
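// HSVColorCoherence converts both points' RGB values to HSV and scores the
// pair as 1 / (1 + w * (w_h^2*h_diff^2 + w_s^2*s_diff^2 + w_v^2*v_diff^2));
// with the defaults above (w = w_h = w_s = 1, w_v = 0) only hue and
// saturation differences matter. A minimal usage sketch (assumes pcl_tracking
// provides an instantiation for this RGB point type):
#include <pcl/point_types.h>
#include <pcl/tracking/hsv_color_coherence.h>

int main ()
{
  pcl::tracking::HSVColorCoherence<pcl::PointXYZRGBA> coherence;
  coherence.setVWeight (0.5);     // also penalize brightness differences
  pcl::PointXYZRGBA a, b;
  a.r = 255; a.g = 0;   a.b = 0;  // red
  b.r = 0;   b.g = 255; b.b = 0;  // green
  double w = coherence.compute (a, b);
  return w > 0.0 ? 0 : 1;
}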
#include <pcl/search/octree.h>
#include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
-namespace pcl
+namespace pcl {
+namespace tracking {
+template <typename PointInT>
+void
+ApproxNearestPairPointCloudCoherence<PointInT>::computeCoherence(
+ const PointCloudInConstPtr& cloud, const IndicesConstPtr&, float& w)
{
- namespace tracking
- {
- template <typename PointInT> void
- ApproxNearestPairPointCloudCoherence<PointInT>::computeCoherence (
- const PointCloudInConstPtr &cloud, const IndicesConstPtr &, float &w)
- {
- double val = 0.0;
- //for (std::size_t i = 0; i < indices->size (); i++)
- for (std::size_t i = 0; i < cloud->points.size (); i++)
- {
- int k_index = 0;
- float k_distance = 0.0;
- //PointInT input_point = cloud->points[(*indices)[i]];
- PointInT input_point = cloud->points[i];
- search_->approxNearestSearch(input_point, k_index, k_distance);
- if (k_distance < maximum_distance_ * maximum_distance_)
- {
- PointInT target_point = target_input_->points[k_index];
- double coherence_val = 1.0;
- for (std::size_t i = 0; i < point_coherences_.size (); i++)
- {
- PointCoherencePtr coherence = point_coherences_[i];
- double w = coherence->compute (input_point, target_point);
- coherence_val *= w;
- }
- val += coherence_val;
- }
+ double val = 0.0;
+ // for (std::size_t i = 0; i < indices->size (); i++)
+ for (const auto& point : *cloud) {
+ int k_index = 0;
+ float k_distance = 0.0;
+ // PointInT input_point = cloud->points[(*indices)[i]];
+ PointInT input_point = point;
+ search_->approxNearestSearch(input_point, k_index, k_distance);
+ if (k_distance < maximum_distance_ * maximum_distance_) {
+ PointInT target_point = (*target_input_)[k_index];
+ double coherence_val = 1.0;
+ for (std::size_t i = 0; i < point_coherences_.size(); i++) {
+ PointCoherencePtr coherence = point_coherences_[i];
+ double w = coherence->compute(input_point, target_point);
+ coherence_val *= w;
}
- w = - static_cast<float> (val);
+ val += coherence_val;
}
+ }
+ w = -static_cast<float>(val);
+}
- template <typename PointInT> bool
- ApproxNearestPairPointCloudCoherence<PointInT>::initCompute ()
- {
- if (!PointCloudCoherence<PointInT>::initCompute ())
- {
- PCL_ERROR ("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n", getClassName ().c_str ());
- //deinitCompute ();
- return (false);
- }
-
- // initialize tree
- if (!search_)
- search_.reset (new pcl::search::Octree<PointInT> (0.01));
-
- if (new_target_ && target_input_)
- {
- search_->setInputCloud (target_input_);
- new_target_ = false;
- }
-
- return true;
- }
-
+template <typename PointInT>
+bool
+ApproxNearestPairPointCloudCoherence<PointInT>::initCompute()
+{
+ if (!PointCloudCoherence<PointInT>::initCompute()) {
+ PCL_ERROR("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n",
+ getClassName().c_str());
+ // deinitCompute ();
+ return (false);
}
+
+ // initialize tree
+ if (!search_)
+ search_.reset(new pcl::search::Octree<PointInT>(0.01));
+
+ if (new_target_ && target_input_) {
+ search_->setInputCloud(target_input_);
+ new_target_ = false;
+ }
+
+ return true;
}
-#define PCL_INSTANTIATE_ApproxNearestPairPointCloudCoherence(T) template class PCL_EXPORTS pcl::tracking::ApproxNearestPairPointCloudCoherence<T>;
+} // namespace tracking
+} // namespace pcl
+
+#define PCL_INSTANTIATE_ApproxNearestPairPointCloudCoherence(T) \
+ template class PCL_EXPORTS pcl::tracking::ApproxNearestPairPointCloudCoherence<T>;
#endif
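// How the pieces above fit together: the cloud-level coherence builds an
// octree over the target cloud and, for every source point whose approximate
// nearest neighbour lies within maximum_distance_, multiplies the registered
// per-point coherences; the final weight is the negated sum of those
// products. A minimal wiring sketch using only the interfaces shown above
// (source and target are assumed to be already filled):
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
#include <pcl/tracking/distance_coherence.h>

float
scoreAgainstTarget (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &source,
                    const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &target)
{
  pcl::tracking::ApproxNearestPairPointCloudCoherence<pcl::PointXYZ> coherence;
  coherence.setTargetCloud (target);

  pcl::tracking::DistanceCoherence<pcl::PointXYZ>::Ptr
      distance (new pcl::tracking::DistanceCoherence<pcl::PointXYZ>);
  coherence.addPointCoherence (distance);

  float w = 0.0f;
  coherence.compute (source, pcl::IndicesConstPtr (), w);   // indices are unused here
  return w;   // more negative means better agreement with the target
}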
#include <pcl/console/print.h>
#include <pcl/tracking/coherence.h>
-namespace pcl
+namespace pcl {
+namespace tracking {
+
+template <typename PointInT>
+double
+PointCoherence<PointInT>::compute(PointInT& source, PointInT& target)
+{
+ return computeCoherence(source, target);
+}
+
+template <typename PointInT>
+double
+PointCloudCoherence<PointInT>::calcPointCoherence(PointInT& source, PointInT& target)
+{
+ double val = 0.0;
+ for (std::size_t i = 0; i < point_coherences_.size(); i++) {
+ PointCoherencePtr coherence = point_coherences_[i];
+ double d = std::log(coherence->compute(source, target));
+ // double d = coherence->compute (source, target);
+ if (!std::isnan(d))
+ val += d;
+ else
+ PCL_WARN("nan!\n");
+ }
+ return val;
+}
+
+template <typename PointInT>
+bool
+PointCloudCoherence<PointInT>::initCompute()
+{
+ if (!target_input_ || target_input_->points.empty()) {
+ PCL_ERROR("[pcl::%s::compute] target_input_ is empty!\n", getClassName().c_str());
+ return false;
+ }
+
+ return true;
+}
+
+template <typename PointInT>
+void
+PointCloudCoherence<PointInT>::compute(const PointCloudInConstPtr& cloud,
+ const IndicesConstPtr& indices,
+ float& w)
{
- namespace tracking
- {
-
- template <typename PointInT> double
- PointCoherence<PointInT>::compute (PointInT &source, PointInT &target)
- {
- return computeCoherence (source, target);
- }
-
- template <typename PointInT> double
- PointCloudCoherence<PointInT>::calcPointCoherence (PointInT &source, PointInT &target)
- {
- double val = 0.0;
- for (std::size_t i = 0; i < point_coherences_.size (); i++)
- {
- PointCoherencePtr coherence = point_coherences_[i];
- double d = std::log(coherence->compute (source, target));
- //double d = coherence->compute (source, target);
- if (! std::isnan(d))
- val += d;
- else
- PCL_WARN ("nan!\n");
- }
- return val;
- }
-
- template <typename PointInT> bool
- PointCloudCoherence<PointInT>::initCompute ()
- {
- if (!target_input_ || target_input_->points.empty ())
- {
- PCL_ERROR ("[pcl::%s::compute] target_input_ is empty!\n", getClassName ().c_str ());
- return false;
- }
-
- return true;
-
- }
-
- template <typename PointInT> void
- PointCloudCoherence<PointInT>::compute (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices, float &w)
- {
- if (!initCompute ())
- {
- PCL_ERROR ("[pcl::%s::compute] Init failed.\n", getClassName ().c_str ());
- return;
- }
- computeCoherence (cloud, indices, w);
- }
+ if (!initCompute()) {
+ PCL_ERROR("[pcl::%s::compute] Init failed.\n", getClassName().c_str());
+ return;
}
+ computeCoherence(cloud, indices, w);
}
+} // namespace tracking
+} // namespace pcl
#endif
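
calcPointCoherence above sums the logarithms of the individual point-coherence scores, so a source/target pair is effectively scored by the log of their product, with NaN terms skipped. A minimal standalone sketch of that aggregation (hypothetical sumLogScores name, not a PCL function):

#include <cmath>
#include <vector>

// Illustrative only: same aggregation as calcPointCoherence, over plain doubles.
inline double
sumLogScores(const std::vector<double>& scores)
{
  double val = 0.0;
  for (double s : scores) {
    const double d = std::log(s); // log of one PointCoherence score
    if (!std::isnan(d))
      val += d;                   // NaN contributions are skipped, as above
  }
  return val;
}
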
#ifndef PCL_TRACKING_IMPL_DISTANCE_COHERENCE_H_
#define PCL_TRACKING_IMPL_DISTANCE_COHERENCE_H_
-#include <Eigen/Dense>
#include <pcl/tracking/distance_coherence.h>
-namespace pcl
+#include <Eigen/Dense>
+
+namespace pcl {
+namespace tracking {
+template <typename PointInT>
+double
+DistanceCoherence<PointInT>::computeCoherence(PointInT& source, PointInT& target)
{
- namespace tracking
- {
- template <typename PointInT> double
- DistanceCoherence<PointInT>::computeCoherence (PointInT &source, PointInT &target)
- {
- Eigen::Vector4f p = source.getVector4fMap ();
- Eigen::Vector4f p_dash = target.getVector4fMap ();
- double d = (p - p_dash).norm ();
- return 1.0 / (1.0 + d * d * weight_);
- }
- }
+ Eigen::Vector4f p = source.getVector4fMap();
+ Eigen::Vector4f p_dash = target.getVector4fMap();
+ double d = (p - p_dash).norm();
+ return 1.0 / (1.0 + d * d * weight_);
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_DistanceCoherence(T) template class PCL_EXPORTS pcl::tracking::DistanceCoherence<T>;
+#define PCL_INSTANTIATE_DistanceCoherence(T) \
+ template class PCL_EXPORTS pcl::tracking::DistanceCoherence<T>;
#endif
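
DistanceCoherence above maps the Euclidean distance d between a source and target point to the score 1 / (1 + weight * d^2): 1 for coincident points, decaying toward 0 as they separate. A minimal standalone sketch of that formula (hypothetical distanceCoherenceScore name, assuming only Eigen):

#include <Eigen/Core>

// Illustrative only: the scalar score DistanceCoherence::computeCoherence returns.
inline double
distanceCoherenceScore(const Eigen::Vector4f& p, const Eigen::Vector4f& p_dash, double weight)
{
  const double d = (p - p_dash).norm(); // Euclidean distance between the two points
  return 1.0 / (1.0 + d * d * weight);  // 1 at d = 0, decays toward 0 with distance
}
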
#define PCL_TRACKING_IMPL_HSV_COLOR_COHERENCE_H_
#if defined __GNUC__
-# pragma GCC system_header
+#pragma GCC system_header
#endif
#include <pcl/tracking/hsv_color_coherence.h>
+
#include <Eigen/Dense>
-namespace pcl
-{
- namespace tracking
+namespace pcl {
+namespace tracking {
+// utility
+union RGBValue {
+ struct /*anonymous*/
{
- // utility
- union RGBValue
- {
- struct /*anonymous*/
- {
- unsigned char Blue; // Blue channel
- unsigned char Green; // Green channel
- unsigned char Red; // Red channel
- };
- float float_value;
- int int_value;
- };
-
- /** \brief Convert a RGB tuple to an HSV one.
- * \param[in] r the input Red component
- * \param[in] g the input Green component
- * \param[in] b the input Blue component
- * \param[out] fh the output Hue component
- * \param[out] fs the output Saturation component
- * \param[out] fv the output Value component
- */
- void
- RGB2HSV (int r, int g, int b, float& fh, float& fs, float& fv)
- {
- // mostly copied from opencv-svn/modules/imgproc/src/color.cpp
- // revision is 4351
- const int hsv_shift = 12;
-
- static const int div_table[] =
- {
- 0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211,
- 130560, 116053, 104448, 94953, 87040, 80345, 74606, 69632,
- 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412,
- 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693,
- 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782,
- 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223,
- 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991,
- 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579,
- 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711,
- 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221,
- 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006,
- 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995,
- 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141,
- 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410,
- 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777,
- 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224,
- 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737,
- 7680, 7624, 7569, 7514, 7461, 7408, 7355, 7304,
- 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917,
- 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569,
- 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254,
- 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968,
- 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708,
- 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468,
- 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249,
- 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046,
- 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858,
- 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684,
- 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522,
- 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370,
- 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229,
- 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096
- };
- int hr = 180, hscale = 15;
- int h, s, v = b;
- int vmin = b, diff;
- int vr, vg;
-
- v = std::max<int> (v, g);
- v = std::max<int> (v, r);
- vmin = std::min<int> (vmin, g);
- vmin = std::min<int> (vmin, r);
-
- diff = v - vmin;
- vr = v == r ? -1 : 0;
- vg = v == g ? -1 : 0;
-
- s = diff * div_table[v] >> hsv_shift;
- h = (vr & (g - b)) +
- (~vr & ((vg & (b - r + 2 * diff))
- + ((~vg) & (r - g + 4 * diff))));
- h = (h * div_table[diff] * hscale +
- (1 << (hsv_shift + 6))) >> (7 + hsv_shift);
-
- h += h < 0 ? hr : 0;
- fh = static_cast<float> (h) / 180.0f;
- fs = static_cast<float> (s) / 255.0f;
- fv = static_cast<float> (v) / 255.0f;
- }
-
- ///////////////////////////////////////////////////////////////////////////////////////////////////////////////
- template <typename PointInT> double
- HSVColorCoherence<PointInT>::computeCoherence (PointInT &source, PointInT &target)
- {
- // convert color space from RGB to HSV
- RGBValue source_rgb, target_rgb;
- source_rgb.int_value = source.rgba;
- target_rgb.int_value = target.rgba;
-
- float source_h, source_s, source_v, target_h, target_s, target_v;
- RGB2HSV (source_rgb.Red, source_rgb.Blue, source_rgb.Green,
- source_h, source_s, source_v);
- RGB2HSV (target_rgb.Red, target_rgb.Blue, target_rgb.Green,
- target_h, target_s, target_v);
- // hue value is in 0 ~ 2pi, but circulated.
- const float _h_diff = std::abs (source_h - target_h);
- // Also need to compute distance other way around circle - but need to check which is closer to 0
- float _h_diff2;
- if (source_h < target_h)
- _h_diff2 = std::abs (1.0f + source_h - target_h); //Add 2pi to source, subtract target
- else
- _h_diff2 = std::abs (1.0f + target_h - source_h); //Add 2pi to target, subtract source
-
- float h_diff;
- //Now we need to choose the smaller distance
- if (_h_diff < _h_diff2)
- h_diff = static_cast<float> (h_weight_) * _h_diff * _h_diff;
- else
- h_diff = static_cast<float> (h_weight_) * _h_diff2 * _h_diff2;
-
- const float s_diff = static_cast<float> (s_weight_) * (source_s - target_s) * (source_s - target_s);
- const float v_diff = static_cast<float> (v_weight_) * (source_v - target_v) * (source_v - target_v);
- const float diff2 = h_diff + s_diff + v_diff;
-
- return (1.0 / (1.0 + weight_ * diff2));
- }
- }
+ unsigned char Blue; // Blue channel
+ unsigned char Green; // Green channel
+ unsigned char Red; // Red channel
+ };
+ float float_value;
+ int int_value;
+};
+
+/** \brief Convert a RGB tuple to an HSV one.
+ * \param[in] r the input Red component
+ * \param[in] g the input Green component
+ * \param[in] b the input Blue component
+ * \param[out] fh the output Hue component
+ * \param[out] fs the output Saturation component
+ * \param[out] fv the output Value component
+ */
+void
+RGB2HSV(int r, int g, int b, float& fh, float& fs, float& fv)
+{
+ // mostly copied from opencv-svn/modules/imgproc/src/color.cpp
+ // revision is 4351
+ const int hsv_shift = 12;
+
+ static const int div_table[] = {
+ 0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053,
+ 104448, 94953, 87040, 80345, 74606, 69632, 65280, 61440, 58027, 54973,
+ 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017,
+ 34816, 33693, 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782,
+ 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316,
+ 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703,
+ 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137,
+ 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221,
+ 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736,
+ 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550,
+ 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582,
+ 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777,
+ 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097,
+ 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514,
+ 7461, 7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010,
+ 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569,
+ 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180,
+ 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835,
+ 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526,
+ 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249,
+ 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998,
+ 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769,
+ 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561,
+ 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370,
+ 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195,
+ 4178, 4161, 4145, 4128, 4112, 4096};
+ int hr = 180, hscale = 15;
+ int h, s, v = b;
+ int vmin = b, diff;
+ int vr, vg;
+
+ v = std::max<int>(v, g);
+ v = std::max<int>(v, r);
+ vmin = std::min<int>(vmin, g);
+ vmin = std::min<int>(vmin, r);
+
+ diff = v - vmin;
+ vr = v == r ? -1 : 0;
+ vg = v == g ? -1 : 0;
+
+ s = diff * div_table[v] >> hsv_shift;
+ h = (vr & (g - b)) +
+ (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));
+ h = (h * div_table[diff] * hscale + (1 << (hsv_shift + 6))) >> (7 + hsv_shift);
+
+ h += h < 0 ? hr : 0;
+ fh = static_cast<float>(h) / 180.0f;
+ fs = static_cast<float>(s) / 255.0f;
+ fv = static_cast<float>(v) / 255.0f;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT>
+double
+HSVColorCoherence<PointInT>::computeCoherence(PointInT& source, PointInT& target)
+{
+ // convert color space from RGB to HSV
+ RGBValue source_rgb, target_rgb;
+ source_rgb.int_value = source.rgba;
+ target_rgb.int_value = target.rgba;
+
+ float source_h, source_s, source_v, target_h, target_s, target_v;
+ RGB2HSV(
+ source_rgb.Red, source_rgb.Blue, source_rgb.Green, source_h, source_s, source_v);
+ RGB2HSV(
+ target_rgb.Red, target_rgb.Blue, target_rgb.Green, target_h, target_s, target_v);
+  // hue is circular (wraps around); RGB2HSV above returns it normalized to [0, 1]
+ const float _h_diff = std::abs(source_h - target_h);
+  // Also compute the distance the other way around the circle, then keep the
+  // smaller of the two
+ float _h_diff2;
+ if (source_h < target_h)
+ _h_diff2 =
+ std::abs(1.0f + source_h - target_h); // Add 2pi to source, subtract target
+ else
+ _h_diff2 =
+ std::abs(1.0f + target_h - source_h); // Add 2pi to target, subtract source
+
+ float h_diff;
+ // Now we need to choose the smaller distance
+ if (_h_diff < _h_diff2)
+ h_diff = static_cast<float>(h_weight_) * _h_diff * _h_diff;
+ else
+ h_diff = static_cast<float>(h_weight_) * _h_diff2 * _h_diff2;
+
+ const float s_diff =
+ static_cast<float>(s_weight_) * (source_s - target_s) * (source_s - target_s);
+ const float v_diff =
+ static_cast<float>(v_weight_) * (source_v - target_v) * (source_v - target_v);
+ const float diff2 = h_diff + s_diff + v_diff;
+
+ return (1.0 / (1.0 + weight_ * diff2));
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_HSVColorCoherence(T) template class PCL_EXPORTS pcl::tracking::HSVColorCoherence<T>;
+#define PCL_INSTANTIATE_HSVColorCoherence(T) \
+ template class PCL_EXPORTS pcl::tracking::HSVColorCoherence<T>;
#endif
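
Because RGB2HSV above returns hue normalized to [0, 1], HSVColorCoherence measures the hue difference along the shorter arc of the hue circle before squaring and weighting it. A minimal standalone sketch of that circular difference (hypothetical circularHueDiff name):

#include <algorithm>
#include <cmath>

// Illustrative only: the shorter-arc hue distance used in computeCoherence above.
inline float
circularHueDiff(float h1, float h2) // h1, h2 in [0, 1]
{
  const float direct = std::abs(h1 - h2); // distance without wrapping
  const float wrapped = 1.0f - direct;    // distance going the other way around the circle
  return std::min(direct, wrapped);       // keep the smaller of the two
}
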
#include <pcl/tracking/kld_adaptive_particle_filter.h>
-template <typename PointInT, typename StateT> bool
-pcl::tracking::KLDAdaptiveParticleFilterTracker<PointInT, StateT>::initCompute ()
+namespace pcl {
+namespace tracking {
+template <typename PointInT, typename StateT>
+bool
+KLDAdaptiveParticleFilterTracker<PointInT, StateT>::initCompute()
{
- if (!Tracker<PointInT, StateT>::initCompute ())
- {
- PCL_ERROR ("[pcl::%s::initCompute] Init failed.\n", getClassName ().c_str ());
+ if (!Tracker<PointInT, StateT>::initCompute()) {
+ PCL_ERROR("[pcl::%s::initCompute] Init failed.\n", getClassName().c_str());
return (false);
}
- if (transed_reference_vector_.empty ())
- {
+ if (transed_reference_vector_.empty()) {
    // allocate only once
- transed_reference_vector_.resize (maximum_particle_number_);
- for (unsigned int i = 0; i < maximum_particle_number_; i++)
- {
- transed_reference_vector_[i] = PointCloudInPtr (new PointCloudIn ());
+ transed_reference_vector_.resize(maximum_particle_number_);
+ for (unsigned int i = 0; i < maximum_particle_number_; i++) {
+ transed_reference_vector_[i] = PointCloudInPtr(new PointCloudIn());
}
}
-
- coherence_->setTargetCloud (input_);
+
+ coherence_->setTargetCloud(input_);
if (!change_detector_)
- change_detector_.reset (new pcl::octree::OctreePointCloudChangeDetector<PointInT> (change_detector_resolution_));
-
- if (!particles_ || particles_->points.empty ())
- initParticles (true);
+ change_detector_.reset(new pcl::octree::OctreePointCloudChangeDetector<PointInT>(
+ change_detector_resolution_));
+
+ if (!particles_ || particles_->points.empty())
+ initParticles(true);
return (true);
}
-template <typename PointInT, typename StateT> bool
-pcl::tracking::KLDAdaptiveParticleFilterTracker<PointInT, StateT>::insertIntoBins
-(std::vector<int> &&new_bin, std::vector<std::vector<int> > &bins)
+template <typename PointInT, typename StateT>
+bool
+KLDAdaptiveParticleFilterTracker<PointInT, StateT>::insertIntoBins(
+ std::vector<int>&& new_bin, std::vector<std::vector<int>>& bins)
{
- for (auto &existing_bin : bins)
- {
- if (equalBin (new_bin, existing_bin))
+ for (auto& existing_bin : bins) {
+ if (equalBin(new_bin, existing_bin))
return false;
}
- bins.push_back (std::move(new_bin));
+ bins.push_back(std::move(new_bin));
return true;
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::KLDAdaptiveParticleFilterTracker<PointInT, StateT>::resample ()
+template <typename PointInT, typename StateT>
+void
+KLDAdaptiveParticleFilterTracker<PointInT, StateT>::resample()
{
unsigned int k = 0;
unsigned int n = 0;
- PointCloudStatePtr S (new PointCloudState);
- std::vector<std::vector<int> > bins;
-
+ PointCloudStatePtr S(new PointCloudState);
+ std::vector<std::vector<int>> bins;
+
// initializing for sampling without replacement
- std::vector<int> a (particles_->points.size ());
- std::vector<double> q (particles_->points.size ());
- this->genAliasTable (a, q, particles_);
-
- const std::vector<double> zero_mean (StateT::stateDimension (), 0.0);
-
+ std::vector<int> a(particles_->size());
+ std::vector<double> q(particles_->size());
+ this->genAliasTable(a, q, particles_);
+
+ const std::vector<double> zero_mean(StateT::stateDimension(), 0.0);
+
// select the particles with KLD sampling
- do
- {
- int j_n = sampleWithReplacement (a, q);
- StateT x_t = particles_->points[j_n];
- x_t.sample (zero_mean, step_noise_covariance_);
-
+ do {
+ int j_n = sampleWithReplacement(a, q);
+ StateT x_t = (*particles_)[j_n];
+ x_t.sample(zero_mean, step_noise_covariance_);
+
// motion
- if (rand () / double (RAND_MAX) < motion_ratio_)
+ if (rand() / double(RAND_MAX) < motion_ratio_)
x_t = x_t + motion_;
-
- S->points.push_back (x_t);
+
+ S->points.push_back(x_t);
// calc bin
- std::vector<int> new_bin (StateT::stateDimension ());
- for (int i = 0; i < StateT::stateDimension (); i++)
- new_bin[i] = static_cast<int> (x_t[i] / bin_size_[i]);
-
+ std::vector<int> new_bin(StateT::stateDimension());
+ for (int i = 0; i < StateT::stateDimension(); i++)
+ new_bin[i] = static_cast<int>(x_t[i] / bin_size_[i]);
+
// calc bin index... how?
- if (insertIntoBins (std::move(new_bin), bins))
+ if (insertIntoBins(std::move(new_bin), bins))
++k;
++n;
- }
- while (n < maximum_particle_number_ && (k < 2 || n < calcKLBound (k)));
-
- particles_ = S; // swap
- particle_num_ = static_cast<int> (particles_->points.size ());
-}
+ } while (n < maximum_particle_number_ && (k < 2 || n < calcKLBound(k)));
+ particles_ = S; // swap
+ particle_num_ = static_cast<int>(particles_->size());
+}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_KLDAdaptiveParticleFilterTracker(T,ST) template class PCL_EXPORTS pcl::tracking::KLDAdaptiveParticleFilterTracker<T,ST>;
+#define PCL_INSTANTIATE_KLDAdaptiveParticleFilterTracker(T, ST) \
+ template class PCL_EXPORTS pcl::tracking::KLDAdaptiveParticleFilterTracker<T, ST>;
#endif
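
The KLD resampling loop above keeps drawing samples until the drawn count n reaches calcKLBound(k), where k counts the bins occupied so far (or until the particle maximum is hit); insertIntoBins reports true only the first time a given bin is seen, which is what increments k. A minimal standalone sketch of that bookkeeping against plain std::vector bins (hypothetical insertIfNewBin name):

#include <algorithm>
#include <vector>

// Illustrative only: count a bin only on its first occupancy, as insertIntoBins does.
inline bool
insertIfNewBin(std::vector<int>&& new_bin, std::vector<std::vector<int>>& bins)
{
  if (std::find(bins.begin(), bins.end(), new_bin) != bins.end())
    return false;                   // bin already occupied; k is not incremented
  bins.push_back(std::move(new_bin));
  return true;                      // a previously empty bin became occupied
}
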
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>
+namespace pcl {
+namespace tracking {
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads (unsigned int nr_threads)
+template <typename PointInT, typename StateT>
+void
+KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads(
+ unsigned int nr_threads)
{
if (nr_threads == 0)
#ifdef _OPENMP
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::weight ()
+template <typename PointInT, typename StateT>
+void
+KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::weight()
{
- if (!use_normal_)
- {
+ if (!use_normal_) {
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
+ // clang-format on
for (int i = 0; i < particle_num_; i++)
- this->computeTransformedPointCloudWithoutNormal (particles_->points[i], *transed_reference_vector_[i]);
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- this->cropInputPointCloud (input_, *coherence_input);
- if (change_counter_ == 0)
- {
+ this->computeTransformedPointCloudWithoutNormal((*particles_)[i],
+ *transed_reference_vector_[i]);
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ this->cropInputPointCloud(input_, *coherence_input);
+ if (change_counter_ == 0) {
// test change detector
- if (!use_change_detector_ || this->testChangeDetection (coherence_input))
- {
+ if (!use_change_detector_ || this->testChangeDetection(coherence_input)) {
changed_ = true;
change_counter_ = change_detector_interval_;
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
IndicesPtr indices;
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
else
changed_ = false;
}
- else
- {
+ else {
--change_counter_;
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
IndicesPtr indices;
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
}
- else
- {
- std::vector<IndicesPtr> indices_list (particle_num_);
- for (int i = 0; i < particle_num_; i++)
- {
- indices_list[i] = IndicesPtr (new std::vector<int>);
+ else {
+ std::vector<IndicesPtr> indices_list(particle_num_);
+ for (int i = 0; i < particle_num_; i++) {
+ indices_list[i] = IndicesPtr(new std::vector<int>);
}
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(indices_list) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- this->computeTransformedPointCloudWithNormal (particles_->points[i], *indices_list[i], *transed_reference_vector_[i]);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ this->computeTransformedPointCloudWithNormal(
+ (*particles_)[i], *indices_list[i], *transed_reference_vector_[i]);
}
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- this->cropInputPointCloud (input_, *coherence_input);
-
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ this->cropInputPointCloud(input_, *coherence_input);
+
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(indices_list) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- coherence_->compute (transed_reference_vector_[i], indices_list[i], particles_->points[i].weight);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ coherence_->compute(
+ transed_reference_vector_[i], indices_list[i], (*particles_)[i].weight);
}
}
-
- normalizeWeight ();
+
+ normalizeWeight();
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_KLDAdaptiveParticleFilterOMPTracker(T,ST) template class PCL_EXPORTS pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<T,ST>;
+#define PCL_INSTANTIATE_KLDAdaptiveParticleFilterOMPTracker(T, ST) \
+ template class PCL_EXPORTS pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<T, ST>;
#endif
#include <pcl/search/organized.h>
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
-namespace pcl
+namespace pcl {
+namespace tracking {
+template <typename PointInT>
+void
+NearestPairPointCloudCoherence<PointInT>::computeCoherence(
+ const PointCloudInConstPtr& cloud, const IndicesConstPtr&, float& w)
{
- namespace tracking
- {
- template <typename PointInT> void
- NearestPairPointCloudCoherence<PointInT>::computeCoherence (
- const PointCloudInConstPtr &cloud, const IndicesConstPtr &, float &w)
- {
- double val = 0.0;
- //for (std::size_t i = 0; i < indices->size (); i++)
- for (std::size_t i = 0; i < cloud->points.size (); i++)
- {
- PointInT input_point = cloud->points[i];
- std::vector<int> k_indices(1);
- std::vector<float> k_distances(1);
- search_->nearestKSearch (input_point, 1, k_indices, k_distances);
- int k_index = k_indices[0];
- float k_distance = k_distances[0];
- if (k_distance < maximum_distance_ * maximum_distance_)
- {
- // nearest_targets.push_back (k_index);
- // nearest_inputs.push_back (i);
- PointInT target_point = target_input_->points[k_index];
- double coherence_val = 1.0;
- for (std::size_t i = 0; i < point_coherences_.size (); i++)
- {
- PointCoherencePtr coherence = point_coherences_[i];
- double w = coherence->compute (input_point, target_point);
- coherence_val *= w;
- }
- val += coherence_val;
- }
+ double val = 0.0;
+ // for (std::size_t i = 0; i < indices->size (); i++)
+ for (std::size_t i = 0; i < cloud->size(); i++) {
+ PointInT input_point = (*cloud)[i];
+ std::vector<int> k_indices(1);
+ std::vector<float> k_distances(1);
+ search_->nearestKSearch(input_point, 1, k_indices, k_distances);
+ int k_index = k_indices[0];
+ float k_distance = k_distances[0];
+ if (k_distance < maximum_distance_ * maximum_distance_) {
+ // nearest_targets.push_back (k_index);
+ // nearest_inputs.push_back (i);
+ PointInT target_point = (*target_input_)[k_index];
+ double coherence_val = 1.0;
+ for (std::size_t i = 0; i < point_coherences_.size(); i++) {
+ PointCoherencePtr coherence = point_coherences_[i];
+ double w = coherence->compute(input_point, target_point);
+ coherence_val *= w;
}
- w = - static_cast<float> (val);
- }
-
- template <typename PointInT> bool
- NearestPairPointCloudCoherence<PointInT>::initCompute ()
- {
- if (!PointCloudCoherence<PointInT>::initCompute ())
- {
- PCL_ERROR ("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n", getClassName ().c_str ());
- //deinitCompute ();
- return (false);
- }
-
- // initialize tree
- if (!search_)
- search_.reset (new pcl::search::KdTree<PointInT> (false));
-
- if (new_target_ && target_input_)
- {
- search_->setInputCloud (target_input_);
- new_target_ = false;
- }
-
- return true;
+ val += coherence_val;
}
}
+ w = -static_cast<float>(val);
+}
+
+template <typename PointInT>
+bool
+NearestPairPointCloudCoherence<PointInT>::initCompute()
+{
+ if (!PointCloudCoherence<PointInT>::initCompute()) {
+ PCL_ERROR("[pcl::%s::initCompute] PointCloudCoherence::Init failed.\n",
+ getClassName().c_str());
+ // deinitCompute ();
+ return (false);
+ }
+
+ // initialize tree
+ if (!search_)
+ search_.reset(new pcl::search::KdTree<PointInT>(false));
+
+ if (new_target_ && target_input_) {
+ search_->setInputCloud(target_input_);
+ new_target_ = false;
+ }
+
+ return true;
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_NearestPairPointCloudCoherence(T) template class PCL_EXPORTS pcl::tracking::NearestPairPointCloudCoherence<T>;
+#define PCL_INSTANTIATE_NearestPairPointCloudCoherence(T) \
+ template class PCL_EXPORTS pcl::tracking::NearestPairPointCloudCoherence<T>;
#endif
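
computeCoherence above scores each input point by the product of all registered point coherences against its nearest target neighbour, sums those products over the pairs within range, and returns the negated sum as the weight. A minimal standalone sketch of that aggregation (hypothetical aggregateCoherence name):

#include <vector>

// Illustrative only: multiply per-point scores, sum over matched pairs, negate.
inline float
aggregateCoherence(const std::vector<std::vector<double>>& per_pair_scores)
{
  double val = 0.0;
  for (const auto& scores : per_pair_scores) { // one entry per matched point pair
    double coherence_val = 1.0;
    for (double s : scores)                    // one score per registered PointCoherence
      coherence_val *= s;
    val += coherence_val;
  }
  return -static_cast<float>(val);             // negated, as in computeCoherence above
}
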
#include <pcl/console/print.h>
#include <pcl/tracking/normal_coherence.h>
-template <typename PointInT> double
-pcl::tracking::NormalCoherence<PointInT>::computeCoherence (PointInT &source, PointInT &target)
+namespace pcl {
+namespace tracking {
+template <typename PointInT>
+double
+NormalCoherence<PointInT>::computeCoherence(PointInT& source, PointInT& target)
{
- Eigen::Vector4f n = source.getNormalVector4fMap ();
- Eigen::Vector4f n_dash = target.getNormalVector4fMap ();
- if ( n.norm () <= 1e-5 || n_dash.norm () <= 1e-5 )
- {
- PCL_ERROR("norm might be ZERO!\n");
- std::cout << "source: " << source << std::endl;
- std::cout << "target: " << target << std::endl;
- exit (1);
- return 0.0;
- }
- n.normalize ();
- n_dash.normalize ();
- double theta = pcl::getAngle3D (n, n_dash);
- if (!std::isnan (theta))
- return 1.0 / (1.0 + weight_ * theta * theta);
+ Eigen::Vector4f n = source.getNormalVector4fMap();
+ Eigen::Vector4f n_dash = target.getNormalVector4fMap();
+ if (n.norm() <= 1e-5 || n_dash.norm() <= 1e-5) {
+ PCL_ERROR("norm might be ZERO!\n");
+ std::cout << "source: " << source << std::endl;
+ std::cout << "target: " << target << std::endl;
+ exit(1);
return 0.0;
+ }
+ n.normalize();
+ n_dash.normalize();
+ double theta = pcl::getAngle3D(n, n_dash);
+ if (!std::isnan(theta))
+ return 1.0 / (1.0 + weight_ * theta * theta);
+ return 0.0;
}
+} // namespace tracking
+} // namespace pcl
-
-#define PCL_INSTANTIATE_NormalCoherence(T) template class PCL_EXPORTS pcl::tracking::NormalCoherence<T>;
+#define PCL_INSTANTIATE_NormalCoherence(T) \
+ template class PCL_EXPORTS pcl::tracking::NormalCoherence<T>;
#endif
#ifndef PCL_TRACKING_IMPL_PARTICLE_FILTER_H_
#define PCL_TRACKING_IMPL_PARTICLE_FILTER_H_
-#include <random>
-
#include <pcl/common/common.h>
-#include <pcl/common/eigen.h>
#include <pcl/common/transforms.h>
#include <pcl/tracking/particle_filter.h>
-template <typename PointInT, typename StateT> bool
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::initCompute ()
+#include <random>
+
+namespace pcl {
+namespace tracking {
+template <typename PointInT, typename StateT>
+bool
+ParticleFilterTracker<PointInT, StateT>::initCompute()
{
- if (!Tracker<PointInT, StateT>::initCompute ())
- {
- PCL_ERROR ("[pcl::%s::initCompute] Init failed.\n", getClassName ().c_str ());
+ if (!Tracker<PointInT, StateT>::initCompute()) {
+ PCL_ERROR("[pcl::%s::initCompute] Init failed.\n", getClassName().c_str());
return (false);
}
- if (transed_reference_vector_.empty ())
- {
+ if (transed_reference_vector_.empty()) {
    // allocate only once
- transed_reference_vector_.resize (particle_num_);
- for (int i = 0; i < particle_num_; i++)
- {
- transed_reference_vector_[i] = PointCloudInPtr (new PointCloudIn ());
+ transed_reference_vector_.resize(particle_num_);
+ for (int i = 0; i < particle_num_; i++) {
+ transed_reference_vector_[i] = PointCloudInPtr(new PointCloudIn());
}
}
- coherence_->setTargetCloud (input_);
+ coherence_->setTargetCloud(input_);
if (!change_detector_)
- change_detector_.reset (new pcl::octree::OctreePointCloudChangeDetector<PointInT> (change_detector_resolution_));
-
- if (!particles_ || particles_->points.empty ())
- initParticles (true);
+ change_detector_.reset(new pcl::octree::OctreePointCloudChangeDetector<PointInT>(
+ change_detector_resolution_));
+
+ if (!particles_ || particles_->points.empty())
+ initParticles(true);
return (true);
}
-template <typename PointInT, typename StateT> int
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement
-(const std::vector<int>& a, const std::vector<double>& q)
+template <typename PointInT, typename StateT>
+int
+ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement(
+ const std::vector<int>& a, const std::vector<double>& q)
{
- static std::mt19937 rng([] { std::random_device rd; return rd(); } ());
- std::uniform_real_distribution<> rd (0.0, 1.0);
+ static std::mt19937 rng{std::random_device{}()};
+ std::uniform_real_distribution<> rd(0.0, 1.0);
- double rU = rd (rng) * static_cast<double> (particles_->points.size ());
- int k = static_cast<int> (rU);
- rU -= k; /* rU - [rU] */
- if ( rU < q[k] )
+ double rU = rd(rng) * static_cast<double>(particles_->size());
+ int k = static_cast<int>(rU);
+ rU -= k; /* rU - [rU] */
+ if (rU < q[k])
return k;
return a[k];
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::genAliasTable (std::vector<int> &a, std::vector<double> &q,
- const PointCloudStateConstPtr &particles)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::genAliasTable(
+ std::vector<int>& a,
+ std::vector<double>& q,
+ const PointCloudStateConstPtr& particles)
{
/* generate an alias table, a and q */
- std::vector<int> HL (particles->points.size ());
- std::vector<int>::iterator H = HL.begin ();
- std::vector<int>::iterator L = HL.end () - 1;
- std::size_t num = particles->points.size ();
- for ( std::size_t i = 0; i < num; i++ )
- q[i] = particles->points[i].weight * static_cast<float> (num);
- for ( std::size_t i = 0; i < num; i++ )
- a[i] = static_cast<int> (i);
+ std::vector<int> HL(particles->size());
+ std::vector<int>::iterator H = HL.begin();
+ std::vector<int>::iterator L = HL.end() - 1;
+ const auto num = particles->size();
+ for (std::size_t i = 0; i < num; i++)
+ q[i] = (*particles)[i].weight * static_cast<float>(num);
+ for (std::size_t i = 0; i < num; i++)
+ a[i] = static_cast<int>(i);
// setup H and L
- for ( std::size_t i = 0; i < num; i++ )
- if ( q[i] >= 1.0 )
- *H++ = static_cast<int> (i);
+ for (std::size_t i = 0; i < num; i++)
+ if (q[i] >= 1.0)
+ *H++ = static_cast<int>(i);
else
- *L-- = static_cast<int> (i);
-
- while ( H != HL.begin() && L != HL.end() - 1 )
- {
+ *L-- = static_cast<int>(i);
+
+ while (H != HL.begin() && L != HL.end() - 1) {
int j = *(L + 1);
int k = *(H - 1);
a[j] = k;
q[k] += q[j] - 1;
++L;
- if ( q[k] < 1.0 )
- {
+ if (q[k] < 1.0) {
*L-- = k;
--H;
}
}
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::initParticles (bool reset)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::initParticles(bool reset)
{
- particles_.reset (new PointCloudState ());
- if (reset)
- {
- representative_state_.zero ();
- StateT offset = StateT::toState (trans_);
+ particles_.reset(new PointCloudState());
+ if (reset) {
+ representative_state_.zero();
+ StateT offset = StateT::toState(trans_);
representative_state_ = offset;
- representative_state_.weight = 1.0f / static_cast<float> (particle_num_);
+ representative_state_.weight = 1.0f / static_cast<float>(particle_num_);
}
// sampling...
- for ( int i = 0; i < particle_num_; i++ )
- {
+ for (int i = 0; i < particle_num_; i++) {
StateT p;
- p.zero ();
- p.sample (initial_noise_mean_, initial_noise_covariance_);
+ p.zero();
+ p.sample(initial_noise_mean_, initial_noise_covariance_);
p = p + representative_state_;
- p.weight = 1.0f / static_cast<float> (particle_num_);
- particles_->points.push_back (p); // update
+ p.weight = 1.0f / static_cast<float>(particle_num_);
+ particles_->points.push_back(p); // update
}
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::normalizeWeight ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::normalizeWeight()
{
- // apply exponential function
- double w_min = std::numeric_limits<double>::max ();
- double w_max = - std::numeric_limits<double>::max ();
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- {
- double weight = particles_->points[i].weight;
- if (w_min > weight)
- w_min = weight;
- if (weight != 0.0 && w_max < weight)
- w_max = weight;
- }
-
- fit_ratio_ = w_min;
- if (w_max != w_min)
- {
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- {
- if (particles_->points[i].weight != 0.0)
- {
- particles_->points[i].weight = static_cast<float> (normalizeParticleWeight (particles_->points[i].weight, w_min, w_max));
- }
+ // apply exponential function
+ double w_min = std::numeric_limits<double>::max();
+ double w_max = -std::numeric_limits<double>::max();
+ for (const auto& point : *particles_) {
+ double weight = point.weight;
+ if (w_min > weight)
+ w_min = weight;
+ if (weight != 0.0 && w_max < weight)
+ w_max = weight;
+ }
+
+ fit_ratio_ = w_min;
+ if (w_max != w_min) {
+ for (auto& point : *particles_) {
+ if (point.weight != 0.0) {
+ point.weight =
+ static_cast<float>(normalizeParticleWeight(point.weight, w_min, w_max));
}
}
- else
- {
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- particles_->points[i].weight = 1.0f / static_cast<float> (particles_->points.size ());
- }
-
- double sum = 0.0;
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- {
- sum += particles_->points[i].weight;
- }
-
- if (sum != 0.0)
- {
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- particles_->points[i].weight /= static_cast<float> (sum);
- }
- else
- {
- for ( std::size_t i = 0; i < particles_->points.size (); i++ )
- particles_->points[i].weight = 1.0f / static_cast<float> (particles_->points.size ());
- }
+ }
+ else {
+ for (auto& point : *particles_)
+ point.weight = 1.0f / static_cast<float>(particles_->size());
+ }
+
+ double sum = 0.0;
+ for (const auto& point : *particles_) {
+ sum += point.weight;
+ }
+
+ if (sum != 0.0) {
+ for (auto& point : *particles_)
+ point.weight /= static_cast<float>(sum);
+ }
+ else {
+ for (auto& point : *particles_)
+ point.weight = 1.0f / static_cast<float>(particles_->size());
+ }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::cropInputPointCloud (
- const PointCloudInConstPtr &, PointCloudIn &output)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::cropInputPointCloud(
+ const PointCloudInConstPtr&, PointCloudIn& output)
{
double x_min, y_min, z_min, x_max, y_max, z_max;
- calcBoundingBox (x_min, x_max, y_min, y_max, z_min, z_max);
- pass_x_.setFilterLimits (float (x_min), float (x_max));
- pass_y_.setFilterLimits (float (y_min), float (y_max));
- pass_z_.setFilterLimits (float (z_min), float (z_max));
-
+ calcBoundingBox(x_min, x_max, y_min, y_max, z_min, z_max);
+ pass_x_.setFilterLimits(float(x_min), float(x_max));
+ pass_y_.setFilterLimits(float(y_min), float(y_max));
+ pass_z_.setFilterLimits(float(z_min), float(z_max));
+
// x
- PointCloudInPtr xcloud (new PointCloudIn);
- pass_x_.setInputCloud (input_);
- pass_x_.filter (*xcloud);
+ PointCloudInPtr xcloud(new PointCloudIn);
+ pass_x_.setInputCloud(input_);
+ pass_x_.filter(*xcloud);
// y
- PointCloudInPtr ycloud (new PointCloudIn);
- pass_y_.setInputCloud (xcloud);
- pass_y_.filter (*ycloud);
+ PointCloudInPtr ycloud(new PointCloudIn);
+ pass_y_.setInputCloud(xcloud);
+ pass_y_.filter(*ycloud);
// z
- pass_z_.setInputCloud (ycloud);
- pass_z_.filter (output);
+ pass_z_.setInputCloud(ycloud);
+ pass_z_.filter(output);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::calcBoundingBox (
- double &x_min, double &x_max, double &y_min, double &y_max, double &z_min, double &z_max)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::calcBoundingBox(double& x_min,
+ double& x_max,
+ double& y_min,
+ double& y_max,
+ double& z_min,
+ double& z_max)
{
- x_min = y_min = z_min = std::numeric_limits<double>::max ();
- x_max = y_max = z_max = - std::numeric_limits<double>::max ();
-
- for (std::size_t i = 0; i < transed_reference_vector_.size (); i++)
- {
+ x_min = y_min = z_min = std::numeric_limits<double>::max();
+ x_max = y_max = z_max = -std::numeric_limits<double>::max();
+
+ for (std::size_t i = 0; i < transed_reference_vector_.size(); i++) {
PointCloudInPtr target = transed_reference_vector_[i];
PointInT Pmin, Pmax;
- pcl::getMinMax3D (*target, Pmin, Pmax);
+ pcl::getMinMax3D(*target, Pmin, Pmax);
if (x_min > Pmin.x)
x_min = Pmin.x;
if (x_max < Pmax.x)
}
}
-template <typename PointInT, typename StateT> bool
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::testChangeDetection
-(const PointCloudInConstPtr &input)
+template <typename PointInT, typename StateT>
+bool
+ParticleFilterTracker<PointInT, StateT>::testChangeDetection(
+ const PointCloudInConstPtr& input)
{
- change_detector_->setInputCloud (input);
- change_detector_->addPointsFromInputCloud ();
+ change_detector_->setInputCloud(input);
+ change_detector_->addPointsFromInputCloud();
std::vector<int> newPointIdxVector;
- change_detector_->getPointIndicesFromNewVoxels (newPointIdxVector, change_detector_filter_);
- change_detector_->switchBuffers ();
- return !newPointIdxVector.empty ();
+ change_detector_->getPointIndicesFromNewVoxels(newPointIdxVector,
+ change_detector_filter_);
+ change_detector_->switchBuffers();
+ return !newPointIdxVector.empty();
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::weight ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::weight()
{
- if (!use_normal_)
- {
- for (std::size_t i = 0; i < particles_->points.size (); i++)
- {
- computeTransformedPointCloudWithoutNormal (particles_->points[i], *transed_reference_vector_[i]);
+ if (!use_normal_) {
+ for (std::size_t i = 0; i < particles_->size(); i++) {
+ computeTransformedPointCloudWithoutNormal((*particles_)[i],
+ *transed_reference_vector_[i]);
}
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- cropInputPointCloud (input_, *coherence_input);
-
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
- for (std::size_t i = 0; i < particles_->points.size (); i++)
- {
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ cropInputPointCloud(input_, *coherence_input);
+
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ for (std::size_t i = 0; i < particles_->size(); i++) {
IndicesPtr indices;
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
- else
- {
- for (std::size_t i = 0; i < particles_->points.size (); i++)
- {
- IndicesPtr indices (new std::vector<int>);
- computeTransformedPointCloudWithNormal (particles_->points[i], *indices, *transed_reference_vector_[i]);
+ else {
+ for (std::size_t i = 0; i < particles_->size(); i++) {
+ IndicesPtr indices(new std::vector<int>);
+ computeTransformedPointCloudWithNormal(
+ (*particles_)[i], *indices, *transed_reference_vector_[i]);
}
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- cropInputPointCloud (input_, *coherence_input);
-
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
- for (std::size_t i = 0; i < particles_->points.size (); i++)
- {
- IndicesPtr indices (new std::vector<int>);
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ cropInputPointCloud(input_, *coherence_input);
+
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ for (std::size_t i = 0; i < particles_->size(); i++) {
+ IndicesPtr indices(new std::vector<int>);
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
-
- normalizeWeight ();
+
+ normalizeWeight();
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloud
-(const StateT& hypothesis, std::vector<int>& indices, PointCloudIn &cloud)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloud(
+ const StateT& hypothesis, std::vector<int>& indices, PointCloudIn& cloud)
{
if (use_normal_)
- computeTransformedPointCloudWithNormal (hypothesis, indices, cloud);
+ computeTransformedPointCloudWithNormal(hypothesis, indices, cloud);
else
- computeTransformedPointCloudWithoutNormal (hypothesis, cloud);
+ computeTransformedPointCloudWithoutNormal(hypothesis, cloud);
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloudWithoutNormal
-(const StateT& hypothesis, PointCloudIn &cloud)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloudWithoutNormal(
+ const StateT& hypothesis, PointCloudIn& cloud)
{
- const Eigen::Affine3f trans = toEigenMatrix (hypothesis);
+ const Eigen::Affine3f trans = toEigenMatrix(hypothesis);
// destructively assigns to cloud
- pcl::transformPointCloud<PointInT> (*ref_, cloud, trans);
+ pcl::transformPointCloud<PointInT>(*ref_, cloud, trans);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloudWithNormal (
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::computeTransformedPointCloudWithNormal(
#ifdef PCL_TRACKING_NORMAL_SUPPORTED
- const StateT& hypothesis, std::vector<int>& indices, PointCloudIn &cloud)
+ const StateT& hypothesis, std::vector<int>& indices, PointCloudIn& cloud)
#else
- const StateT&, std::vector<int>&, PointCloudIn &)
+ const StateT&, std::vector<int>&, PointCloudIn&)
#endif
{
#ifdef PCL_TRACKING_NORMAL_SUPPORTED
- const Eigen::Affine3f trans = toEigenMatrix (hypothesis);
+ const Eigen::Affine3f trans = toEigenMatrix(hypothesis);
// destructively assigns to cloud
- pcl::transformPointCloudWithNormals<PointInT> (*ref_, cloud, trans);
- for ( std::size_t i = 0; i < cloud.points.size (); i++ )
- {
- PointInT input_point = cloud.points[i];
+ pcl::transformPointCloudWithNormals<PointInT>(*ref_, cloud, trans);
+ for (std::size_t i = 0; i < cloud.size(); i++) {
+ PointInT input_point = cloud[i];
- if (!std::isfinite(input_point.x) || !std::isfinite(input_point.y) || !std::isfinite(input_point.z))
+ if (!std::isfinite(input_point.x) || !std::isfinite(input_point.y) ||
+ !std::isfinite(input_point.z))
continue;
// take occlusion into account
- Eigen::Vector4f p = input_point.getVector4fMap ();
- Eigen::Vector4f n = input_point.getNormalVector4fMap ();
- double theta = pcl::getAngle3D (p, n);
- if ( theta > occlusion_angle_thr_ )
- indices.push_back (i);
+ Eigen::Vector4f p = input_point.getVector4fMap();
+ Eigen::Vector4f n = input_point.getNormalVector4fMap();
+ double theta = pcl::getAngle3D(p, n);
+ if (theta > occlusion_angle_thr_)
+ indices.push_back(i);
}
#else
- PCL_WARN ("[pcl::%s::computeTransformedPointCloudWithoutNormal] use_normal_ == true is not supported in this Point Type.",
- getClassName ().c_str ());
+ PCL_WARN("[pcl::%s::computeTransformedPointCloudWithoutNormal] "
+ "use_normal_ == true is not supported in this Point Type.",
+ getClassName().c_str());
#endif
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::resample ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::resample()
{
- resampleWithReplacement ();
+ resampleWithReplacement();
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::resampleWithReplacement ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::resampleWithReplacement()
{
- std::vector<int> a (particles_->points.size ());
- std::vector<double> q (particles_->points.size ());
- genAliasTable (a, q, particles_);
-
- const std::vector<double> zero_mean (StateT::stateDimension (), 0.0);
+ std::vector<int> a(particles_->size());
+ std::vector<double> q(particles_->size());
+ genAliasTable(a, q, particles_);
+
+ const std::vector<double> zero_mean(StateT::stateDimension(), 0.0);
// memoize the original list of particles
PointCloudStatePtr origparticles = particles_;
- particles_->points.clear ();
+ particles_->points.clear();
  // the first particle is just a copy of the maximum result
StateT p = representative_state_;
- particles_->points.push_back (p);
-
+ particles_->points.push_back(p);
+
// with motion
- int motion_num = static_cast<int> (particles_->points.size ()) * static_cast<int> (motion_ratio_);
- for ( int i = 1; i < motion_num; i++ )
- {
- int target_particle_index = sampleWithReplacement (a, q);
- StateT p = origparticles->points[target_particle_index];
+ int motion_num =
+ static_cast<int>(particles_->size()) * static_cast<int>(motion_ratio_);
+ for (int i = 1; i < motion_num; i++) {
+ int target_particle_index = sampleWithReplacement(a, q);
+ StateT p = (*origparticles)[target_particle_index];
// add noise using gaussian
- p.sample (zero_mean, step_noise_covariance_);
+ p.sample(zero_mean, step_noise_covariance_);
p = p + motion_;
- particles_->points.push_back (p);
+ particles_->points.push_back(p);
}
-
+
// no motion
- for ( int i = motion_num; i < particle_num_; i++ )
- {
- int target_particle_index = sampleWithReplacement (a, q);
- StateT p = origparticles->points[target_particle_index];
+ for (int i = motion_num; i < particle_num_; i++) {
+ int target_particle_index = sampleWithReplacement(a, q);
+ StateT p = (*origparticles)[target_particle_index];
// add noise using gaussian
- p.sample (zero_mean, step_noise_covariance_);
- particles_->points.push_back (p);
+ p.sample(zero_mean, step_noise_covariance_);
+ particles_->points.push_back(p);
}
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::update ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::update()
{
-
+
StateT orig_representative = representative_state_;
- representative_state_.zero ();
+ representative_state_.zero();
representative_state_.weight = 0.0;
- for ( std::size_t i = 0; i < particles_->points.size (); i++)
- {
- StateT p = particles_->points[i];
+ for (const auto& p : *particles_) {
representative_state_ = representative_state_ + p * p.weight;
}
- representative_state_.weight = 1.0f / static_cast<float> (particles_->points.size ());
+ representative_state_.weight = 1.0f / static_cast<float>(particles_->size());
motion_ = representative_state_ - orig_representative;
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterTracker<PointInT, StateT>::computeTracking ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterTracker<PointInT, StateT>::computeTracking()
{
-
- for (int i = 0; i < iteration_num_; i++)
- {
- if (changed_)
- {
- resample ();
+
+ for (int i = 0; i < iteration_num_; i++) {
+ if (changed_) {
+ resample();
}
-
- weight (); // likelihood is called in it
-
- if (changed_)
- {
- update ();
+
+  weight(); // the likelihood evaluation happens inside weight()
+
+ if (changed_) {
+ update();
}
}
-
+
// if ( getResult ().weight < resample_likelihood_thr_ )
// {
// PCL_WARN ("too small likelihood, re-initializing...\n");
// initParticles (false);
// }
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_ParticleFilterTracker(T,ST) template class PCL_EXPORTS pcl::tracking::ParticleFilterTracker<T,ST>;
+#define PCL_INSTANTIATE_ParticleFilterTracker(T, ST) \
+ template class PCL_EXPORTS pcl::tracking::ParticleFilterTracker<T, ST>;
-#endif
+#endif
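
genAliasTable and sampleWithReplacement above implement Walker's alias method, which lets resampling draw a particle index proportional to its weight in constant time per draw after a linear-time table build. A minimal standalone sketch over a plain weight vector (hypothetical AliasTable, buildAliasTable and sampleAlias names, not PCL API), assuming the weights already sum to 1:

#include <cstddef>
#include <random>
#include <vector>

// Illustrative only: Walker's alias method, mirroring genAliasTable / sampleWithReplacement.
struct AliasTable {
  std::vector<int> a;    // alias index per bucket
  std::vector<double> q; // acceptance threshold per bucket
};

inline AliasTable
buildAliasTable(const std::vector<double>& weights)
{
  const std::size_t n = weights.size();
  AliasTable t{std::vector<int>(n), std::vector<double>(n)};
  std::vector<std::size_t> small, large;
  for (std::size_t i = 0; i < n; ++i) {
    t.a[i] = static_cast<int>(i);
    t.q[i] = weights[i] * static_cast<double>(n);
    (t.q[i] < 1.0 ? small : large).push_back(i);
  }
  while (!small.empty() && !large.empty()) {
    const std::size_t j = small.back(); small.pop_back(); // deficient bucket
    const std::size_t k = large.back(); large.pop_back(); // surplus bucket
    t.a[j] = static_cast<int>(k); // bucket j redirects its remainder to k
    t.q[k] += t.q[j] - 1.0;       // k gave away (1 - q[j]) of its mass
    (t.q[k] < 1.0 ? small : large).push_back(k);
  }
  return t;
}

inline int
sampleAlias(const AliasTable& t, std::mt19937& rng)
{
  std::uniform_real_distribution<> unit(0.0, 1.0);
  double rU = unit(rng) * static_cast<double>(t.q.size());
  const int k = static_cast<int>(rU);
  rU -= k;                           // fractional part decides bucket vs alias
  return (rU < t.q[k]) ? k : t.a[k];
}
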
#include <pcl/tracking/particle_filter_omp.h>
+namespace pcl {
+namespace tracking {
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads (unsigned int nr_threads)
+template <typename PointInT, typename StateT>
+void
+ParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads(unsigned int nr_threads)
{
if (nr_threads == 0)
#ifdef _OPENMP
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename StateT> void
-pcl::tracking::ParticleFilterOMPTracker<PointInT, StateT>::weight ()
+template <typename PointInT, typename StateT>
+void
+ParticleFilterOMPTracker<PointInT, StateT>::weight()
{
- if (!use_normal_)
- {
+ if (!use_normal_) {
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
+ // clang-format on
for (int i = 0; i < particle_num_; i++)
- this->computeTransformedPointCloudWithoutNormal (particles_->points[i], *transed_reference_vector_[i]);
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- this->cropInputPointCloud (input_, *coherence_input);
- if (change_counter_ == 0)
- {
+ this->computeTransformedPointCloudWithoutNormal((*particles_)[i],
+ *transed_reference_vector_[i]);
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ this->cropInputPointCloud(input_, *coherence_input);
+ if (change_counter_ == 0) {
// test change detector
- if (!use_change_detector_ || this->testChangeDetection (coherence_input))
- {
+ if (!use_change_detector_ || this->testChangeDetection(coherence_input)) {
changed_ = true;
change_counter_ = change_detector_interval_;
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- IndicesPtr indices; // dummy
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ IndicesPtr indices; // dummy
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
else
changed_ = false;
}
- else
- {
+ else {
--change_counter_;
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- IndicesPtr indices; // dummy
- coherence_->compute (transed_reference_vector_[i], indices, particles_->points[i].weight);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ IndicesPtr indices; // dummy
+ coherence_->compute(
+ transed_reference_vector_[i], indices, (*particles_)[i].weight);
}
}
}
- else
- {
- std::vector<IndicesPtr> indices_list (particle_num_);
- for (int i = 0; i < particle_num_; i++)
- {
- indices_list[i] = IndicesPtr (new std::vector<int>);
+ else {
+ std::vector<IndicesPtr> indices_list(particle_num_);
+ for (int i = 0; i < particle_num_; i++) {
+ indices_list[i] = IndicesPtr(new std::vector<int>);
}
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(indices_list) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- this->computeTransformedPointCloudWithNormal (particles_->points[i], *indices_list[i], *transed_reference_vector_[i]);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ this->computeTransformedPointCloudWithNormal(
+ (*particles_)[i], *indices_list[i], *transed_reference_vector_[i]);
}
-
- PointCloudInPtr coherence_input (new PointCloudIn);
- this->cropInputPointCloud (input_, *coherence_input);
-
- coherence_->setTargetCloud (coherence_input);
- coherence_->initCompute ();
+
+ PointCloudInPtr coherence_input(new PointCloudIn);
+ this->cropInputPointCloud(input_, *coherence_input);
+
+ coherence_->setTargetCloud(coherence_input);
+ coherence_->initCompute();
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(indices_list) \
num_threads(threads_)
- for (int i = 0; i < particle_num_; i++)
- {
- coherence_->compute (transed_reference_vector_[i], indices_list[i], particles_->points[i].weight);
+ // clang-format on
+ for (int i = 0; i < particle_num_; i++) {
+ coherence_->compute(
+ transed_reference_vector_[i], indices_list[i], (*particles_)[i].weight);
}
}
-
- normalizeWeight ();
+
+ normalizeWeight();
}
+} // namespace tracking
+} // namespace pcl
-#define PCL_INSTANTIATE_ParticleFilterOMPTracker(T,ST) template class PCL_EXPORTS pcl::tracking::ParticleFilterOMPTracker<T,ST>;
+#define PCL_INSTANTIATE_ParticleFilterOMPTracker(T, ST) \
+ template class PCL_EXPORTS pcl::tracking::ParticleFilterOMPTracker<T, ST>;
#endif
#ifndef PCL_TRACKING_IMPL_PYRAMIDAL_KLT_HPP
#define PCL_TRACKING_IMPL_PYRAMIDAL_KLT_HPP
-#include <pcl/common/time.h>
-#include <pcl/common/utils.h>
#include <pcl/common/io.h>
+#include <pcl/common/time.h>
#include <pcl/common/utils.h>
-
-namespace pcl
-{
-
-namespace tracking
-{
-
-template <typename PointInT, typename IntensityT> inline void
-PyramidalKLTTracker<PointInT, IntensityT>::setTrackingWindowSize (int width, int height)
+namespace pcl {
+namespace tracking {
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+inline void
+PyramidalKLTTracker<PointInT, IntensityT>::setTrackingWindowSize(int width, int height)
{
track_width_ = width;
track_height_ = height;
}
-
-template <typename PointInT, typename IntensityT> inline void
-PyramidalKLTTracker<PointInT, IntensityT>::setPointsToTrack (const pcl::PointCloud<pcl::PointUV>::ConstPtr& keypoints)
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+inline void
+PyramidalKLTTracker<PointInT, IntensityT>::setPointsToTrack(
+ const pcl::PointCloud<pcl::PointUV>::ConstPtr& keypoints)
{
- if (keypoints->size () <= keypoints_nbr_)
+ if (keypoints->size() <= keypoints_nbr_)
keypoints_ = keypoints;
- else
- {
- pcl::PointCloud<pcl::PointUV>::Ptr p (new pcl::PointCloud<pcl::PointUV>);
- p->reserve (keypoints_nbr_);
+ else {
+ pcl::PointCloud<pcl::PointUV>::Ptr p(new pcl::PointCloud<pcl::PointUV>);
+ p->reserve(keypoints_nbr_);
for (std::size_t i = 0; i < keypoints_nbr_; ++i)
- p->push_back (keypoints->points[i]);
+ p->push_back((*keypoints)[i]);
keypoints_ = p;
}
- keypoints_status_.reset (new pcl::PointIndices);
- keypoints_status_->indices.resize (keypoints_->size (), 0);
+ keypoints_status_.reset(new pcl::PointIndices);
+ keypoints_status_->indices.resize(keypoints_->size(), 0);
}
-
-template <typename PointInT, typename IntensityT> inline void
-PyramidalKLTTracker<PointInT, IntensityT>::setPointsToTrack (const pcl::PointIndicesConstPtr& points)
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+inline void
+PyramidalKLTTracker<PointInT, IntensityT>::setPointsToTrack(
+ const pcl::PointIndicesConstPtr& points)
{
- assert ((input_ || ref_) && "[pcl::tracking::PyramidalKLTTracker] CALL setInputCloud FIRST!");
+ assert((input_ || ref_) && "[PyramidalKLTTracker] CALL setInputCloud FIRST!");
- pcl::PointCloud<pcl::PointUV>::Ptr keypoints (new pcl::PointCloud<pcl::PointUV>);
- keypoints->reserve (keypoints_nbr_);
- for (std::size_t i = 0; i < keypoints_nbr_; ++i)
- {
+ pcl::PointCloud<pcl::PointUV>::Ptr keypoints(new pcl::PointCloud<pcl::PointUV>);
+ keypoints->reserve(keypoints_nbr_);
+ for (std::size_t i = 0; i < keypoints_nbr_; ++i) {
pcl::PointUV uv;
uv.u = points->indices[i] % input_->width;
uv.v = points->indices[i] / input_->width;
- keypoints->push_back (uv);
+ keypoints->push_back(uv);
}
- setPointsToTrack (keypoints);
+ setPointsToTrack(keypoints);
}
-
-template <typename PointInT, typename IntensityT> bool
-PyramidalKLTTracker<PointInT, IntensityT>::initCompute ()
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+bool
+PyramidalKLTTracker<PointInT, IntensityT>::initCompute()
{
// std::cout << ">>> [PyramidalKLTTracker::initCompute]" << std::endl;
- if (!PCLBase<PointInT>::initCompute ())
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] PCLBase::Init failed.\n",
- tracker_name_.c_str ());
+ if (!PCLBase<PointInT>::initCompute()) {
+ PCL_ERROR("[%s::initCompute] PCLBase::Init failed.\n", tracker_name_.c_str());
return (false);
}
- if (!input_->isOrganized ())
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] Need an organized point cloud to proceed!",
- tracker_name_.c_str ());
+ if (!input_->isOrganized()) {
+ PCL_ERROR(
+ "[pcl::tracking::%s::initCompute] Need an organized point cloud to proceed!",
+ tracker_name_.c_str());
return (false);
}
- if (!keypoints_ || keypoints_->empty ())
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] No keypoints aborting!",
- tracker_name_.c_str ());
+ if (!keypoints_ || keypoints_->empty()) {
+ PCL_ERROR("[pcl::tracking::%s::initCompute] No keypoints aborting!",
+ tracker_name_.c_str());
return (false);
}
// This is the first call
- if (!ref_)
- {
+ if (!ref_) {
ref_ = input_;
// std::cout << "First run!!!" << std::endl;
- if ((track_height_ * track_width_)%2 == 0)
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] Tracking window (%dx%d) must be odd!\n",
- tracker_name_.c_str (), track_width_, track_height_);
+ if ((track_height_ * track_width_) % 2 == 0) {
+ PCL_ERROR(
+ "[pcl::tracking::%s::initCompute] Tracking window (%dx%d) must be odd!\n",
+ tracker_name_.c_str(),
+ track_width_,
+ track_height_);
return (false);
}
- if (track_height_ < 3 || track_width_ < 3)
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] Tracking window (%dx%d) must be >= 3x3!\n",
- tracker_name_.c_str (), track_width_, track_height_);
+ if (track_height_ < 3 || track_width_ < 3) {
+ PCL_ERROR(
+ "[pcl::tracking::%s::initCompute] Tracking window (%dx%d) must be >= 3x3!\n",
+ tracker_name_.c_str(),
+ track_width_,
+ track_height_);
return (false);
}
track_width_2_ = track_width_ / 2;
track_height_2_ = track_height_ / 2;
- if (nb_levels_ < 2)
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] Number of pyramid levels should be at least 2!",
- tracker_name_.c_str ());
+ if (nb_levels_ < 2) {
+ PCL_ERROR("[pcl::tracking::%s::initCompute] Number of pyramid levels should be "
+ "at least 2!",
+ tracker_name_.c_str());
return (false);
}
- if (nb_levels_ > 5)
- {
- PCL_ERROR ("[pcl::tracking::%s::initCompute] Number of pyramid levels should not exceed 5!",
- tracker_name_.c_str ());
+ if (nb_levels_ > 5) {
+ PCL_ERROR("[pcl::tracking::%s::initCompute] Number of pyramid levels should not "
+ "exceed 5!",
+ tracker_name_.c_str());
return (false);
}
- computePyramids (ref_, ref_pyramid_, pcl::BORDER_REFLECT_101);
+ computePyramids(ref_, ref_pyramid_, pcl::BORDER_REFLECT_101);
return (true);
}
return (true);
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::derivatives (const FloatImage& src, FloatImage& grad_x, FloatImage& grad_y) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::derivatives(const FloatImage& src,
+ FloatImage& grad_x,
+ FloatImage& grad_y) const
{
// std::cout << ">>> derivatives" << std::endl;
////////////////////////////////////////////////////////
// +10 0 -10 //
// +3 0 -3 //
////////////////////////////////////////////////////////
- if (grad_x.size () != src.size () || grad_x.width != src.width || grad_x.height != src.height)
- grad_x = FloatImage (src.width, src.height);
- if (grad_y.size () != src.size () || grad_y.width != src.width || grad_y.height != src.height)
- grad_y = FloatImage (src.width, src.height);
+ if (grad_x.size() != src.size() || grad_x.width != src.width ||
+ grad_x.height != src.height)
+ grad_x = FloatImage(src.width, src.height);
+ if (grad_y.size() != src.size() || grad_y.width != src.width ||
+ grad_y.height != src.height)
+ grad_y = FloatImage(src.width, src.height);
int height = src.height, width = src.width;
- float *row0 = new float [src.width + 2];
- float *row1 = new float [src.width + 2];
- float *trow0 = row0; ++trow0;
- float *trow1 = row1; ++trow1;
- const float* src_ptr = &(src.points[0]);
-
- for (int y = 0; y < height; y++)
- {
- const float* srow0 = src_ptr + (y > 0 ? y-1 : height > 1 ? 1 : 0) * width;
+ float* row0 = new float[src.width + 2];
+ float* row1 = new float[src.width + 2];
+ float* trow0 = row0;
+ ++trow0;
+ float* trow1 = row1;
+ ++trow1;
+ const float* src_ptr = &(src[0]);
+
+ for (int y = 0; y < height; y++) {
+ const float* srow0 = src_ptr + (y > 0 ? y - 1 : height > 1 ? 1 : 0) * width;
const float* srow1 = src_ptr + y * width;
- const float* srow2 = src_ptr + (y < height-1 ? y+1 : height > 1 ? height-2 : 0) * width;
- float* grad_x_row = &(grad_x.points[y * width]);
- float* grad_y_row = &(grad_y.points[y * width]);
+ const float* srow2 =
+ src_ptr + (y < height - 1 ? y + 1 : height > 1 ? height - 2 : 0) * width;
+ float* grad_x_row = &(grad_x[y * width]);
+ float* grad_y_row = &(grad_y[y * width]);
// do vertical convolution
- for (int x = 0; x < width; x++)
- {
- trow0[x] = (srow0[x] + srow2[x])*3 + srow1[x]*10;
+ for (int x = 0; x < width; x++) {
+ trow0[x] = (srow0[x] + srow2[x]) * 3 + srow1[x] * 10;
trow1[x] = srow2[x] - srow0[x];
}
// make border
- int x0 = width > 1 ? 1 : 0, x1 = width > 1 ? width-2 : 0;
- trow0[-1] = trow0[x0]; trow0[width] = trow0[x1];
- trow1[-1] = trow1[x0]; trow1[width] = trow1[x1];
+ int x0 = width > 1 ? 1 : 0, x1 = width > 1 ? width - 2 : 0;
+ trow0[-1] = trow0[x0];
+ trow0[width] = trow0[x1];
+ trow1[-1] = trow1[x0];
+ trow1[width] = trow1[x1];
// do horizontal convolution and store results
- for (int x = 0; x < width; x++)
- {
- grad_x_row[x] = trow0[x+1] - trow0[x-1];
- grad_y_row[x] = (trow1[x+1] + trow1[x-1])*3 + trow1[x]*10;
+ for (int x = 0; x < width; x++) {
+ grad_x_row[x] = trow0[x + 1] - trow0[x - 1];
+ grad_y_row[x] = (trow1[x + 1] + trow1[x - 1]) * 3 + trow1[x] * 10;
}
}
}
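The derivatives() routine above implements a separable Scharr-style gradient: for grad_x, each column is first smoothed vertically with the [3 10 3] kernel (trow0), then differentiated horizontally with a central difference, and the roles are swapped for grad_y. A small self-contained sketch of the same computation for interior pixels of a row-major float buffer (the helper name and the omitted border replication are simplifications, not part of this patch):

#include <vector>

// grad_x at interior pixels: vertical [3 10 3] smoothing followed by a
// horizontal central difference, mirroring the per-row loop in derivatives().
std::vector<float>
scharrGradX(const std::vector<float>& img, int width, int height)
{
  std::vector<float> grad(img.size(), 0.0f);
  for (int y = 1; y < height - 1; ++y) {
    for (int x = 1; x < width - 1; ++x) {
      auto smooth = [&](int xx) {
        return (img[(y - 1) * width + xx] + img[(y + 1) * width + xx]) * 3.0f +
               img[y * width + xx] * 10.0f;
      };
      grad[y * width + x] = smooth(x + 1) - smooth(x - 1);
    }
  }
  return grad;
}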
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::downsample (const FloatImageConstPtr& input,
- FloatImageConstPtr& output) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::downsample(const FloatImageConstPtr& input,
+ FloatImageConstPtr& output) const
{
- FloatImage smoothed (input->width, input->height);
- convolve (input, smoothed);
+ FloatImage smoothed(input->width, input->height);
+ convolve(input, smoothed);
- int width = (smoothed.width +1) / 2;
- int height = (smoothed.height +1) / 2;
- std::vector<int> ii (width);
+ int width = (smoothed.width + 1) / 2;
+ int height = (smoothed.height + 1) / 2;
+ std::vector<int> ii(width);
for (int i = 0; i < width; ++i)
ii[i] = 2 * i;
- FloatImagePtr down (new FloatImage (width, height));
+ FloatImagePtr down(new FloatImage(width, height));
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(down, height, output, smoothed, width) \
firstprivate(ii) \
num_threads(threads_)
- for (int j = 0; j < height; ++j)
- {
- int jj = 2*j;
+ // clang-format on
+ for (int j = 0; j < height; ++j) {
+ int jj = 2 * j;
for (int i = 0; i < width; ++i)
- (*down) (i,j) = smoothed (ii[i],jj);
+ (*down)(i, j) = smoothed(ii[i], jj);
}
output = down;
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::downsample (const FloatImageConstPtr& input,
- FloatImageConstPtr& output,
- FloatImageConstPtr& output_grad_x,
- FloatImageConstPtr& output_grad_y) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::downsample(
+ const FloatImageConstPtr& input,
+ FloatImageConstPtr& output,
+ FloatImageConstPtr& output_grad_x,
+ FloatImageConstPtr& output_grad_y) const
{
- downsample (input, output);
- FloatImagePtr grad_x (new FloatImage (input->width, input->height));
- FloatImagePtr grad_y (new FloatImage (input->width, input->height));
- derivatives (*output, *grad_x, *grad_y);
+ downsample(input, output);
+ FloatImagePtr grad_x(new FloatImage(input->width, input->height));
+ FloatImagePtr grad_y(new FloatImage(input->width, input->height));
+ derivatives(*output, *grad_x, *grad_y);
output_grad_x = grad_x;
output_grad_y = grad_y;
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::convolve (const FloatImageConstPtr& input, FloatImage& output) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::convolve(
+ const FloatImageConstPtr& input, FloatImage& output) const
{
- FloatImagePtr tmp (new FloatImage (input->width, input->height));
- convolveRows (input, *tmp);
- convolveCols (tmp, output);
+ FloatImagePtr tmp(new FloatImage(input->width, input->height));
+ convolveRows(input, *tmp);
+ convolveCols(tmp, output);
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::convolveRows (const FloatImageConstPtr& input, FloatImage& output) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::convolveRows(
+ const FloatImageConstPtr& input, FloatImage& output) const
{
int width = input->width;
int height = input->height;
int last = input->width - kernel_size_2_;
int w = last - 1;
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(input, height, last, output, w, width) \
num_threads(threads_)
- for (int j = 0; j < height; ++j)
- {
- for (int i = kernel_size_2_; i < last; ++i)
- {
+ // clang-format on
+ for (int j = 0; j < height; ++j) {
+ for (int i = kernel_size_2_; i < last; ++i) {
double result = 0;
for (int k = kernel_last_, l = i - kernel_size_2_; k > -1; --k, ++l)
- result+= kernel_[k] * (*input) (l,j);
+ result += kernel_[k] * (*input)(l, j);
- output (i,j) = static_cast<float> (result);
+ output(i, j) = static_cast<float>(result);
}
for (int i = last; i < width; ++i)
- output (i,j) = output (w, j);
+ output(i, j) = output(w, j);
for (int i = 0; i < kernel_size_2_; ++i)
- output (i,j) = output (kernel_size_2_, j);
+ output(i, j) = output(kernel_size_2_, j);
}
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::convolveCols (const FloatImageConstPtr& input, FloatImage& output) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::convolveCols(const FloatImageConstPtr& input,
+ FloatImage& output) const
{
- output = FloatImage (input->width, input->height);
+ output = FloatImage(input->width, input->height);
int width = input->width;
int height = input->height;
int last = input->height - kernel_size_2_;
- int h = last -1;
+ int h = last - 1;
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(input, h, height, last, output, width) \
num_threads(threads_)
- for (int i = 0; i < width; ++i)
- {
- for (int j = kernel_size_2_; j < last; ++j)
- {
+ // clang-format on
+ for (int i = 0; i < width; ++i) {
+ for (int j = kernel_size_2_; j < last; ++j) {
double result = 0;
for (int k = kernel_last_, l = j - kernel_size_2_; k > -1; --k, ++l)
- result += kernel_[k] * (*input) (i,l);
- output (i,j) = static_cast<float> (result);
+ result += kernel_[k] * (*input)(i, l);
+ output(i, j) = static_cast<float>(result);
}
for (int j = last; j < height; ++j)
- output (i,j) = output (i,h);
+ output(i, j) = output(i, h);
for (int j = 0; j < kernel_size_2_; ++j)
- output (i,j) = output (i, kernel_size_2_);
+ output(i, j) = output(i, kernel_size_2_);
}
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::computePyramids (const PointCloudInConstPtr& input,
- std::vector<FloatImageConstPtr>& pyramid,
- pcl::InterpolationType border_type) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::computePyramids(
+ const PointCloudInConstPtr& input,
+ std::vector<FloatImageConstPtr>& pyramid,
+ pcl::InterpolationType border_type) const
{
int step = 3;
- pyramid.resize (step * nb_levels_);
+ pyramid.resize(step * nb_levels_);
FloatImageConstPtr previous;
- FloatImagePtr tmp (new FloatImage (input->width, input->height));
+ FloatImagePtr tmp(new FloatImage(input->width, input->height));
+ // clang-format off
#pragma omp parallel for \
default(none) \
shared(input, tmp) \
num_threads(threads_)
- for (int i = 0; i < static_cast<int> (input->size ()); ++i)
- tmp->points[i] = intensity_ (input->points[i]);
+ // clang-format on
+ for (int i = 0; i < static_cast<int>(input->size()); ++i)
+ (*tmp)[i] = intensity_((*input)[i]);
previous = tmp;
- FloatImagePtr img (new FloatImage (previous->width + 2*track_width_,
- previous->height + 2*track_height_));
-
- pcl::copyPointCloud (*tmp, *img, track_height_, track_height_, track_width_, track_width_,
- border_type, 0.f);
+ FloatImagePtr img(new FloatImage(previous->width + 2 * track_width_,
+ previous->height + 2 * track_height_));
+
+ pcl::copyPointCloud(*tmp,
+ *img,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ border_type,
+ 0.f);
pyramid[0] = img;
// compute first level gradients
- FloatImagePtr g_x (new FloatImage (input->width, input->height));
- FloatImagePtr g_y (new FloatImage (input->width, input->height));
- derivatives (*img, *g_x, *g_y);
+ FloatImagePtr g_x(new FloatImage(input->width, input->height));
+ FloatImagePtr g_y(new FloatImage(input->width, input->height));
+ derivatives(*img, *g_x, *g_y);
// copy to bigger clouds
- FloatImagePtr grad_x (new FloatImage (previous->width + 2*track_width_,
- previous->height + 2*track_height_));
- pcl::copyPointCloud (*g_x, *grad_x, track_height_, track_height_, track_width_, track_width_,
- pcl::BORDER_CONSTANT, 0.f);
+ FloatImagePtr grad_x(new FloatImage(previous->width + 2 * track_width_,
+ previous->height + 2 * track_height_));
+ pcl::copyPointCloud(*g_x,
+ *grad_x,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ pcl::BORDER_CONSTANT,
+ 0.f);
pyramid[1] = grad_x;
- FloatImagePtr grad_y (new FloatImage (previous->width + 2*track_width_,
- previous->height + 2*track_height_));
- pcl::copyPointCloud (*g_y, *grad_y, track_height_, track_height_, track_width_, track_width_,
- pcl::BORDER_CONSTANT, 0.f);
+ FloatImagePtr grad_y(new FloatImage(previous->width + 2 * track_width_,
+ previous->height + 2 * track_height_));
+ pcl::copyPointCloud(*g_y,
+ *grad_y,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ pcl::BORDER_CONSTANT,
+ 0.f);
pyramid[2] = grad_y;
- for (int level = 1; level < nb_levels_; ++level)
- {
+ for (int level = 1; level < nb_levels_; ++level) {
// compute current level and current level gradients
FloatImageConstPtr current;
FloatImageConstPtr g_x;
FloatImageConstPtr g_y;
- downsample (previous, current, g_x, g_y);
+ downsample(previous, current, g_x, g_y);
// copy to bigger clouds
- FloatImagePtr image (new FloatImage (current->width + 2*track_width_,
- current->height + 2*track_height_));
- pcl::copyPointCloud (*current, *image, track_height_, track_height_, track_width_, track_width_,
- border_type, 0.f);
- pyramid[level*step] = image;
- FloatImagePtr gradx (new FloatImage (g_x->width + 2*track_width_, g_x->height + 2*track_height_));
- pcl::copyPointCloud (*g_x, *gradx, track_height_, track_height_, track_width_, track_width_,
- pcl::BORDER_CONSTANT, 0.f);
- pyramid[level*step + 1] = gradx;
- FloatImagePtr grady (new FloatImage (g_y->width + 2*track_width_, g_y->height + 2*track_height_));
- pcl::copyPointCloud (*g_y, *grady, track_height_, track_height_, track_width_, track_width_,
- pcl::BORDER_CONSTANT, 0.f);
- pyramid[level*step + 2] = grady;
+ FloatImagePtr image(new FloatImage(current->width + 2 * track_width_,
+ current->height + 2 * track_height_));
+ pcl::copyPointCloud(*current,
+ *image,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ border_type,
+ 0.f);
+ pyramid[level * step] = image;
+ FloatImagePtr gradx(
+ new FloatImage(g_x->width + 2 * track_width_, g_x->height + 2 * track_height_));
+ pcl::copyPointCloud(*g_x,
+ *gradx,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ pcl::BORDER_CONSTANT,
+ 0.f);
+ pyramid[level * step + 1] = gradx;
+ FloatImagePtr grady(
+ new FloatImage(g_y->width + 2 * track_width_, g_y->height + 2 * track_height_));
+ pcl::copyPointCloud(*g_y,
+ *grady,
+ track_height_,
+ track_height_,
+ track_width_,
+ track_width_,
+ pcl::BORDER_CONSTANT,
+ 0.f);
+ pyramid[level * step + 2] = grady;
// set the new level
previous = current;
}
}
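computePyramids() stores three clouds per level — the border-padded intensity image followed by its x and y gradients — so with step = 3 the data for level L lives at indices 3*L, 3*L + 1 and 3*L + 2. The access pattern that track() relies on therefore looks like this (illustrative snippet only, assuming a pyramid already filled as above):

// pyramid was filled by computePyramids(): image, grad_x, grad_y per level.
const int level = 2; // any value in [0, nb_levels_)
const FloatImage& image = *pyramid[level * 3];
const FloatImage& grad_x = *pyramid[level * 3 + 1];
const FloatImage& grad_y = *pyramid[level * 3 + 2];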
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::spatialGradient (const FloatImage& img,
- const FloatImage& grad_x,
- const FloatImage& grad_y,
- const Eigen::Array2i& location,
- const Eigen::Array4f& weight,
- Eigen::ArrayXXf& win,
- Eigen::ArrayXXf& grad_x_win,
- Eigen::ArrayXXf& grad_y_win,
- Eigen::Array3f &covariance) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::spatialGradient(
+ const FloatImage& img,
+ const FloatImage& grad_x,
+ const FloatImage& grad_y,
+ const Eigen::Array2i& location,
+ const Eigen::Array4f& weight,
+ Eigen::ArrayXXf& win,
+ Eigen::ArrayXXf& grad_x_win,
+ Eigen::ArrayXXf& grad_y_win,
+ Eigen::Array3f& covariance) const
{
const int step = img.width;
- covariance.setZero ();
-
- for (int y = 0; y < track_height_; y++)
- {
- const float* img_ptr = &(img.points[0]) + (y + location[1])*step + location[0];
- const float* grad_x_ptr = &(grad_x.points[0]) + (y + location[1])*step + location[0];
- const float* grad_y_ptr = &(grad_y.points[0]) + (y + location[1])*step + location[0];
-
- float* win_ptr = win.data () + y*win.cols ();
- float* grad_x_win_ptr = grad_x_win.data () + y*grad_x_win.cols ();
- float* grad_y_win_ptr = grad_y_win.data () + y*grad_y_win.cols ();
-
- for (int x =0; x < track_width_; ++x, ++grad_x_ptr, ++grad_y_ptr)
- {
- *win_ptr++ = img_ptr[x]*weight[0] + img_ptr[x+1]*weight[1] + img_ptr[x+step]*weight[2] + img_ptr[x+step+1]*weight[3];
- float ixval = grad_x_ptr[0]*weight[0] + grad_x_ptr[1]*weight[1] + grad_x_ptr[step]*weight[2] + grad_x_ptr[step+1]*weight[3];
- float iyval = grad_y_ptr[0]*weight[0] + grad_y_ptr[1]*weight[1] + grad_y_ptr[step]*weight[2] + grad_y_ptr[step+1]*weight[3];
+ covariance.setZero();
+
+ for (int y = 0; y < track_height_; y++) {
+ const float* img_ptr = &(img[0]) + (y + location[1]) * step + location[0];
+ const float* grad_x_ptr = &(grad_x[0]) + (y + location[1]) * step + location[0];
+ const float* grad_y_ptr = &(grad_y[0]) + (y + location[1]) * step + location[0];
+
+ float* win_ptr = win.data() + y * win.cols();
+ float* grad_x_win_ptr = grad_x_win.data() + y * grad_x_win.cols();
+ float* grad_y_win_ptr = grad_y_win.data() + y * grad_y_win.cols();
+
+ for (int x = 0; x < track_width_; ++x, ++grad_x_ptr, ++grad_y_ptr) {
+ *win_ptr++ = img_ptr[x] * weight[0] + img_ptr[x + 1] * weight[1] +
+ img_ptr[x + step] * weight[2] + img_ptr[x + step + 1] * weight[3];
+ float ixval = grad_x_ptr[0] * weight[0] + grad_x_ptr[1] * weight[1] +
+ grad_x_ptr[step] * weight[2] + grad_x_ptr[step + 1] * weight[3];
+ float iyval = grad_y_ptr[0] * weight[0] + grad_y_ptr[1] * weight[1] +
+ grad_y_ptr[step] * weight[2] + grad_y_ptr[step + 1] * weight[3];
//!!! store those
*grad_x_win_ptr++ = ixval;
*grad_y_win_ptr++ = iyval;
- //covariance components
- covariance[0] += ixval*ixval;
- covariance[1] += ixval*iyval;
- covariance[2] += iyval*iyval;
+ // covariance components
+ covariance[0] += ixval * ixval;
+ covariance[1] += ixval * iyval;
+ covariance[2] += iyval * iyval;
}
}
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::mismatchVector (const Eigen::ArrayXXf& prev,
- const Eigen::ArrayXXf& prev_grad_x,
- const Eigen::ArrayXXf& prev_grad_y,
- const FloatImage& next,
- const Eigen::Array2i& location,
- const Eigen::Array4f& weight,
- Eigen::Array2f &b) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::mismatchVector(
+ const Eigen::ArrayXXf& prev,
+ const Eigen::ArrayXXf& prev_grad_x,
+ const Eigen::ArrayXXf& prev_grad_y,
+ const FloatImage& next,
+ const Eigen::Array2i& location,
+ const Eigen::Array4f& weight,
+ Eigen::Array2f& b) const
{
const int step = next.width;
- b.setZero ();
- for (int y = 0; y < track_height_; y++)
- {
- const float* next_ptr = &(next.points[0]) + (y + location[1])*step + location[0];
- const float* prev_ptr = prev.data () + y*prev.cols ();
- const float* prev_grad_x_ptr = prev_grad_x.data () + y*prev_grad_x.cols ();
- const float* prev_grad_y_ptr = prev_grad_y.data () + y*prev_grad_y.cols ();
-
- for (int x = 0; x < track_width_; ++x, ++prev_grad_y_ptr, ++prev_grad_x_ptr)
- {
- float diff = next_ptr[x]*weight[0] + next_ptr[x+1]*weight[1]
- + next_ptr[x+step]*weight[2] + next_ptr[x+step+1]*weight[3] - prev_ptr[x];
+ b.setZero();
+ for (int y = 0; y < track_height_; y++) {
+ const float* next_ptr = &(next[0]) + (y + location[1]) * step + location[0];
+ const float* prev_ptr = prev.data() + y * prev.cols();
+ const float* prev_grad_x_ptr = prev_grad_x.data() + y * prev_grad_x.cols();
+ const float* prev_grad_y_ptr = prev_grad_y.data() + y * prev_grad_y.cols();
+
+ for (int x = 0; x < track_width_; ++x, ++prev_grad_y_ptr, ++prev_grad_x_ptr) {
+ float diff = next_ptr[x] * weight[0] + next_ptr[x + 1] * weight[1] +
+ next_ptr[x + step] * weight[2] + next_ptr[x + step + 1] * weight[3] -
+ prev_ptr[x];
b[0] += *prev_grad_x_ptr * diff;
b[1] += *prev_grad_y_ptr * diff;
}
}
}
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::track (const PointCloudInConstPtr& prev_input,
- const PointCloudInConstPtr& input,
- const std::vector<FloatImageConstPtr>& prev_pyramid,
- const std::vector<FloatImageConstPtr>& pyramid,
- const pcl::PointCloud<pcl::PointUV>::ConstPtr& prev_keypoints,
- pcl::PointCloud<pcl::PointUV>::Ptr& keypoints,
- std::vector<int>& status,
- Eigen::Affine3f& motion) const
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::track(
+ const PointCloudInConstPtr& prev_input,
+ const PointCloudInConstPtr& input,
+ const std::vector<FloatImageConstPtr>& prev_pyramid,
+ const std::vector<FloatImageConstPtr>& pyramid,
+ const pcl::PointCloud<pcl::PointUV>::ConstPtr& prev_keypoints,
+ pcl::PointCloud<pcl::PointUV>::Ptr& keypoints,
+ std::vector<int>& status,
+ Eigen::Affine3f& motion) const
{
- std::vector<Eigen::Array2f, Eigen::aligned_allocator<Eigen::Array2f> > next_pts (prev_keypoints->size ());
- Eigen::Array2f half_win ((track_width_-1)*0.5f, (track_height_-1)*0.5f);
+ std::vector<Eigen::Array2f, Eigen::aligned_allocator<Eigen::Array2f>> next_pts(
+ prev_keypoints->size());
+ Eigen::Array2f half_win((track_width_ - 1) * 0.5f, (track_height_ - 1) * 0.5f);
pcl::TransformationFromCorrespondences transformation_computer;
- const int nb_points = prev_keypoints->size ();
- for (int level = nb_levels_ - 1; level >= 0; --level)
- {
- const FloatImage& prev = *(prev_pyramid[level*3]);
- const FloatImage& next = *(pyramid[level*3]);
- const FloatImage& grad_x = *(prev_pyramid[level*3+1]);
- const FloatImage& grad_y = *(prev_pyramid[level*3+2]);
-
- Eigen::ArrayXXf prev_win (track_height_, track_width_);
- Eigen::ArrayXXf grad_x_win (track_height_, track_width_);
- Eigen::ArrayXXf grad_y_win (track_height_, track_width_);
- float ratio (1./(1 << level));
- for (int ptidx = 0; ptidx < nb_points; ptidx++)
- {
- Eigen::Array2f prev_pt (prev_keypoints->points[ptidx].u * ratio,
- prev_keypoints->points[ptidx].v * ratio);
+ const int nb_points = prev_keypoints->size();
+ for (int level = nb_levels_ - 1; level >= 0; --level) {
+ const FloatImage& prev = *(prev_pyramid[level * 3]);
+ const FloatImage& next = *(pyramid[level * 3]);
+ const FloatImage& grad_x = *(prev_pyramid[level * 3 + 1]);
+ const FloatImage& grad_y = *(prev_pyramid[level * 3 + 2]);
+
+ Eigen::ArrayXXf prev_win(track_height_, track_width_);
+ Eigen::ArrayXXf grad_x_win(track_height_, track_width_);
+ Eigen::ArrayXXf grad_y_win(track_height_, track_width_);
+ float ratio(1. / (1 << level));
+ for (int ptidx = 0; ptidx < nb_points; ptidx++) {
+ Eigen::Array2f prev_pt((*prev_keypoints)[ptidx].u * ratio,
+ (*prev_keypoints)[ptidx].v * ratio);
Eigen::Array2f next_pt;
- if (level == nb_levels_ -1)
+ if (level == nb_levels_ - 1)
next_pt = prev_pt;
else
- next_pt = next_pts[ptidx]*2.f;
+ next_pt = next_pts[ptidx] * 2.f;
next_pts[ptidx] = next_pt;
Eigen::Array2i iprev_point;
prev_pt -= half_win;
- iprev_point[0] = std::floor (prev_pt[0]);
- iprev_point[1] = std::floor (prev_pt[1]);
+ iprev_point[0] = std::floor(prev_pt[0]);
+ iprev_point[1] = std::floor(prev_pt[1]);
- if (iprev_point[0] < -track_width_ || (std::uint32_t) iprev_point[0] >= grad_x.width ||
- iprev_point[1] < -track_height_ || (std::uint32_t) iprev_point[1] >= grad_y.height)
- {
+ if (iprev_point[0] < -track_width_ ||
+ (std::uint32_t)iprev_point[0] >= grad_x.width ||
+ iprev_point[1] < -track_height_ ||
+ (std::uint32_t)iprev_point[1] >= grad_y.height) {
if (level == 0)
- status [ptidx] = -1;
+ status[ptidx] = -1;
continue;
}
float a = prev_pt[0] - iprev_point[0];
float b = prev_pt[1] - iprev_point[1];
Eigen::Array4f weight;
- weight[0] = (1.f - a)*(1.f - b);
- weight[1] = a*(1.f - b);
- weight[2] = (1.f - a)*b;
+ weight[0] = (1.f - a) * (1.f - b);
+ weight[1] = a * (1.f - b);
+ weight[2] = (1.f - a) * b;
weight[3] = 1 - weight[0] - weight[1] - weight[2];
- Eigen::Array3f covar = Eigen::Array3f::Zero ();
- spatialGradient (prev, grad_x, grad_y, iprev_point, weight, prev_win, grad_x_win, grad_y_win, covar);
-
- float det = covar[0]*covar[2] - covar[1]*covar[1];
- float min_eigenvalue = (covar[2] + covar[0] - std::sqrt ((covar[0]-covar[2])*(covar[0]-covar[2]) + 4.f*covar[1]*covar[1]))/2.f;
-
- if (min_eigenvalue < min_eigenvalue_threshold_ || det < std::numeric_limits<float>::epsilon ())
- {
+ Eigen::Array3f covar = Eigen::Array3f::Zero();
+ spatialGradient(prev,
+ grad_x,
+ grad_y,
+ iprev_point,
+ weight,
+ prev_win,
+ grad_x_win,
+ grad_y_win,
+ covar);
+
+ float det = covar[0] * covar[2] - covar[1] * covar[1];
+ float min_eigenvalue = (covar[2] + covar[0] -
+ std::sqrt((covar[0] - covar[2]) * (covar[0] - covar[2]) +
+ 4.f * covar[1] * covar[1])) /
+ 2.f;
+
+ if (min_eigenvalue < min_eigenvalue_threshold_ ||
+ det < std::numeric_limits<float>::epsilon()) {
status[ptidx] = -2;
continue;
}
- det = 1.f/det;
+ det = 1.f / det;
next_pt -= half_win;
- Eigen::Array2f prev_delta (0, 0);
- for (unsigned int j = 0; j < max_iterations_; j++)
- {
- Eigen::Array2i inext_pt = next_pt.floor ().cast<int> ();
+ Eigen::Array2f prev_delta(0, 0);
+ for (unsigned int j = 0; j < max_iterations_; j++) {
+ Eigen::Array2i inext_pt = next_pt.floor().cast<int>();
- if (inext_pt[0] < -track_width_ || (std::uint32_t) inext_pt[0] >= next.width ||
- inext_pt[1] < -track_height_ || (std::uint32_t) inext_pt[1] >= next.height)
- {
+ if (inext_pt[0] < -track_width_ || (std::uint32_t)inext_pt[0] >= next.width ||
+ inext_pt[1] < -track_height_ || (std::uint32_t)inext_pt[1] >= next.height) {
if (level == 0)
status[ptidx] = -1;
          break;
        }
        a = next_pt[0] - inext_pt[0];
b = next_pt[1] - inext_pt[1];
- weight[0] = (1.f - a)*(1.f - b);
- weight[1] = a*(1.f - b);
- weight[2] = (1.f - a)*b;
+ weight[0] = (1.f - a) * (1.f - b);
+ weight[1] = a * (1.f - b);
+ weight[2] = (1.f - a) * b;
weight[3] = 1 - weight[0] - weight[1] - weight[2];
// compute mismatch vector
- Eigen::Array2f beta = Eigen::Array2f::Zero ();
- mismatchVector (prev_win, grad_x_win, grad_y_win, next, inext_pt, weight, beta);
+ Eigen::Array2f beta = Eigen::Array2f::Zero();
+ mismatchVector(prev_win, grad_x_win, grad_y_win, next, inext_pt, weight, beta);
// optical flow resolution
- Eigen::Vector2f delta ((covar[1]*beta[1] - covar[2]*beta[0])*det, (covar[1]*beta[0] - covar[0]*beta[1])*det);
+ Eigen::Vector2f delta((covar[1] * beta[1] - covar[2] * beta[0]) * det,
+ (covar[1] * beta[0] - covar[0] * beta[1]) * det);
// update position
- next_pt[0] += delta[0]; next_pt[1] += delta[1];
+ next_pt[0] += delta[0];
+ next_pt[1] += delta[1];
next_pts[ptidx] = next_pt + half_win;
- if (delta.squaredNorm () <= epsilon_)
+ if (delta.squaredNorm() <= epsilon_)
break;
- if (j > 0 && std::abs (delta[0] + prev_delta[0]) < 0.01 &&
- std::abs (delta[1] + prev_delta[1]) < 0.01 )
- {
- next_pts[ptidx][0] -= delta[0]*0.5f;
- next_pts[ptidx][1] -= delta[1]*0.5f;
+ if (j > 0 && std::abs(delta[0] + prev_delta[0]) < 0.01 &&
+ std::abs(delta[1] + prev_delta[1]) < 0.01) {
+ next_pts[ptidx][0] -= delta[0] * 0.5f;
+ next_pts[ptidx][1] -= delta[1] * 0.5f;
break;
}
        // update delta
        prev_delta = delta;
      }
// update tracked points
- if (level == 0 && !status[ptidx])
- {
+ if (level == 0 && !status[ptidx]) {
Eigen::Array2f next_point = next_pts[ptidx] - half_win;
Eigen::Array2i inext_point;
- inext_point[0] = std::floor (next_point[0]);
- inext_point[1] = std::floor (next_point[1]);
+ inext_point[0] = std::floor(next_point[0]);
+ inext_point[1] = std::floor(next_point[1]);
- if (inext_point[0] < -track_width_ || (std::uint32_t) inext_point[0] >= next.width ||
- inext_point[1] < -track_height_ || (std::uint32_t) inext_point[1] >= next.height)
- {
+ if (inext_point[0] < -track_width_ ||
+ (std::uint32_t)inext_point[0] >= next.width ||
+ inext_point[1] < -track_height_ ||
+ (std::uint32_t)inext_point[1] >= next.height) {
status[ptidx] = -1;
continue;
}
pcl::PointUV n;
n.u = next_pts[ptidx][0];
n.v = next_pts[ptidx][1];
- keypoints->push_back (n);
+ keypoints->push_back(n);
// add points pair to compute transformation
- inext_point[0] = std::floor (next_pts[ptidx][0]);
- inext_point[1] = std::floor (next_pts[ptidx][1]);
- iprev_point[0] = std::floor (prev_keypoints->points[ptidx].u);
- iprev_point[1] = std::floor (prev_keypoints->points[ptidx].v);
- const PointInT& prev_pt = prev_input->points[iprev_point[1]*prev_input->width + iprev_point[0]];
- const PointInT& next_pt = input->points[inext_point[1]*input->width + inext_point[0]];
- transformation_computer.add (prev_pt.getVector3fMap (), next_pt.getVector3fMap (), 1.0);
+ inext_point[0] = std::floor(next_pts[ptidx][0]);
+ inext_point[1] = std::floor(next_pts[ptidx][1]);
+ iprev_point[0] = std::floor((*prev_keypoints)[ptidx].u);
+ iprev_point[1] = std::floor((*prev_keypoints)[ptidx].v);
+ const PointInT& prev_pt =
+ (*prev_input)[iprev_point[1] * prev_input->width + iprev_point[0]];
+ const PointInT& next_pt =
+ (*input)[inext_point[1] * input->width + inext_point[0]];
+ transformation_computer.add(
+ prev_pt.getVector3fMap(), next_pt.getVector3fMap(), 1.0);
}
}
}
-
- motion = transformation_computer.getTransformation ();
+ motion = transformation_computer.getTransformation();
}
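The per-point iteration in track() is the classical pyramidal Lucas-Kanade update: spatialGradient() accumulates the 2x2 structure tensor G = [[sum Ix*Ix, sum Ix*Iy], [sum Ix*Iy, sum Iy*Iy]] into the packed array covar, mismatchVector() accumulates b = [sum Ix*dI, sum Iy*dI], and the displacement is the solution of the 2x2 system delta = -G^{-1} * b. A minimal sketch of just that solve, using the same packed layout (hypothetical helper, not PCL API):

#include <Eigen/Core>
#include <limits>

// covar = {sum Ix*Ix, sum Ix*Iy, sum Iy*Iy}, beta = {sum Ix*dI, sum Iy*dI},
// matching the layout produced by spatialGradient() and mismatchVector() above.
// On success, delta is the displacement added to next_pt (delta = -G^{-1} * beta).
bool
solveLucasKanadeStep(const Eigen::Array3f& covar,
                     const Eigen::Array2f& beta,
                     Eigen::Array2f& delta)
{
  const float det = covar[0] * covar[2] - covar[1] * covar[1];
  if (det < std::numeric_limits<float>::epsilon())
    return false; // structure tensor is (nearly) singular: point is untrackable
  const float inv_det = 1.f / det;
  delta[0] = (covar[1] * beta[1] - covar[2] * beta[0]) * inv_det;
  delta[1] = (covar[1] * beta[0] - covar[0] * beta[1]) * inv_det;
  return true;
}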
-
-template <typename PointInT, typename IntensityT> void
-PyramidalKLTTracker<PointInT, IntensityT>::computeTracking ()
+///////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename IntensityT>
+void
+PyramidalKLTTracker<PointInT, IntensityT>::computeTracking()
{
if (!initialized_)
return;
std::vector<FloatImageConstPtr> pyramid;
- computePyramids (input_, pyramid, pcl::BORDER_REFLECT_101);
- pcl::PointCloud<pcl::PointUV>::Ptr keypoints (new pcl::PointCloud<pcl::PointUV>);
- keypoints->reserve (keypoints_->size ());
- std::vector<int> status (keypoints_->size (), 0);
- track (ref_, input_, ref_pyramid_, pyramid, keypoints_, keypoints, status, motion_);
- //swap reference and input
+ computePyramids(input_, pyramid, pcl::BORDER_REFLECT_101);
+ pcl::PointCloud<pcl::PointUV>::Ptr keypoints(new pcl::PointCloud<pcl::PointUV>);
+ keypoints->reserve(keypoints_->size());
+ std::vector<int> status(keypoints_->size(), 0);
+ track(ref_, input_, ref_pyramid_, pyramid, keypoints_, keypoints, status, motion_);
+ // swap reference and input
ref_ = input_;
ref_pyramid_ = pyramid;
keypoints_ = keypoints;
}
+} // namespace tracking
+} // namespace pcl
#endif
-
#ifndef PCL_TRACKING_IMPL_TRACKER_H_
#define PCL_TRACKING_IMPL_TRACKER_H_
#include <pcl/common/eigen.h>
-#include <ctime>
#include <pcl/tracking/tracker.h>
-template <typename PointInT, typename StateT> bool
-pcl::tracking::Tracker<PointInT, StateT>::initCompute ()
+#include <ctime>
+
+namespace pcl {
+namespace tracking {
+template <typename PointInT, typename StateT>
+bool
+Tracker<PointInT, StateT>::initCompute()
{
- if (!PCLBase<PointInT>::initCompute ())
- {
- PCL_ERROR ("[pcl::%s::initCompute] PCLBase::Init failed.\n", getClassName ().c_str ());
+ if (!PCLBase<PointInT>::initCompute()) {
+ PCL_ERROR("[pcl::%s::initCompute] PCLBase::Init failed.\n", getClassName().c_str());
return (false);
}
// If the dataset is empty, just return
- if (input_->points.empty ())
- {
- PCL_ERROR ("[pcl::%s::compute] input_ is empty!\n", getClassName ().c_str ());
+ if (input_->points.empty()) {
+ PCL_ERROR("[pcl::%s::compute] input_ is empty!\n", getClassName().c_str());
// Cleanup
- deinitCompute ();
+ deinitCompute();
return (false);
}
return (true);
}
-template <typename PointInT, typename StateT> void
-pcl::tracking::Tracker<PointInT, StateT>::compute ()
+template <typename PointInT, typename StateT>
+void
+Tracker<PointInT, StateT>::compute()
{
- if (!initCompute ())
+ if (!initCompute())
return;
-
- computeTracking ();
- deinitCompute ();
+
+ computeTracking();
+ deinitCompute();
}
+} // namespace tracking
+} // namespace pcl
#endif
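The compute() entry point above always runs initCompute(), then the derived tracker's computeTracking(), then deinitCompute(). A hedged usage sketch with a concrete tracker (the choice of KLDAdaptiveParticleFilterOMPTracker and PointXYZRGBA, and the assumption that the coherence, reference cloud and step covariances were configured beforehand, are illustrations rather than part of this patch):

#include <pcl/point_types.h>
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>

using PointT = pcl::PointXYZRGBA;
using ParticleT = pcl::tracking::ParticleXYZRPY;

void
trackFrame(pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<PointT, ParticleT>& tracker,
           const pcl::PointCloud<PointT>::ConstPtr& frame)
{
  tracker.setInputCloud(frame); // new sensor frame
  tracker.compute();            // initCompute -> computeTracking -> deinitCompute
  const ParticleT result = tracker.getResult();        // representative particle
  const Eigen::Affine3f pose = result.toEigenMatrix(); // tracked object pose
  (void)pose;
}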
#ifndef PCL_TRACKING_IMPL_TRACKING_H_
#define PCL_TRACKING_IMPL_TRACKING_H_
+#include <pcl/common/eigen.h>
+#include <pcl/tracking/tracking.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
-#include <pcl/common/eigen.h>
+
#include <ctime>
-#include <pcl/tracking/tracking.h>
-namespace pcl
-{
- namespace tracking
- {
- struct _ParticleXYZRPY
- {
- PCL_ADD_POINT4D;
- union
- {
- struct
- {
- float roll;
- float pitch;
- float yaw;
- float weight;
- };
- float data_c[4];
- };
+namespace pcl {
+namespace tracking {
+struct _ParticleXYZRPY {
+ PCL_ADD_POINT4D;
+ union {
+ struct {
+ float roll;
+ float pitch;
+ float yaw;
+ float weight;
};
+ float data_c[4];
+ };
+};
- // particle definition
- struct EIGEN_ALIGN16 ParticleXYZRPY : public _ParticleXYZRPY
- {
- inline ParticleXYZRPY ()
- {
- x = y = z = roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYZRPY (float _x, float _y, float _z)
- {
- x = _x; y = _y; z = _z;
- roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYZRPY (float _x, float _y, float _z, float _roll, float _pitch, float _yaw)
- {
- x = _x; y = _y; z = _z;
- roll = _roll; pitch = _pitch; yaw = _yaw;
- data[3] = 1.0f;
- }
-
- inline static int
- stateDimension () { return 6; }
-
- void
- sample (const std::vector<double>& mean, const std::vector<double>& cov)
- {
- x += static_cast<float> (sampleNormal (mean[0], cov[0]));
- y += static_cast<float> (sampleNormal (mean[1], cov[1]));
- z += static_cast<float> (sampleNormal (mean[2], cov[2]));
-
- // The roll, pitch, yaw space is not Euclidean, so if we sample roll,
- // pitch, and yaw independently, we bias our sampling in a complicated
- // way that depends on where in the space we are sampling.
- //
- // A solution is to always sample around mean = 0 and project our
- // samples onto the space of rotations, SO(3). We rely on the fact
- // that we are sampling with small variance, so we do not bias
- // ourselves too much with this projection. We then rotate our
- // samples by the user's requested mean. The benefit of this approach
- // is that our distribution's properties are consistent over the space
- // of rotations.
- Eigen::Matrix3f current_rotation;
- current_rotation = getTransformation (x, y, z, roll, pitch, yaw).rotation ();
- Eigen::Quaternionf q_current_rotation (current_rotation);
-
- Eigen::Matrix3f mean_rotation;
- mean_rotation = getTransformation (mean[0], mean[1], mean[2],
- mean[3], mean[4], mean[5]).rotation ();
- Eigen::Quaternionf q_mean_rotation (mean_rotation);
-
- // Scales 1.0 radians of variance in RPY sampling into equivalent units for quaternion sampling.
- const float scale_factor = 0.2862;
-
- float a = sampleNormal (0, scale_factor*cov[3]);
- float b = sampleNormal (0, scale_factor*cov[4]);
- float c = sampleNormal (0, scale_factor*cov[5]);
-
- Eigen::Vector4f vec_sample_mean_0 (a, b, c, 1);
- Eigen::Quaternionf q_sample_mean_0 (vec_sample_mean_0);
- q_sample_mean_0.normalize ();
-
- Eigen::Quaternionf q_sample_user_mean = q_sample_mean_0 * q_mean_rotation * q_current_rotation;
-
- Eigen::Affine3f affine_R (q_sample_user_mean.toRotationMatrix ());
- pcl::getEulerAngles (affine_R, roll, pitch, yaw);
- }
-
- void
- zero ()
- {
- x = 0.0;
- y = 0.0;
- z = 0.0;
- roll = 0.0;
- pitch = 0.0;
- yaw = 0.0;
- }
-
- inline Eigen::Affine3f
- toEigenMatrix () const
- {
- return getTransformation(x, y, z, roll, pitch, yaw);
- }
-
- static pcl::tracking::ParticleXYZRPY
- toState (const Eigen::Affine3f &trans)
- {
- float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
- getTranslationAndEulerAngles (trans,
- trans_x, trans_y, trans_z,
- trans_roll, trans_pitch, trans_yaw);
- return {trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw};
- }
-
- // a[i]
- inline float operator [] (unsigned int i)
- {
- switch (i)
- {
- case 0: return x;
- case 1: return y;
- case 2: return z;
- case 3: return roll;
- case 4: return pitch;
- case 5: return yaw;
- default: return 0.0;
- }
- }
-
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
-
- inline std::ostream& operator << (std::ostream& os, const ParticleXYZRPY& p)
- {
- os << "(" << p.x << "," << p.y << "," << p.z << ","
- << p.roll << "," << p.pitch << "," << p.yaw << ")";
- return (os);
- }
-
- // a * k
- inline pcl::tracking::ParticleXYZRPY operator * (const ParticleXYZRPY& p, double val)
- {
- pcl::tracking::ParticleXYZRPY newp;
- newp.x = static_cast<float> (p.x * val);
- newp.y = static_cast<float> (p.y * val);
- newp.z = static_cast<float> (p.z * val);
- newp.roll = static_cast<float> (p.roll * val);
- newp.pitch = static_cast<float> (p.pitch * val);
- newp.yaw = static_cast<float> (p.yaw * val);
- return (newp);
- }
-
- // a + b
- inline pcl::tracking::ParticleXYZRPY operator + (const ParticleXYZRPY& a, const ParticleXYZRPY& b)
- {
- pcl::tracking::ParticleXYZRPY newp;
- newp.x = a.x + b.x;
- newp.y = a.y + b.y;
- newp.z = a.z + b.z;
- newp.roll = a.roll + b.roll;
- newp.pitch = a.pitch + b.pitch;
- newp.yaw = a.yaw + b.yaw;
- return (newp);
- }
+// particle definition
+struct EIGEN_ALIGN16 ParticleXYZRPY : public _ParticleXYZRPY {
+ inline ParticleXYZRPY()
+ {
+ x = y = z = roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYZRPY(float _x, float _y, float _z)
+ {
+ x = _x;
+ y = _y;
+ z = _z;
+ roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYZRPY(
+ float _x, float _y, float _z, float _roll, float _pitch, float _yaw)
+ {
+ x = _x;
+ y = _y;
+ z = _z;
+ roll = _roll;
+ pitch = _pitch;
+ yaw = _yaw;
+ data[3] = 1.0f;
+ }
+
+ inline static int
+ stateDimension()
+ {
+ return 6;
+ }
+
+ void
+ sample(const std::vector<double>& mean, const std::vector<double>& cov)
+ {
+ x += static_cast<float>(sampleNormal(mean[0], cov[0]));
+ y += static_cast<float>(sampleNormal(mean[1], cov[1]));
+ z += static_cast<float>(sampleNormal(mean[2], cov[2]));
+
+ // The roll, pitch, yaw space is not Euclidean, so if we sample roll,
+ // pitch, and yaw independently, we bias our sampling in a complicated
+ // way that depends on where in the space we are sampling.
+ //
+ // A solution is to always sample around mean = 0 and project our
+ // samples onto the space of rotations, SO(3). We rely on the fact
+ // that we are sampling with small variance, so we do not bias
+ // ourselves too much with this projection. We then rotate our
+ // samples by the user's requested mean. The benefit of this approach
+ // is that our distribution's properties are consistent over the space
+ // of rotations.
+ Eigen::Matrix3f current_rotation;
+ current_rotation = getTransformation(x, y, z, roll, pitch, yaw).rotation();
+ Eigen::Quaternionf q_current_rotation(current_rotation);
+
+ Eigen::Matrix3f mean_rotation;
+ mean_rotation =
+ getTransformation(mean[0], mean[1], mean[2], mean[3], mean[4], mean[5])
+ .rotation();
+ Eigen::Quaternionf q_mean_rotation(mean_rotation);
+
+ // Scales 1.0 radians of variance in RPY sampling into equivalent units for
+ // quaternion sampling.
+ const float scale_factor = 0.2862;
+
+ float a = sampleNormal(0, scale_factor * cov[3]);
+ float b = sampleNormal(0, scale_factor * cov[4]);
+ float c = sampleNormal(0, scale_factor * cov[5]);
+
+ Eigen::Vector4f vec_sample_mean_0(a, b, c, 1);
+ Eigen::Quaternionf q_sample_mean_0(vec_sample_mean_0);
+ q_sample_mean_0.normalize();
+
+ Eigen::Quaternionf q_sample_user_mean =
+ q_sample_mean_0 * q_mean_rotation * q_current_rotation;
+
+ Eigen::Affine3f affine_R(q_sample_user_mean.toRotationMatrix());
+ pcl::getEulerAngles(affine_R, roll, pitch, yaw);
+ }
+
+ void
+ zero()
+ {
+ x = 0.0;
+ y = 0.0;
+ z = 0.0;
+ roll = 0.0;
+ pitch = 0.0;
+ yaw = 0.0;
+ }
- // a - b
- inline pcl::tracking::ParticleXYZRPY operator - (const ParticleXYZRPY& a, const ParticleXYZRPY& b)
- {
- pcl::tracking::ParticleXYZRPY newp;
- newp.x = a.x - b.x;
- newp.y = a.y - b.y;
- newp.z = a.z - b.z;
- newp.roll = a.roll - b.roll;
- newp.pitch = a.pitch - b.pitch;
- newp.yaw = a.yaw - b.yaw;
- return (newp);
+ inline Eigen::Affine3f
+ toEigenMatrix() const
+ {
+ return getTransformation(x, y, z, roll, pitch, yaw);
+ }
+
+ static ParticleXYZRPY
+ toState(const Eigen::Affine3f& trans)
+ {
+ float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
+ getTranslationAndEulerAngles(
+ trans, trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw);
+ return {trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw};
+ }
+
+ // a[i]
+ inline float
+ operator[](unsigned int i)
+ {
+ switch (i) {
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return roll;
+ case 4:
+ return pitch;
+ case 5:
+ return yaw;
+ default:
+ return 0.0;
}
-
}
-}
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
-//########################################################################33
+inline std::ostream&
+operator<<(std::ostream& os, const ParticleXYZRPY& p)
+{
+ os << "(" << p.x << "," << p.y << "," << p.z << "," << p.roll << "," << p.pitch << ","
+ << p.yaw << ")";
+ return (os);
+}
+// a * k
+inline ParticleXYZRPY
+operator*(const ParticleXYZRPY& p, double val)
+{
+ pcl::tracking::ParticleXYZRPY newp;
+ newp.x = static_cast<float>(p.x * val);
+ newp.y = static_cast<float>(p.y * val);
+ newp.z = static_cast<float>(p.z * val);
+ newp.roll = static_cast<float>(p.roll * val);
+ newp.pitch = static_cast<float>(p.pitch * val);
+ newp.yaw = static_cast<float>(p.yaw * val);
+ return (newp);
+}
-namespace pcl
+// a + b
+inline ParticleXYZRPY
+operator+(const ParticleXYZRPY& a, const ParticleXYZRPY& b)
{
- namespace tracking
- {
- struct _ParticleXYZR
- {
- PCL_ADD_POINT4D;
- union
- {
- struct
- {
- float roll;
- float pitch;
- float yaw;
- float weight;
- };
- float data_c[4];
- };
- };
+ pcl::tracking::ParticleXYZRPY newp;
+ newp.x = a.x + b.x;
+ newp.y = a.y + b.y;
+ newp.z = a.z + b.z;
+ newp.roll = a.roll + b.roll;
+ newp.pitch = a.pitch + b.pitch;
+ newp.yaw = a.yaw + b.yaw;
+ return (newp);
+}
+
+// a - b
+inline ParticleXYZRPY
+operator-(const ParticleXYZRPY& a, const ParticleXYZRPY& b)
+{
+ pcl::tracking::ParticleXYZRPY newp;
+ newp.x = a.x - b.x;
+ newp.y = a.y - b.y;
+ newp.z = a.z - b.z;
+ newp.roll = a.roll - b.roll;
+ newp.pitch = a.pitch - b.pitch;
+ newp.yaw = a.yaw - b.yaw;
+ return (newp);
+}
+
+} // namespace tracking
+} // namespace pcl
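The block comment inside ParticleXYZRPY::sample() above explains why orientation noise is drawn as a small quaternion around the identity and then composed with the mean and current rotations, rather than sampling roll, pitch and yaw independently. A stripped-down sketch of that projection idea using Eigen only (the helper name, the use of std::mt19937 and the omission of the 0.2862 scale factor are simplifications, not part of this patch):

#include <Eigen/Geometry>
#include <random>

// Sample a small rotation: draw (x, y, z) around zero, keep w = 1, normalize to
// project onto SO(3), then compose with the mean rotation.
Eigen::Quaternionf
perturbRotation(const Eigen::Quaternionf& mean_rotation, float sigma)
{
  static std::mt19937 gen{std::random_device{}()};
  std::normal_distribution<float> noise(0.f, sigma);
  Eigen::Quaternionf dq(1.f, noise(gen), noise(gen), noise(gen)); // (w, x, y, z)
  dq.normalize();
  return (dq * mean_rotation).normalized();
}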
+
+//########################################################################33
- // particle definition
- struct EIGEN_ALIGN16 ParticleXYZR : public _ParticleXYZR
- {
- inline ParticleXYZR ()
- {
- x = y = z = roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYZR (float _x, float _y, float _z)
- {
- x = _x; y = _y; z = _z;
- roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYZR (float _x, float _y, float _z, float, float _pitch, float)
- {
- x = _x; y = _y; z = _z;
- roll = 0; pitch = _pitch; yaw = 0;
- data[3] = 1.0f;
- }
-
- inline static int
- stateDimension () { return 6; }
-
- void
- sample (const std::vector<double>& mean, const std::vector<double>& cov)
- {
- x += static_cast<float> (sampleNormal (mean[0], cov[0]));
- y += static_cast<float> (sampleNormal (mean[1], cov[1]));
- z += static_cast<float> (sampleNormal (mean[2], cov[2]));
- roll = 0;
- pitch += static_cast<float> (sampleNormal (mean[4], cov[4]));
- yaw = 0;
- }
-
- void
- zero ()
- {
- x = 0.0;
- y = 0.0;
- z = 0.0;
- roll = 0.0;
- pitch = 0.0;
- yaw = 0.0;
- }
-
- inline Eigen::Affine3f
- toEigenMatrix () const
- {
- return getTransformation(x, y, z, roll, pitch, yaw);
- }
-
- static pcl::tracking::ParticleXYZR
- toState (const Eigen::Affine3f &trans)
- {
- float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
- getTranslationAndEulerAngles (trans,
- trans_x, trans_y, trans_z,
- trans_roll, trans_pitch, trans_yaw);
- return (pcl::tracking::ParticleXYZR (trans_x, trans_y, trans_z, 0, trans_pitch, 0));
- }
-
- // a[i]
- inline float operator [] (unsigned int i)
- {
- switch (i)
- {
- case 0: return x;
- case 1: return y;
- case 2: return z;
- case 3: return roll;
- case 4: return pitch;
- case 5: return yaw;
- default: return 0.0;
- }
- }
-
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+namespace pcl {
+namespace tracking {
+struct _ParticleXYZR {
+ PCL_ADD_POINT4D;
+ union {
+ struct {
+ float roll;
+ float pitch;
+ float yaw;
+ float weight;
};
-
- inline std::ostream& operator << (std::ostream& os, const ParticleXYZR& p)
- {
- os << "(" << p.x << "," << p.y << "," << p.z << ","
- << p.roll << "," << p.pitch << "," << p.yaw << ")";
- return (os);
- }
-
- // a * k
- inline pcl::tracking::ParticleXYZR operator * (const ParticleXYZR& p, double val)
- {
- pcl::tracking::ParticleXYZR newp;
- newp.x = static_cast<float> (p.x * val);
- newp.y = static_cast<float> (p.y * val);
- newp.z = static_cast<float> (p.z * val);
- newp.roll = static_cast<float> (p.roll * val);
- newp.pitch = static_cast<float> (p.pitch * val);
- newp.yaw = static_cast<float> (p.yaw * val);
- return (newp);
- }
-
- // a + b
- inline pcl::tracking::ParticleXYZR operator + (const ParticleXYZR& a, const ParticleXYZR& b)
- {
- pcl::tracking::ParticleXYZR newp;
- newp.x = a.x + b.x;
- newp.y = a.y + b.y;
- newp.z = a.z + b.z;
- newp.roll = 0;
- newp.pitch = a.pitch + b.pitch;
- newp.yaw = 0.0;
- return (newp);
- }
+ float data_c[4];
+ };
+};
+
+// particle definition
+struct EIGEN_ALIGN16 ParticleXYZR : public _ParticleXYZR {
+ inline ParticleXYZR()
+ {
+ x = y = z = roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
- // a - b
- inline pcl::tracking::ParticleXYZR operator - (const ParticleXYZR& a, const ParticleXYZR& b)
- {
- pcl::tracking::ParticleXYZR newp;
- newp.x = a.x - b.x;
- newp.y = a.y - b.y;
- newp.z = a.z - b.z;
- newp.roll = 0.0;
- newp.pitch = a.pitch - b.pitch;
- newp.yaw = 0.0;
- return (newp);
+ inline ParticleXYZR(float _x, float _y, float _z)
+ {
+ x = _x;
+ y = _y;
+ z = _z;
+ roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYZR(float _x, float _y, float _z, float, float _pitch, float)
+ {
+ x = _x;
+ y = _y;
+ z = _z;
+ roll = 0;
+ pitch = _pitch;
+ yaw = 0;
+ data[3] = 1.0f;
+ }
+
+ inline static int
+ stateDimension()
+ {
+ return 6;
+ }
+
+ void
+ sample(const std::vector<double>& mean, const std::vector<double>& cov)
+ {
+ x += static_cast<float>(sampleNormal(mean[0], cov[0]));
+ y += static_cast<float>(sampleNormal(mean[1], cov[1]));
+ z += static_cast<float>(sampleNormal(mean[2], cov[2]));
+ roll = 0;
+ pitch += static_cast<float>(sampleNormal(mean[4], cov[4]));
+ yaw = 0;
+ }
+
+ void
+ zero()
+ {
+ x = 0.0;
+ y = 0.0;
+ z = 0.0;
+ roll = 0.0;
+ pitch = 0.0;
+ yaw = 0.0;
+ }
+
+ inline Eigen::Affine3f
+ toEigenMatrix() const
+ {
+ return getTransformation(x, y, z, roll, pitch, yaw);
+ }
+
+ static ParticleXYZR
+ toState(const Eigen::Affine3f& trans)
+ {
+ float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
+ getTranslationAndEulerAngles(
+ trans, trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw);
+ return (pcl::tracking::ParticleXYZR(trans_x, trans_y, trans_z, 0, trans_pitch, 0));
+ }
+
+ // a[i]
+ inline float
+ operator[](unsigned int i)
+ {
+ switch (i) {
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return roll;
+ case 4:
+ return pitch;
+ case 5:
+ return yaw;
+ default:
+ return 0.0;
}
-
}
-}
-//########################################################################33
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+inline std::ostream&
+operator<<(std::ostream& os, const ParticleXYZR& p)
+{
+ os << "(" << p.x << "," << p.y << "," << p.z << "," << p.roll << "," << p.pitch << ","
+ << p.yaw << ")";
+ return (os);
+}
+// a * k
+inline ParticleXYZR
+operator*(const ParticleXYZR& p, double val)
+{
+ pcl::tracking::ParticleXYZR newp;
+ newp.x = static_cast<float>(p.x * val);
+ newp.y = static_cast<float>(p.y * val);
+ newp.z = static_cast<float>(p.z * val);
+ newp.roll = static_cast<float>(p.roll * val);
+ newp.pitch = static_cast<float>(p.pitch * val);
+ newp.yaw = static_cast<float>(p.yaw * val);
+ return (newp);
+}
+// a + b
+inline ParticleXYZR
+operator+(const ParticleXYZR& a, const ParticleXYZR& b)
+{
+ pcl::tracking::ParticleXYZR newp;
+ newp.x = a.x + b.x;
+ newp.y = a.y + b.y;
+ newp.z = a.z + b.z;
+ newp.roll = 0;
+ newp.pitch = a.pitch + b.pitch;
+ newp.yaw = 0.0;
+ return (newp);
+}
-namespace pcl
+// a - b
+inline ParticleXYZR
+operator-(const ParticleXYZR& a, const ParticleXYZR& b)
{
- namespace tracking
- {
- struct _ParticleXYRPY
- {
- PCL_ADD_POINT4D;
- union
- {
- struct
- {
- float roll;
- float pitch;
- float yaw;
- float weight;
- };
- float data_c[4];
- };
- };
+ pcl::tracking::ParticleXYZR newp;
+ newp.x = a.x - b.x;
+ newp.y = a.y - b.y;
+ newp.z = a.z - b.z;
+ newp.roll = 0.0;
+ newp.pitch = a.pitch - b.pitch;
+ newp.yaw = 0.0;
+ return (newp);
+}
+
+} // namespace tracking
+} // namespace pcl
- // particle definition
- struct EIGEN_ALIGN16 ParticleXYRPY : public _ParticleXYRPY
- {
- inline ParticleXYRPY ()
- {
- x = y = z = roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYRPY (float _x, float, float _z)
- {
- x = _x; y = 0; z = _z;
- roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYRPY (float _x, float, float _z, float _roll, float _pitch, float _yaw)
- {
- x = _x; y = 0; z = _z;
- roll = _roll; pitch = _pitch; yaw = _yaw;
- data[3] = 1.0f;
- }
-
- inline static int
- stateDimension () { return 6; }
-
- void
- sample (const std::vector<double>& mean, const std::vector<double>& cov)
- {
- x += static_cast<float> (sampleNormal (mean[0], cov[0]));
- y = 0;
- z += static_cast<float> (sampleNormal (mean[2], cov[2]));
- roll += static_cast<float> (sampleNormal (mean[3], cov[3]));
- pitch += static_cast<float> (sampleNormal (mean[4], cov[4]));
- yaw += static_cast<float> (sampleNormal (mean[5], cov[5]));
- }
-
- void
- zero ()
- {
- x = 0.0;
- y = 0.0;
- z = 0.0;
- roll = 0.0;
- pitch = 0.0;
- yaw = 0.0;
- }
-
- inline Eigen::Affine3f
- toEigenMatrix () const
- {
- return getTransformation(x, y, z, roll, pitch, yaw);
- }
-
- static pcl::tracking::ParticleXYRPY
- toState (const Eigen::Affine3f &trans)
- {
- float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
- getTranslationAndEulerAngles (trans,
- trans_x, trans_y, trans_z,
- trans_roll, trans_pitch, trans_yaw);
- return (pcl::tracking::ParticleXYRPY (trans_x, 0, trans_z, trans_roll, trans_pitch, trans_yaw));
- }
-
- // a[i]
- inline float operator [] (unsigned int i)
- {
- switch (i)
- {
- case 0: return x;
- case 1: return y;
- case 2: return z;
- case 3: return roll;
- case 4: return pitch;
- case 5: return yaw;
- default: return 0.0;
- }
- }
-
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+//########################################################################33
+
+namespace pcl {
+namespace tracking {
+struct _ParticleXYRPY {
+ PCL_ADD_POINT4D;
+ union {
+ struct {
+ float roll;
+ float pitch;
+ float yaw;
+ float weight;
};
-
- inline std::ostream& operator << (std::ostream& os, const ParticleXYRPY& p)
- {
- os << "(" << p.x << "," << p.y << "," << p.z << ","
- << p.roll << "," << p.pitch << "," << p.yaw << ")";
- return (os);
- }
-
- // a * k
- inline pcl::tracking::ParticleXYRPY operator * (const ParticleXYRPY& p, double val)
- {
- pcl::tracking::ParticleXYRPY newp;
- newp.x = static_cast<float> (p.x * val);
- newp.y = static_cast<float> (p.y * val);
- newp.z = static_cast<float> (p.z * val);
- newp.roll = static_cast<float> (p.roll * val);
- newp.pitch = static_cast<float> (p.pitch * val);
- newp.yaw = static_cast<float> (p.yaw * val);
- return (newp);
- }
-
- // a + b
- inline pcl::tracking::ParticleXYRPY operator + (const ParticleXYRPY& a, const ParticleXYRPY& b)
- {
- pcl::tracking::ParticleXYRPY newp;
- newp.x = a.x + b.x;
- newp.y = 0;
- newp.z = a.z + b.z;
- newp.roll = a.roll + b.roll;
- newp.pitch = a.pitch + b.pitch;
- newp.yaw = a.yaw + b.yaw;
- return (newp);
- }
+ float data_c[4];
+ };
+};
+
+// particle definition
+struct EIGEN_ALIGN16 ParticleXYRPY : public _ParticleXYRPY {
+ inline ParticleXYRPY()
+ {
+ x = y = z = roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYRPY(float _x, float, float _z)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYRPY(float _x, float, float _z, float _roll, float _pitch, float _yaw)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = _roll;
+ pitch = _pitch;
+ yaw = _yaw;
+ data[3] = 1.0f;
+ }
+
+ inline static int
+ stateDimension()
+ {
+ return 6;
+ }
+
+ void
+ sample(const std::vector<double>& mean, const std::vector<double>& cov)
+ {
+ x += static_cast<float>(sampleNormal(mean[0], cov[0]));
+ y = 0;
+ z += static_cast<float>(sampleNormal(mean[2], cov[2]));
+ roll += static_cast<float>(sampleNormal(mean[3], cov[3]));
+ pitch += static_cast<float>(sampleNormal(mean[4], cov[4]));
+ yaw += static_cast<float>(sampleNormal(mean[5], cov[5]));
+ }
- // a - b
- inline pcl::tracking::ParticleXYRPY operator - (const ParticleXYRPY& a, const ParticleXYRPY& b)
- {
- pcl::tracking::ParticleXYRPY newp;
- newp.x = a.x - b.x;
- newp.z = a.z - b.z;
- newp.y = 0;
- newp.roll = a.roll - b.roll;
- newp.pitch = a.pitch - b.pitch;
- newp.yaw = a.yaw - b.yaw;
- return (newp);
+ void
+ zero()
+ {
+ x = 0.0;
+ y = 0.0;
+ z = 0.0;
+ roll = 0.0;
+ pitch = 0.0;
+ yaw = 0.0;
+ }
+
+ inline Eigen::Affine3f
+ toEigenMatrix() const
+ {
+ return getTransformation(x, y, z, roll, pitch, yaw);
+ }
+
+ static ParticleXYRPY
+ toState(const Eigen::Affine3f& trans)
+ {
+ float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
+ getTranslationAndEulerAngles(
+ trans, trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw);
+ return (pcl::tracking::ParticleXYRPY(
+ trans_x, 0, trans_z, trans_roll, trans_pitch, trans_yaw));
+ }
+
+ // a[i]
+ inline float
+ operator[](unsigned int i)
+ {
+ switch (i) {
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return roll;
+ case 4:
+ return pitch;
+ case 5:
+ return yaw;
+ default:
+ return 0.0;
}
-
}
+
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+inline std::ostream&
+operator<<(std::ostream& os, const ParticleXYRPY& p)
+{
+ os << "(" << p.x << "," << p.y << "," << p.z << "," << p.roll << "," << p.pitch << ","
+ << p.yaw << ")";
+ return (os);
}
-//########################################################################33
+// a * k
+inline ParticleXYRPY
+operator*(const ParticleXYRPY& p, double val)
+{
+ pcl::tracking::ParticleXYRPY newp;
+ newp.x = static_cast<float>(p.x * val);
+ newp.y = static_cast<float>(p.y * val);
+ newp.z = static_cast<float>(p.z * val);
+ newp.roll = static_cast<float>(p.roll * val);
+ newp.pitch = static_cast<float>(p.pitch * val);
+ newp.yaw = static_cast<float>(p.yaw * val);
+ return (newp);
+}
-namespace pcl
+// a + b
+inline ParticleXYRPY
+operator+(const ParticleXYRPY& a, const ParticleXYRPY& b)
{
- namespace tracking
- {
- struct _ParticleXYRP
- {
- PCL_ADD_POINT4D;
- union
- {
- struct
- {
- float roll;
- float pitch;
- float yaw;
- float weight;
- };
- float data_c[4];
- };
- };
+ pcl::tracking::ParticleXYRPY newp;
+ newp.x = a.x + b.x;
+ newp.y = 0;
+ newp.z = a.z + b.z;
+ newp.roll = a.roll + b.roll;
+ newp.pitch = a.pitch + b.pitch;
+ newp.yaw = a.yaw + b.yaw;
+ return (newp);
+}
+
+// a - b
+inline ParticleXYRPY
+operator-(const ParticleXYRPY& a, const ParticleXYRPY& b)
+{
+ pcl::tracking::ParticleXYRPY newp;
+ newp.x = a.x - b.x;
+ newp.z = a.z - b.z;
+ newp.y = 0;
+ newp.roll = a.roll - b.roll;
+ newp.pitch = a.pitch - b.pitch;
+ newp.yaw = a.yaw - b.yaw;
+ return (newp);
+}
- // particle definition
- struct EIGEN_ALIGN16 ParticleXYRP : public _ParticleXYRP
- {
- inline ParticleXYRP ()
- {
- x = y = z = roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYRP (float _x, float, float _z)
- {
- x = _x; y = 0; z = _z;
- roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYRP (float _x, float, float _z, float, float _pitch, float _yaw)
- {
- x = _x; y = 0; z = _z;
- roll = 0; pitch = _pitch; yaw = _yaw;
- data[3] = 1.0f;
- }
-
- inline static int
- stateDimension () { return 6; }
-
- void
- sample (const std::vector<double>& mean, const std::vector<double>& cov)
- {
- x += static_cast<float> (sampleNormal (mean[0], cov[0]));
- y = 0;
- z += static_cast<float> (sampleNormal (mean[2], cov[2]));
- roll = 0;
- pitch += static_cast<float> (sampleNormal (mean[4], cov[4]));
- yaw += static_cast<float> (sampleNormal (mean[5], cov[5]));
- }
-
- void
- zero ()
- {
- x = 0.0;
- y = 0.0;
- z = 0.0;
- roll = 0.0;
- pitch = 0.0;
- yaw = 0.0;
- }
-
- inline Eigen::Affine3f
- toEigenMatrix () const
- {
- return getTransformation(x, y, z, roll, pitch, yaw);
- }
-
- static pcl::tracking::ParticleXYRP
- toState (const Eigen::Affine3f &trans)
- {
- float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
- getTranslationAndEulerAngles (trans,
- trans_x, trans_y, trans_z,
- trans_roll, trans_pitch, trans_yaw);
- return (pcl::tracking::ParticleXYRP (trans_x, 0, trans_z, 0, trans_pitch, trans_yaw));
- }
-
- // a[i]
- inline float operator [] (unsigned int i)
- {
- switch (i)
- {
- case 0: return x;
- case 1: return y;
- case 2: return z;
- case 3: return roll;
- case 4: return pitch;
- case 5: return yaw;
- default: return 0.0;
- }
- }
-
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+} // namespace tracking
+} // namespace pcl
+
+//########################################################################33
+
+namespace pcl {
+namespace tracking {
+struct _ParticleXYRP {
+ PCL_ADD_POINT4D;
+ union {
+ struct {
+ float roll;
+ float pitch;
+ float yaw;
+ float weight;
};
-
- inline std::ostream& operator << (std::ostream& os, const ParticleXYRP& p)
- {
- os << "(" << p.x << "," << p.y << "," << p.z << ","
- << p.roll << "," << p.pitch << "," << p.yaw << ")";
- return (os);
- }
-
- // a * k
- inline pcl::tracking::ParticleXYRP operator * (const ParticleXYRP& p, double val)
- {
- pcl::tracking::ParticleXYRP newp;
- newp.x = static_cast<float> (p.x * val);
- newp.y = static_cast<float> (p.y * val);
- newp.z = static_cast<float> (p.z * val);
- newp.roll = static_cast<float> (p.roll * val);
- newp.pitch = static_cast<float> (p.pitch * val);
- newp.yaw = static_cast<float> (p.yaw * val);
- return (newp);
- }
-
- // a + b
- inline pcl::tracking::ParticleXYRP operator + (const ParticleXYRP& a, const ParticleXYRP& b)
- {
- pcl::tracking::ParticleXYRP newp;
- newp.x = a.x + b.x;
- newp.y = 0;
- newp.z = a.z + b.z;
- newp.roll = 0;
- newp.pitch = a.pitch + b.pitch;
- newp.yaw = a.yaw + b.yaw;
- return (newp);
- }
+ float data_c[4];
+ };
+};
- // a - b
- inline pcl::tracking::ParticleXYRP operator - (const ParticleXYRP& a, const ParticleXYRP& b)
- {
- pcl::tracking::ParticleXYRP newp;
- newp.x = a.x - b.x;
- newp.z = a.z - b.z;
- newp.y = 0;
- newp.roll = 0.0;
- newp.pitch = a.pitch - b.pitch;
- newp.yaw = a.yaw - b.yaw;
- return (newp);
+// particle definition
+struct EIGEN_ALIGN16 ParticleXYRP : public _ParticleXYRP {
+ inline ParticleXYRP()
+ {
+ x = y = z = roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYRP(float _x, float, float _z)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYRP(float _x, float, float _z, float, float _pitch, float _yaw)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = 0;
+ pitch = _pitch;
+ yaw = _yaw;
+ data[3] = 1.0f;
+ }
+
+ inline static int
+ stateDimension()
+ {
+ return 6;
+ }
+
+ void
+ sample(const std::vector<double>& mean, const std::vector<double>& cov)
+ {
+ x += static_cast<float>(sampleNormal(mean[0], cov[0]));
+ y = 0;
+ z += static_cast<float>(sampleNormal(mean[2], cov[2]));
+ roll = 0;
+ pitch += static_cast<float>(sampleNormal(mean[4], cov[4]));
+ yaw += static_cast<float>(sampleNormal(mean[5], cov[5]));
+ }
+
+ void
+ zero()
+ {
+ x = 0.0;
+ y = 0.0;
+ z = 0.0;
+ roll = 0.0;
+ pitch = 0.0;
+ yaw = 0.0;
+ }
+
+ inline Eigen::Affine3f
+ toEigenMatrix() const
+ {
+ return getTransformation(x, y, z, roll, pitch, yaw);
+ }
+
+ static ParticleXYRP
+ toState(const Eigen::Affine3f& trans)
+ {
+ float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
+ getTranslationAndEulerAngles(
+ trans, trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw);
+ return (
+ pcl::tracking::ParticleXYRP(trans_x, 0, trans_z, 0, trans_pitch, trans_yaw));
+ }
+
+ // a[i]
+ inline float
+ operator[](unsigned int i)
+ {
+ switch (i) {
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return roll;
+ case 4:
+ return pitch;
+ case 5:
+ return yaw;
+ default:
+ return 0.0;
}
-
}
+
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+inline std::ostream&
+operator<<(std::ostream& os, const ParticleXYRP& p)
+{
+ os << "(" << p.x << "," << p.y << "," << p.z << "," << p.roll << "," << p.pitch << ","
+ << p.yaw << ")";
+ return (os);
}
-//########################################################################33
+// a * k
+inline ParticleXYRP
+operator*(const ParticleXYRP& p, double val)
+{
+ pcl::tracking::ParticleXYRP newp;
+ newp.x = static_cast<float>(p.x * val);
+ newp.y = static_cast<float>(p.y * val);
+ newp.z = static_cast<float>(p.z * val);
+ newp.roll = static_cast<float>(p.roll * val);
+ newp.pitch = static_cast<float>(p.pitch * val);
+ newp.yaw = static_cast<float>(p.yaw * val);
+ return (newp);
+}
-namespace pcl
+// a + b
+inline ParticleXYRP
+operator+(const ParticleXYRP& a, const ParticleXYRP& b)
{
- namespace tracking
- {
- struct _ParticleXYR
- {
- PCL_ADD_POINT4D;
- union
- {
- struct
- {
- float roll;
- float pitch;
- float yaw;
- float weight;
- };
- float data_c[4];
- };
- };
+ pcl::tracking::ParticleXYRP newp;
+ newp.x = a.x + b.x;
+ newp.y = 0;
+ newp.z = a.z + b.z;
+ newp.roll = 0;
+ newp.pitch = a.pitch + b.pitch;
+ newp.yaw = a.yaw + b.yaw;
+ return (newp);
+}
+
+// a - b
+inline ParticleXYRP
+operator-(const ParticleXYRP& a, const ParticleXYRP& b)
+{
+ pcl::tracking::ParticleXYRP newp;
+ newp.x = a.x - b.x;
+ newp.z = a.z - b.z;
+ newp.y = 0;
+ newp.roll = 0.0;
+ newp.pitch = a.pitch - b.pitch;
+ newp.yaw = a.yaw - b.yaw;
+ return (newp);
+}
- // particle definition
- struct EIGEN_ALIGN16 ParticleXYR : public _ParticleXYR
- {
- inline ParticleXYR ()
- {
- x = y = z = roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYR (float _x, float, float _z)
- {
- x = _x; y = 0; z = _z;
- roll = pitch = yaw = 0.0;
- data[3] = 1.0f;
- }
-
- inline ParticleXYR (float _x, float, float _z, float, float _pitch, float)
- {
- x = _x; y = 0; z = _z;
- roll = 0; pitch = _pitch; yaw = 0;
- data[3] = 1.0f;
- }
-
- inline static int
- stateDimension () { return 6; }
-
- void
- sample (const std::vector<double>& mean, const std::vector<double>& cov)
- {
- x += static_cast<float> (sampleNormal (mean[0], cov[0]));
- y = 0;
- z += static_cast<float> (sampleNormal (mean[2], cov[2]));
- roll = 0;
- pitch += static_cast<float> (sampleNormal (mean[4], cov[4]));
- yaw = 0;
- }
-
- void
- zero ()
- {
- x = 0.0;
- y = 0.0;
- z = 0.0;
- roll = 0.0;
- pitch = 0.0;
- yaw = 0.0;
- }
-
- inline Eigen::Affine3f
- toEigenMatrix () const
- {
- return getTransformation(x, y, z, roll, pitch, yaw);
- }
-
- static pcl::tracking::ParticleXYR
- toState (const Eigen::Affine3f &trans)
- {
- float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
- getTranslationAndEulerAngles (trans,
- trans_x, trans_y, trans_z,
- trans_roll, trans_pitch, trans_yaw);
- return (pcl::tracking::ParticleXYR (trans_x, 0, trans_z, 0, trans_pitch, 0));
- }
-
- // a[i]
- inline float operator [] (unsigned int i)
- {
- switch (i)
- {
- case 0: return x;
- case 1: return y;
- case 2: return z;
- case 3: return roll;
- case 4: return pitch;
- case 5: return yaw;
- default: return 0.0;
- }
- }
-
- PCL_MAKE_ALIGNED_OPERATOR_NEW
+} // namespace tracking
+} // namespace pcl
+
+//########################################################################33
+
+namespace pcl {
+namespace tracking {
+struct _ParticleXYR {
+ PCL_ADD_POINT4D;
+ union {
+ struct {
+ float roll;
+ float pitch;
+ float yaw;
+ float weight;
};
-
- inline std::ostream& operator << (std::ostream& os, const ParticleXYR& p)
- {
- os << "(" << p.x << "," << p.y << "," << p.z << ","
- << p.roll << "," << p.pitch << "," << p.yaw << ")";
- return (os);
- }
-
- // a * k
- inline pcl::tracking::ParticleXYR operator * (const ParticleXYR& p, double val)
- {
- pcl::tracking::ParticleXYR newp;
- newp.x = static_cast<float> (p.x * val);
- newp.y = static_cast<float> (p.y * val);
- newp.z = static_cast<float> (p.z * val);
- newp.roll = static_cast<float> (p.roll * val);
- newp.pitch = static_cast<float> (p.pitch * val);
- newp.yaw = static_cast<float> (p.yaw * val);
- return (newp);
- }
-
- // a + b
- inline pcl::tracking::ParticleXYR operator + (const ParticleXYR& a, const ParticleXYR& b)
- {
- pcl::tracking::ParticleXYR newp;
- newp.x = a.x + b.x;
- newp.y = 0;
- newp.z = a.z + b.z;
- newp.roll = 0;
- newp.pitch = a.pitch + b.pitch;
- newp.yaw = 0.0;
- return (newp);
- }
+ float data_c[4];
+ };
+};
+
+// particle definition
+struct EIGEN_ALIGN16 ParticleXYR : public _ParticleXYR {
+ inline ParticleXYR()
+ {
+ x = y = z = roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYR(float _x, float, float _z)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = pitch = yaw = 0.0;
+ data[3] = 1.0f;
+ }
+
+ inline ParticleXYR(float _x, float, float _z, float, float _pitch, float)
+ {
+ x = _x;
+ y = 0;
+ z = _z;
+ roll = 0;
+ pitch = _pitch;
+ yaw = 0;
+ data[3] = 1.0f;
+ }
+
+ inline static int
+ stateDimension()
+ {
+ return 6;
+ }
- // a - b
- inline pcl::tracking::ParticleXYR operator - (const ParticleXYR& a, const ParticleXYR& b)
- {
- pcl::tracking::ParticleXYR newp;
- newp.x = a.x - b.x;
- newp.z = a.z - b.z;
- newp.y = 0;
- newp.roll = 0.0;
- newp.pitch = a.pitch - b.pitch;
- newp.yaw = 0.0;
- return (newp);
+ void
+ sample(const std::vector<double>& mean, const std::vector<double>& cov)
+ {
+ x += static_cast<float>(sampleNormal(mean[0], cov[0]));
+ y = 0;
+ z += static_cast<float>(sampleNormal(mean[2], cov[2]));
+ roll = 0;
+ pitch += static_cast<float>(sampleNormal(mean[4], cov[4]));
+ yaw = 0;
+ }
+
+ void
+ zero()
+ {
+ x = 0.0;
+ y = 0.0;
+ z = 0.0;
+ roll = 0.0;
+ pitch = 0.0;
+ yaw = 0.0;
+ }
+
+ inline Eigen::Affine3f
+ toEigenMatrix() const
+ {
+ return getTransformation(x, y, z, roll, pitch, yaw);
+ }
+
+ static ParticleXYR
+ toState(const Eigen::Affine3f& trans)
+ {
+ float trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw;
+ getTranslationAndEulerAngles(
+ trans, trans_x, trans_y, trans_z, trans_roll, trans_pitch, trans_yaw);
+ return (pcl::tracking::ParticleXYR(trans_x, 0, trans_z, 0, trans_pitch, 0));
+ }
+
+ // a[i]
+ inline float
+ operator[](unsigned int i)
+ {
+ switch (i) {
+ case 0:
+ return x;
+ case 1:
+ return y;
+ case 2:
+ return z;
+ case 3:
+ return roll;
+ case 4:
+ return pitch;
+ case 5:
+ return yaw;
+ default:
+ return 0.0;
}
-
}
+
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+inline std::ostream&
+operator<<(std::ostream& os, const ParticleXYR& p)
+{
+ os << "(" << p.x << "," << p.y << "," << p.z << "," << p.roll << "," << p.pitch << ","
+ << p.yaw << ")";
+ return (os);
}
-#define PCL_STATE_POINT_TYPES \
- (pcl::tracking::ParticleXYR) \
- (pcl::tracking::ParticleXYZRPY) \
- (pcl::tracking::ParticleXYZR) \
- (pcl::tracking::ParticleXYRPY) \
- (pcl::tracking::ParticleXYRP)
+// a * k
+inline ParticleXYR
+operator*(const ParticleXYR& p, double val)
+{
+ pcl::tracking::ParticleXYR newp;
+ newp.x = static_cast<float>(p.x * val);
+ newp.y = static_cast<float>(p.y * val);
+ newp.z = static_cast<float>(p.z * val);
+ newp.roll = static_cast<float>(p.roll * val);
+ newp.pitch = static_cast<float>(p.pitch * val);
+ newp.yaw = static_cast<float>(p.yaw * val);
+ return (newp);
+}
+
+// a + b
+inline ParticleXYR
+operator+(const ParticleXYR& a, const ParticleXYR& b)
+{
+ pcl::tracking::ParticleXYR newp;
+ newp.x = a.x + b.x;
+ newp.y = 0;
+ newp.z = a.z + b.z;
+ newp.roll = 0;
+ newp.pitch = a.pitch + b.pitch;
+ newp.yaw = 0.0;
+ return (newp);
+}
+
+// a - b
+inline ParticleXYR
+operator-(const ParticleXYR& a, const ParticleXYR& b)
+{
+ pcl::tracking::ParticleXYR newp;
+ newp.x = a.x - b.x;
+ newp.z = a.z - b.z;
+ newp.y = 0;
+ newp.roll = 0.0;
+ newp.pitch = a.pitch - b.pitch;
+ newp.yaw = 0.0;
+ return (newp);
+}
+
+} // namespace tracking
+} // namespace pcl
+
+#define PCL_STATE_POINT_TYPES \
+ (pcl::tracking::ParticleXYR)(pcl::tracking::ParticleXYZRPY)( \
+ pcl::tracking::ParticleXYZR)(pcl::tracking::ParticleXYRPY)( \
+ pcl::tracking::ParticleXYRP)
-#endif //
+#endif //
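
The three particle state types above share the same small interface (constructors, operator[], toEigenMatrix, toState), differing only in which components they pin to zero. A minimal, hedged usage sketch (not part of the diff) showing that ParticleXYRPY discards its y argument:

// Illustrative sketch only -- assumes the tracking header shown above.
#include <pcl/tracking/tracking.h>
#include <iostream>

int main()
{
  pcl::tracking::ParticleXYRPY p(1.0f, 5.0f, 2.0f, 0.1f, 0.2f, 0.3f);
  std::cout << p << std::endl;            // prints (1,0,2,0.1,0.2,0.3): y is pinned to 0
  Eigen::Affine3f t = p.toEigenMatrix();  // build the 6-DoF transform from the state
  pcl::tracking::ParticleXYRPY q =
      pcl::tracking::ParticleXYRPY::toState(t);  // and convert it back to a state
  std::cout << q[0] << " " << q[5] << std::endl; // operator[] indexes x..yaw (0..5)
}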
#pragma once
-#include <pcl/tracking/tracking.h>
-#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/coherence.h>
+#include <pcl/tracking/particle_filter.h>
+#include <pcl/tracking/tracking.h>
+
+namespace pcl {
+namespace tracking {
+
+/** \brief @b KLDAdaptiveParticleFilterTracker tracks the PointCloud which is given by
+ * setReferenceCloud within the measured PointCloud using particle filter method. The
+ * number of the particles changes adaptively based on KLD sampling [D. Fox, NIPS-01],
+ * [D.Fox, IJRR03].
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT, typename StateT>
+class KLDAdaptiveParticleFilterTracker
+: public ParticleFilterTracker<PointInT, StateT> {
+public:
+ using Tracker<PointInT, StateT>::tracker_name_;
+ using Tracker<PointInT, StateT>::search_;
+ using Tracker<PointInT, StateT>::input_;
+ using Tracker<PointInT, StateT>::getClassName;
+ using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
+ using ParticleFilterTracker<PointInT, StateT>::coherence_;
+ using ParticleFilterTracker<PointInT, StateT>::initParticles;
+ using ParticleFilterTracker<PointInT, StateT>::weight;
+ using ParticleFilterTracker<PointInT, StateT>::update;
+ using ParticleFilterTracker<PointInT, StateT>::iteration_num_;
+ using ParticleFilterTracker<PointInT, StateT>::particle_num_;
+ using ParticleFilterTracker<PointInT, StateT>::particles_;
+ using ParticleFilterTracker<PointInT, StateT>::use_normal_;
+ using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
+ using ParticleFilterTracker<PointInT, StateT>::change_detector_resolution_;
+ using ParticleFilterTracker<PointInT, StateT>::change_detector_;
+ using ParticleFilterTracker<PointInT, StateT>::motion_;
+ using ParticleFilterTracker<PointInT, StateT>::motion_ratio_;
+ using ParticleFilterTracker<PointInT, StateT>::step_noise_covariance_;
+ using ParticleFilterTracker<PointInT, StateT>::representative_state_;
+ using ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement;
+
+ using BaseClass = Tracker<PointInT, StateT>;
-namespace pcl
-{
- namespace tracking
+ using Ptr = shared_ptr<KLDAdaptiveParticleFilterTracker<PointInT, StateT>>;
+ using ConstPtr = shared_ptr<const KLDAdaptiveParticleFilterTracker<PointInT, StateT>>;
+
+ using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+
+ using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
+ using PointCloudStatePtr = typename PointCloudState::Ptr;
+ using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
+
+ using Coherence = PointCoherence<PointInT>;
+ using CoherencePtr = typename Coherence::Ptr;
+ using CoherenceConstPtr = typename Coherence::ConstPtr;
+
+ using CloudCoherence = PointCloudCoherence<PointInT>;
+ using CloudCoherencePtr = typename CloudCoherence::Ptr;
+ using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
+
+ /** \brief Empty constructor. */
+ KLDAdaptiveParticleFilterTracker()
+ : ParticleFilterTracker<PointInT, StateT>()
+ , maximum_particle_number_()
+ , epsilon_(0)
+ , delta_(0.99)
+ , bin_size_()
{
+ tracker_name_ = "KLDAdaptiveParticleFilterTracker";
+ }
- /** \brief @b KLDAdaptiveParticleFilterTracker tracks the PointCloud which is given by
- setReferenceCloud within the measured PointCloud using particle filter method.
- The number of the particles changes adaptively based on KLD sampling [D. Fox, NIPS-01], [D.Fox, IJRR03].
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT, typename StateT>
- class KLDAdaptiveParticleFilterTracker: public ParticleFilterTracker<PointInT, StateT>
- {
- public:
- using Tracker<PointInT, StateT>::tracker_name_;
- using Tracker<PointInT, StateT>::search_;
- using Tracker<PointInT, StateT>::input_;
- using Tracker<PointInT, StateT>::getClassName;
- using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
- using ParticleFilterTracker<PointInT, StateT>::coherence_;
- using ParticleFilterTracker<PointInT, StateT>::initParticles;
- using ParticleFilterTracker<PointInT, StateT>::weight;
- using ParticleFilterTracker<PointInT, StateT>::update;
- using ParticleFilterTracker<PointInT, StateT>::iteration_num_;
- using ParticleFilterTracker<PointInT, StateT>::particle_num_;
- using ParticleFilterTracker<PointInT, StateT>::particles_;
- using ParticleFilterTracker<PointInT, StateT>::use_normal_;
- using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
- using ParticleFilterTracker<PointInT, StateT>::change_detector_resolution_;
- using ParticleFilterTracker<PointInT, StateT>::change_detector_;
- using ParticleFilterTracker<PointInT, StateT>::motion_;
- using ParticleFilterTracker<PointInT, StateT>::motion_ratio_;
- using ParticleFilterTracker<PointInT, StateT>::step_noise_covariance_;
- using ParticleFilterTracker<PointInT, StateT>::representative_state_;
- using ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement;
-
- using BaseClass = Tracker<PointInT, StateT>;
-
- using Ptr = shared_ptr<KLDAdaptiveParticleFilterTracker<PointInT, StateT>>;
- using ConstPtr = shared_ptr<const KLDAdaptiveParticleFilterTracker<PointInT, StateT>>;
-
- using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
-
- using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
- using PointCloudStatePtr = typename PointCloudState::Ptr;
- using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
-
- using Coherence = PointCoherence<PointInT>;
- using CoherencePtr = typename Coherence::Ptr;
- using CoherenceConstPtr = typename Coherence::ConstPtr;
-
- using CloudCoherence = PointCloudCoherence<PointInT>;
- using CloudCoherencePtr = typename CloudCoherence::Ptr;
- using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
-
- /** \brief Empty constructor. */
- KLDAdaptiveParticleFilterTracker ()
- : ParticleFilterTracker<PointInT, StateT> ()
- , maximum_particle_number_ ()
- , epsilon_ (0)
- , delta_ (0.99)
- , bin_size_ ()
- {
- tracker_name_ = "KLDAdaptiveParticleFilterTracker";
- }
-
- /** \brief set the bin size.
- * \param bin_size the size of a bin
- */
- inline void setBinSize (const StateT& bin_size) { bin_size_ = bin_size; }
-
- /** \brief get the bin size. */
- inline StateT getBinSize () const { return (bin_size_); }
-
- /** \brief set the maximum number of the particles.
- * \param nr the maximum number of the particles.
- */
- inline void setMaximumParticleNum (unsigned int nr) { maximum_particle_number_ = nr; }
-
- /** \brief get the maximum number of the particles.*/
- inline unsigned int getMaximumParticleNum () const { return (maximum_particle_number_); }
-
- /** \brief set epsilon to be used to calc K-L boundary.
- * \param eps epsilon
- */
- inline void setEpsilon (double eps) { epsilon_ = eps; }
-
- /** \brief get epsilon to be used to calc K-L boundary. */
- inline double getEpsilon () const { return (epsilon_); }
-
- /** \brief set delta to be used in chi-squared distribution.
- * \param delta delta of chi-squared distribution.
- */
- inline void setDelta (double delta) { delta_ = delta; }
-
- /** \brief get delta to be used in chi-squared distribution.*/
- inline double getDelta () const { return (delta_); }
-
- protected:
-
- /** \brief return true if the two bins are equal.
- * \param a index of the bin
- * \param b index of the bin
- */
- virtual bool
- equalBin (const std::vector<int> &a, const std::vector<int> &b)
- {
- int dimension = StateT::stateDimension ();
- for (int i = 0; i < dimension; i++)
- if (a[i] != b[i])
- return (false);
- return (true);
- }
-
- /** \brief return upper quantile of standard normal distribution.
- * \param[in] u ratio of quantile.
- */
- double
- normalQuantile (double u)
- {
- const double a[9] = { 1.24818987e-4, -1.075204047e-3, 5.198775019e-3,
- -0.019198292004, 0.059054035642,-0.151968751364,
- 0.319152932694,-0.5319230073, 0.797884560593};
- const double b[15] = { -4.5255659e-5, 1.5252929e-4, -1.9538132e-5,
- -6.76904986e-4, 1.390604284e-3,-7.9462082e-4,
- -2.034254874e-3, 6.549791214e-3,-0.010557625006,
- 0.011630447319,-9.279453341e-3, 5.353579108e-3,
- -2.141268741e-3, 5.35310549e-4, 0.999936657524};
- double w, y, z;
-
- if (u == 0.)
- return (0.5);
- y = u / 2.0;
- if (y < -3.)
- return (0.0);
- if (y > 3.)
- return (1.0);
- if (y < 0.0)
- y = - y;
- if (y < 1.0)
- {
- w = y * y;
- z = a[0];
- for (int i = 1; i < 9; i++)
- z = z * w + a[i];
- z *= (y * 2.0);
- }
- else
- {
- y -= 2.0;
- z = b[0];
- for (int i = 1; i < 15; i++)
- z = z * y + b[i];
- }
-
- if (u < 0.0)
- return ((1. - z) / 2.0);
- return ((1. + z) / 2.0);
- }
-
- /** \brief calculate K-L boundary. K-L boundary follows 1/2e*chi(k-1, 1-d)^2.
- * \param[in] k the number of bins and the first parameter of chi distribution.
- */
- virtual
- double calcKLBound (int k)
- {
- double z = normalQuantile (delta_);
- double chi = 1.0 - 2.0 / (9.0 * (k - 1)) + sqrt (2.0 / (9.0 * (k - 1))) * z;
- return ((k - 1.0) / (2.0 * epsilon_) * chi * chi * chi);
- }
-
- /** \brief insert a bin into the set of the bins. if that bin is already registered,
- return false. if not, return true.
- * \param new_bin a bin to be inserted.
- * \param bins a set of the bins
- */
- virtual bool
- insertIntoBins (std::vector<int> &&new_bin, std::vector<std::vector<int> > &bins);
-
- /** \brief This method should get called before starting the actual computation. */
- bool
- initCompute () override;
-
- /** \brief resampling phase of particle filter method.
- sampling the particles according to the weights calculated in weight method.
- in particular, "sample with replacement" is archieved by walker's alias method.
- */
- void
- resample () override;
-
- /** \brief the maximum number of the particles. */
- unsigned int maximum_particle_number_;
-
- /** \brief error between K-L distance and MLE*/
- double epsilon_;
-
- /** \brief probability of distance between K-L distance and MLE is less than epsilon_*/
- double delta_;
-
- /** \brief the size of a bin.*/
- StateT bin_size_;
- };
+ /** \brief set the bin size.
+ * \param bin_size the size of a bin
+ */
+ inline void
+ setBinSize(const StateT& bin_size)
+ {
+ bin_size_ = bin_size;
}
-}
+
+ /** \brief get the bin size. */
+ inline StateT
+ getBinSize() const
+ {
+ return (bin_size_);
+ }
+
+ /** \brief set the maximum number of the particles.
+ * \param nr the maximum number of the particles.
+ */
+ inline void
+ setMaximumParticleNum(unsigned int nr)
+ {
+ maximum_particle_number_ = nr;
+ }
+
+ /** \brief get the maximum number of the particles.*/
+ inline unsigned int
+ getMaximumParticleNum() const
+ {
+ return (maximum_particle_number_);
+ }
+
+ /** \brief set epsilon to be used to calc K-L boundary.
+ * \param eps epsilon
+ */
+ inline void
+ setEpsilon(double eps)
+ {
+ epsilon_ = eps;
+ }
+
+ /** \brief get epsilon to be used to calc K-L boundary. */
+ inline double
+ getEpsilon() const
+ {
+ return (epsilon_);
+ }
+
+ /** \brief set delta to be used in chi-squared distribution.
+ * \param delta delta of chi-squared distribution.
+ */
+ inline void
+ setDelta(double delta)
+ {
+ delta_ = delta;
+ }
+
+ /** \brief get delta to be used in chi-squared distribution.*/
+ inline double
+ getDelta() const
+ {
+ return (delta_);
+ }
+
+protected:
+ /** \brief return true if the two bins are equal.
+ * \param a index of the bin
+ * \param b index of the bin
+ */
+ virtual bool
+ equalBin(const std::vector<int>& a, const std::vector<int>& b)
+ {
+ int dimension = StateT::stateDimension();
+ for (int i = 0; i < dimension; i++)
+ if (a[i] != b[i])
+ return (false);
+ return (true);
+ }
+
+ /** \brief return upper quantile of standard normal distribution.
+ * \param[in] u ratio of quantile.
+ */
+ double
+ normalQuantile(double u)
+ {
+ const double a[9] = {1.24818987e-4,
+ -1.075204047e-3,
+ 5.198775019e-3,
+ -0.019198292004,
+ 0.059054035642,
+ -0.151968751364,
+ 0.319152932694,
+ -0.5319230073,
+ 0.797884560593};
+ const double b[15] = {-4.5255659e-5,
+ 1.5252929e-4,
+ -1.9538132e-5,
+ -6.76904986e-4,
+ 1.390604284e-3,
+ -7.9462082e-4,
+ -2.034254874e-3,
+ 6.549791214e-3,
+ -0.010557625006,
+ 0.011630447319,
+ -9.279453341e-3,
+ 5.353579108e-3,
+ -2.141268741e-3,
+ 5.35310549e-4,
+ 0.999936657524};
+ double w, y, z;
+
+ if (u == 0.)
+ return (0.5);
+ y = u / 2.0;
+ if (y < -3.)
+ return (0.0);
+ if (y > 3.)
+ return (1.0);
+ if (y < 0.0)
+ y = -y;
+ if (y < 1.0) {
+ w = y * y;
+ z = a[0];
+ for (int i = 1; i < 9; i++)
+ z = z * w + a[i];
+ z *= (y * 2.0);
+ }
+ else {
+ y -= 2.0;
+ z = b[0];
+ for (int i = 1; i < 15; i++)
+ z = z * y + b[i];
+ }
+
+ if (u < 0.0)
+ return ((1. - z) / 2.0);
+ return ((1. + z) / 2.0);
+ }
+
+ /** \brief calculate K-L boundary. K-L boundary follows 1/2e*chi(k-1, 1-d)^2.
+ * \param[in] k the number of bins and the first parameter of chi distribution.
+ */
+ virtual double
+ calcKLBound(int k)
+ {
+ double z = normalQuantile(delta_);
+ double chi = 1.0 - 2.0 / (9.0 * (k - 1)) + sqrt(2.0 / (9.0 * (k - 1))) * z;
+ return ((k - 1.0) / (2.0 * epsilon_) * chi * chi * chi);
+ }
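
As a rough, hedged illustration of this bound (the numbers are placeholders, not from the diff):

  // Illustrative arithmetic only: with delta_ = 0.99 and epsilon_ = 0.02,
  // normalQuantile(0.99) evaluates to roughly 0.84, so for k = 10 occupied bins
  //   chi   = 1 - 2/81 + sqrt(2/81) * 0.84 ~= 1.11
  //   bound = 9 / (2 * 0.02) * 1.11^3      ~= 3.0e2
  // In KLD sampling, new particles are typically drawn until the particle count
  // exceeds this bound or maximum_particle_number_ is reached.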
+
+  /** \brief Insert a bin into the set of bins. If that bin is already registered,
+   * return false; if not, return true.
+ * \param new_bin a bin to be inserted.
+ * \param bins a set of the bins
+ */
+ virtual bool
+ insertIntoBins(std::vector<int>&& new_bin, std::vector<std::vector<int>>& bins);
+
+ /** \brief This method should get called before starting the actual
+ * computation. */
+ bool
+ initCompute() override;
+
+  /** \brief Resampling phase of the particle filter method. Samples the particles
+   * according to the weights calculated in the weight method. In particular, "sample
+   * with replacement" is achieved by Walker's alias method.
+ */
+ void
+ resample() override;
+
+ /** \brief the maximum number of the particles. */
+ unsigned int maximum_particle_number_;
+
+ /** \brief error between K-L distance and MLE*/
+ double epsilon_;
+
+ /** \brief probability of distance between K-L distance and MLE is less than
+ * epsilon_*/
+ double delta_;
+
+ /** \brief the size of a bin.*/
+ StateT bin_size_;
+};
+} // namespace tracking
+} // namespace pcl
#ifdef PCL_NO_PRECOMPILE
#include <pcl/tracking/impl/kld_adaptive_particle_filter.hpp>
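
A hedged configuration sketch for the tracker declared above; the point/state types and all numeric values are illustrative choices, not requirements of this header:

// Sketch only: typical KLD-adaptive tracker setup (placeholder values).
#include <pcl/tracking/kld_adaptive_particle_filter.h>
#include <pcl/point_types.h>

using StateT = pcl::tracking::ParticleXYZRPY;
using TrackerT =
    pcl::tracking::KLDAdaptiveParticleFilterTracker<pcl::PointXYZRGBA, StateT>;

void
configure(TrackerT& tracker)
{
  StateT bin_size;
  bin_size.x = bin_size.y = bin_size.z = 0.1f;          // translation bins
  bin_size.roll = bin_size.pitch = bin_size.yaw = 0.1f; // rotation bins
  tracker.setBinSize(bin_size);        // bin layout used when counting occupied bins
  tracker.setMaximumParticleNum(1000); // hard cap on the adaptive particle count
  tracker.setDelta(0.99);              // confidence used for the chi-squared bound
  tracker.setEpsilon(0.02);            // allowed K-L error
}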
#pragma once
-#include <pcl/tracking/tracking.h>
-#include <pcl/tracking/kld_adaptive_particle_filter.h>
#include <pcl/tracking/coherence.h>
+#include <pcl/tracking/kld_adaptive_particle_filter.h>
+#include <pcl/tracking/tracking.h>
-namespace pcl
-{
- namespace tracking
- {
- /** \brief @b KLDAdaptiveParticleFilterOMPTracker tracks the PointCloud which is given by
- setReferenceCloud within the measured PointCloud using particle filter method.
- The number of the particles changes adaptively based on KLD sampling [D. Fox, NIPS-01], [D.Fox, IJRR03].
- and the computation of the weights of the particles is parallelized using OpenMP.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT, typename StateT>
- class KLDAdaptiveParticleFilterOMPTracker: public KLDAdaptiveParticleFilterTracker<PointInT, StateT>
- {
- public:
- using Tracker<PointInT, StateT>::tracker_name_;
- using Tracker<PointInT, StateT>::search_;
- using Tracker<PointInT, StateT>::input_;
- using Tracker<PointInT, StateT>::indices_;
- using Tracker<PointInT, StateT>::getClassName;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::particles_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_counter_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::use_change_detector_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_x_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_y_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_z_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::alpha_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::changed_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::coherence_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::use_normal_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::particle_num_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
- //using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::calcLikelihood;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::normalizeWeight;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
- using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
-
- using BaseClass = Tracker<PointInT, StateT>;
+namespace pcl {
+namespace tracking {
+/** \brief @b KLDAdaptiveParticleFilterOMPTracker tracks the PointCloud which is given
+ * by setReferenceCloud within the measured PointCloud using particle filter method. The
+ * number of the particles changes adaptively based on KLD sampling [D. Fox, NIPS-01],
+ * [D.Fox, IJRR03], and the computation of the weights of the particles is parallelized
+ * using OpenMP.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT, typename StateT>
+class KLDAdaptiveParticleFilterOMPTracker
+: public KLDAdaptiveParticleFilterTracker<PointInT, StateT> {
+public:
+ using Tracker<PointInT, StateT>::tracker_name_;
+ using Tracker<PointInT, StateT>::search_;
+ using Tracker<PointInT, StateT>::input_;
+ using Tracker<PointInT, StateT>::indices_;
+ using Tracker<PointInT, StateT>::getClassName;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::particles_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_counter_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::use_change_detector_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_x_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_y_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::pass_z_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::alpha_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::changed_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::coherence_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::use_normal_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::particle_num_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
+ // using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::calcLikelihood;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::normalizeWeight;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
+ using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
- using Ptr = shared_ptr<KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>>;
- using ConstPtr = shared_ptr<const KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>>;
+ using BaseClass = Tracker<PointInT, StateT>;
- using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+ using Ptr = shared_ptr<KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>>;
+ using ConstPtr =
+ shared_ptr<const KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>>;
- using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
- using PointCloudStatePtr = typename PointCloudState::Ptr;
- using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
+ using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
- using Coherence = PointCoherence<PointInT>;
- using CoherencePtr = typename Coherence::Ptr;
- using CoherenceConstPtr = typename Coherence::ConstPtr;
+ using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
+ using PointCloudStatePtr = typename PointCloudState::Ptr;
+ using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
- using CloudCoherence = PointCloudCoherence<PointInT>;
- using CloudCoherencePtr = typename CloudCoherence::Ptr;
- using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
+ using Coherence = PointCoherence<PointInT>;
+ using CoherencePtr = typename Coherence::Ptr;
+ using CoherenceConstPtr = typename Coherence::ConstPtr;
- /** \brief Initialize the scheduler and set the number of threads to use.
- * \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
- KLDAdaptiveParticleFilterOMPTracker (unsigned int nr_threads = 0)
- : KLDAdaptiveParticleFilterTracker<PointInT, StateT> ()
- {
- tracker_name_ = "KLDAdaptiveParticleFilterOMPTracker";
+ using CloudCoherence = PointCloudCoherence<PointInT>;
+ using CloudCoherencePtr = typename CloudCoherence::Ptr;
+ using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
- setNumberOfThreads(nr_threads);
- }
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value
+ * back to automatic)
+ */
+ KLDAdaptiveParticleFilterOMPTracker(unsigned int nr_threads = 0)
+ : KLDAdaptiveParticleFilterTracker<PointInT, StateT>()
+ {
+ tracker_name_ = "KLDAdaptiveParticleFilterOMPTracker";
- /** \brief Initialize the scheduler and set the number of threads to use.
- * \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
- void
- setNumberOfThreads (unsigned int nr_threads = 0);
+ setNumberOfThreads(nr_threads);
+ }
- protected:
- /** \brief The number of threads the scheduler should use. */
- unsigned int threads_;
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value back to
+ * automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads = 0);
- /** \brief weighting phase of particle filter method.
- calculate the likelihood of all of the particles and set the weights.
- */
- void weight () override;
+protected:
+ /** \brief The number of threads the scheduler should use. */
+ unsigned int threads_;
- };
- }
-}
+ /** \brief weighting phase of particle filter method. calculate the likelihood of all
+ * of the particles and set the weights.
+ */
+ void
+ weight() override;
+};
+} // namespace tracking
+} // namespace pcl
//#include <pcl/tracking/impl/particle_filter_omp.hpp>
#ifdef PCL_NO_PRECOMPILE
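
A short, hedged sketch of constructing the OpenMP variant; the thread count of 4 is an assumption, not something mandated by this header:

// Sketch only: the OMP tracker adds a thread-count knob on top of the KLD tracker.
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>
#include <pcl/point_types.h>

using TrackerOMP =
    pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<pcl::PointXYZRGBA,
                                                       pcl::tracking::ParticleXYZRPY>;

TrackerOMP::Ptr
makeTracker()
{
  TrackerOMP::Ptr tracker(new TrackerOMP(4)); // 0 would mean "automatic"
  tracker->setNumberOfThreads(4);             // may also be changed later
  return tracker;
}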
#pragma once
#include <pcl/search/search.h>
-
#include <pcl/tracking/coherence.h>
-namespace pcl
-{
- namespace tracking
+namespace pcl {
+namespace tracking {
+/** \brief @b NearestPairPointCloudCoherence computes coherence between two pointclouds
+ * using the nearest point pairs.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class NearestPairPointCloudCoherence : public PointCloudCoherence<PointInT> {
+public:
+ using PointCloudCoherence<PointInT>::getClassName;
+ using PointCloudCoherence<PointInT>::coherence_name_;
+ using PointCloudCoherence<PointInT>::target_input_;
+
+ using PointCoherencePtr = typename PointCloudCoherence<PointInT>::PointCoherencePtr;
+ using PointCloudInConstPtr =
+ typename PointCloudCoherence<PointInT>::PointCloudInConstPtr;
+ using BaseClass = PointCloudCoherence<PointInT>;
+
+ using Ptr = shared_ptr<NearestPairPointCloudCoherence<PointInT>>;
+ using ConstPtr = shared_ptr<const NearestPairPointCloudCoherence<PointInT>>;
+ using SearchPtr = typename pcl::search::Search<PointInT>::Ptr;
+ using SearchConstPtr = typename pcl::search::Search<PointInT>::ConstPtr;
+
+ /** \brief empty constructor */
+ NearestPairPointCloudCoherence()
+ : new_target_(false), search_(), maximum_distance_(std::numeric_limits<double>::max())
+ {
+ coherence_name_ = "NearestPairPointCloudCoherence";
+ }
+
+  /** \brief Provide a pointer to the search object used to look up the
+   * nearest pairs in the target cloud.
+   * \param search a pointer to the search method
+   */
+ inline void
+ setSearchMethod(const SearchPtr& search)
+ {
+ search_ = search;
+ }
+
+  /** \brief Get a pointer to the search method. */
+ inline SearchPtr
+ getSearchMethod()
+ {
+ return (search_);
+ }
+
+  /** \brief Set the target (measured) point cloud and flag it as new.
+   * \param[in] cloud a pointer to the target point cloud.
+   */
+ inline void
+ setTargetCloud(const PointCloudInConstPtr& cloud) override
+ {
+ new_target_ = true;
+ PointCloudCoherence<PointInT>::setTargetCloud(cloud);
+ }
+
+ /** \brief set maximum distance to be taken into account.
+ * \param[in] val maximum distance.
+ */
+ inline void
+ setMaximumDistance(double val)
{
- /** \brief @b NearestPairPointCloudCoherence computes coherence between two pointclouds using the
- nearest point pairs.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class NearestPairPointCloudCoherence: public PointCloudCoherence<PointInT>
- {
- public:
- using PointCloudCoherence<PointInT>::getClassName;
- using PointCloudCoherence<PointInT>::coherence_name_;
- using PointCloudCoherence<PointInT>::target_input_;
-
- using PointCoherencePtr = typename PointCloudCoherence<PointInT>::PointCoherencePtr;
- using PointCloudInConstPtr = typename PointCloudCoherence<PointInT>::PointCloudInConstPtr;
- using BaseClass = PointCloudCoherence<PointInT>;
-
- using Ptr = shared_ptr<NearestPairPointCloudCoherence<PointInT> >;
- using ConstPtr = shared_ptr<const NearestPairPointCloudCoherence<PointInT> >;
- using SearchPtr = typename pcl::search::Search<PointInT>::Ptr;
- using SearchConstPtr = typename pcl::search::Search<PointInT>::ConstPtr;
-
- /** \brief empty constructor */
- NearestPairPointCloudCoherence ()
- : new_target_ (false)
- , search_ ()
- , maximum_distance_ (std::numeric_limits<double>::max ())
- {
- coherence_name_ = "NearestPairPointCloudCoherence";
- }
-
- /** \brief Provide a pointer to a dataset to add additional information
- * to estimate the features for every point in the input dataset. This
- * is optional, if this is not set, it will only use the data in the
- * input cloud to estimate the features. This is useful when you only
- * need to compute the features for a downsampled cloud.
- * \param search a pointer to a PointCloud message
- */
- inline void
- setSearchMethod (const SearchPtr &search) { search_ = search; }
-
- /** \brief Get a pointer to the point cloud dataset. */
- inline SearchPtr
- getSearchMethod () { return (search_); }
-
- /** \brief add a PointCoherence to the PointCloudCoherence.
- * \param[in] cloud coherence a pointer to PointCoherence.
- */
- inline void
- setTargetCloud (const PointCloudInConstPtr &cloud) override
- {
- new_target_ = true;
- PointCloudCoherence<PointInT>::setTargetCloud (cloud);
- }
-
- /** \brief set maximum distance to be taken into account.
- * \param[in] val maximum distance.
- */
- inline void setMaximumDistance (double val) { maximum_distance_ = val; }
-
- protected:
- using PointCloudCoherence<PointInT>::point_coherences_;
-
- /** \brief This method should get called before starting the actual computation. */
- bool initCompute () override;
-
- /** \brief A flag which is true if target_input_ is updated */
- bool new_target_;
-
- /** \brief A pointer to the spatial search object. */
- SearchPtr search_;
-
- /** \brief max of distance for points to be taken into account*/
- double maximum_distance_;
-
- /** \brief compute the nearest pairs and compute coherence using point_coherences_ */
- void
- computeCoherence (const PointCloudInConstPtr &cloud, const IndicesConstPtr &indices, float &w_j) override;
-
- };
+ maximum_distance_ = val;
}
-}
+
+protected:
+ using PointCloudCoherence<PointInT>::point_coherences_;
+
+ /** \brief This method should get called before starting the actual
+ * computation. */
+ bool
+ initCompute() override;
+
+ /** \brief A flag which is true if target_input_ is updated */
+ bool new_target_;
+
+ /** \brief A pointer to the spatial search object. */
+ SearchPtr search_;
+
+  /** \brief Maximum distance for point pairs to be taken into account. */
+ double maximum_distance_;
+
+ /** \brief compute the nearest pairs and compute coherence using
+ * point_coherences_ */
+ void
+ computeCoherence(const PointCloudInConstPtr& cloud,
+ const IndicesConstPtr& indices,
+ float& w_j) override;
+};
+} // namespace tracking
+} // namespace pcl
// #include <pcl/tracking/impl/nearest_pair_point_cloud_coherence.hpp>
#ifdef PCL_NO_PRECOMPILE
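
A hedged wiring sketch for the coherence class above; DistanceCoherence and the octree search are existing PCL components, used here only as plausible choices:

// Sketch only: nearest-pair cloud coherence backed by an octree search.
#include <pcl/tracking/nearest_pair_point_cloud_coherence.h>
#include <pcl/tracking/distance_coherence.h>
#include <pcl/search/octree.h>
#include <pcl/point_types.h>
#include <pcl/memory.h>

using PointT = pcl::PointXYZRGBA;

pcl::tracking::NearestPairPointCloudCoherence<PointT>::Ptr
makeCoherence()
{
  auto coherence =
      pcl::make_shared<pcl::tracking::NearestPairPointCloudCoherence<PointT>>();

  // Per-point coherence: distance-based here, but any PointCoherence works.
  coherence->addPointCoherence(
      pcl::make_shared<pcl::tracking::DistanceCoherence<PointT>>());

  // The nearest-pair lookup needs a spatial search structure over the target cloud.
  coherence->setSearchMethod(pcl::make_shared<pcl::search::Octree<PointT>>(0.01));
  coherence->setMaximumDistance(0.01); // ignore pairs farther apart than 1 cm
  return coherence;
}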
#pragma once
#include <pcl/tracking/coherence.h>
-namespace pcl
-{
- namespace tracking
- {
- /** \brief @b NormalCoherence computes coherence between two points from the angle
- between their normals. the coherence is calculated by 1 / (1 + weight * theta^2 ).
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT>
- class NormalCoherence: public PointCoherence<PointInT>
- {
- public:
-
- /** \brief initialize the weight to 1.0. */
- NormalCoherence ()
- : PointCoherence<PointInT> ()
- , weight_ (1.0)
- {}
-
- /** \brief set the weight of coherence
- * \param weight the weight of coherence
- */
- inline void setWeight (double weight) { weight_ = weight; }
-
- /** \brief get the weight of coherence */
- inline double getWeight () { return weight_; }
-
- protected:
- /** \brief return the normal coherence between the two points.
- * \param source instance of source point.
- * \param target instance of target point.
- */
- double computeCoherence (PointInT &source, PointInT &target) override;
+namespace pcl {
+namespace tracking {
+/** \brief @b NormalCoherence computes coherence between two points from the angle
+ * between their normals. The coherence is calculated as 1 / (1 + weight * theta^2).
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT>
+class NormalCoherence : public PointCoherence<PointInT> {
+public:
+ /** \brief initialize the weight to 1.0. */
+ NormalCoherence() : PointCoherence<PointInT>(), weight_(1.0) {}
+
+ /** \brief set the weight of coherence
+ * \param weight the weight of coherence
+ */
+ inline void
+ setWeight(double weight)
+ {
+ weight_ = weight;
+ }
- /** \brief the weight of coherence */
- double weight_;
-
- };
+ /** \brief get the weight of coherence */
+ inline double
+ getWeight()
+ {
+ return weight_;
}
-}
+
+protected:
+ /** \brief return the normal coherence between the two points.
+ * \param source instance of source point.
+ * \param target instance of target point.
+ */
+ double
+ computeCoherence(PointInT& source, PointInT& target) override;
+
+ /** \brief the weight of coherence */
+ double weight_;
+};
+} // namespace tracking
+} // namespace pcl
// #include <pcl/tracking/impl/normal_coherence.hpp>
#ifdef PCL_NO_PRECOMPILE
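
The coherence value documented above decays with the angle between the two normals; below is a self-contained, hedged sketch of that expression (an illustration of the documented formula, not the library's computeCoherence implementation):

// Sketch only: coherence = 1 / (1 + weight * theta^2).
#include <Eigen/Core>
#include <algorithm>
#include <cmath>

double
normalCoherence(const Eigen::Vector3f& n1, const Eigen::Vector3f& n2, double weight)
{
  const float c = std::max(-1.0f, std::min(1.0f, n1.normalized().dot(n2.normalized())));
  const double theta = std::acos(c); // angle between the normals, in radians
  return 1.0 / (1.0 + weight * theta * theta);
}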
#pragma once
-#include <pcl/memory.h>
#include <pcl/filters/passthrough.h>
#include <pcl/octree/octree_pointcloud_changedetector.h>
#include <pcl/tracking/coherence.h>
#include <pcl/tracking/tracker.h>
#include <pcl/tracking/tracking.h>
+#include <pcl/memory.h>
#include <Eigen/Dense>
+namespace pcl {
+namespace tracking {
+/** \brief @b ParticleFilterTracker tracks the PointCloud which is given by
+ * setReferenceCloud within the measured PointCloud using particle filter method.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT, typename StateT>
+class ParticleFilterTracker : public Tracker<PointInT, StateT> {
+protected:
+ using Tracker<PointInT, StateT>::deinitCompute;
+
+public:
+ using Tracker<PointInT, StateT>::tracker_name_;
+ using Tracker<PointInT, StateT>::search_;
+ using Tracker<PointInT, StateT>::input_;
+ using Tracker<PointInT, StateT>::indices_;
+ using Tracker<PointInT, StateT>::getClassName;
+
+ using Ptr = shared_ptr<ParticleFilterTracker<PointInT, StateT>>;
+ using ConstPtr = shared_ptr<const ParticleFilterTracker<PointInT, StateT>>;
+
+ using BaseClass = Tracker<PointInT, StateT>;
+
+ using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+
+ using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
+ using PointCloudStatePtr = typename PointCloudState::Ptr;
+ using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
+
+ using Coherence = PointCoherence<PointInT>;
+ using CoherencePtr = typename Coherence::Ptr;
+ using CoherenceConstPtr = typename Coherence::ConstPtr;
+
+ using CloudCoherence = PointCloudCoherence<PointInT>;
+ using CloudCoherencePtr = typename CloudCoherence::Ptr;
+ using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
+
+ /** \brief Empty constructor. */
+ ParticleFilterTracker()
+ : iteration_num_(1)
+ , particle_num_()
+ , min_indices_(1)
+ , ref_()
+ , particles_()
+ , coherence_()
+ , resample_likelihood_thr_(0.0)
+ , occlusion_angle_thr_(M_PI / 2.0)
+ , alpha_(15.0)
+ , representative_state_()
+ , use_normal_(false)
+ , motion_()
+ , motion_ratio_(0.25)
+ , pass_x_()
+ , pass_y_()
+ , pass_z_()
+ , transed_reference_vector_()
+ , change_detector_()
+ , changed_(false)
+ , change_counter_(0)
+ , change_detector_filter_(10)
+ , change_detector_interval_(10)
+ , change_detector_resolution_(0.01)
+ , use_change_detector_(false)
+ {
+ tracker_name_ = "ParticleFilterTracker";
+ pass_x_.setFilterFieldName("x");
+ pass_y_.setFilterFieldName("y");
+ pass_z_.setFilterFieldName("z");
+ pass_x_.setKeepOrganized(false);
+ pass_y_.setKeepOrganized(false);
+ pass_z_.setKeepOrganized(false);
+ }
+
+ /** \brief Set the number of iteration.
+ * \param[in] iteration_num the number of iteration.
+ */
+ inline void
+ setIterationNum(const int iteration_num)
+ {
+ iteration_num_ = iteration_num;
+ }
+
+ /** \brief Get the number of iteration. */
+ inline int
+ getIterationNum() const
+ {
+ return iteration_num_;
+ }
+
+ /** \brief Set the number of the particles.
+ * \param[in] particle_num the number of the particles.
+ */
+ inline void
+ setParticleNum(const int particle_num)
+ {
+ particle_num_ = particle_num;
+ }
+
+ /** \brief Get the number of the particles. */
+ inline int
+ getParticleNum() const
+ {
+ return particle_num_;
+ }
+
+ /** \brief Set a pointer to a reference dataset to be tracked.
+ * \param[in] ref a pointer to a PointCloud message
+ */
+ inline void
+ setReferenceCloud(const PointCloudInConstPtr& ref)
+ {
+ ref_ = ref;
+ }
+
+ /** \brief Get a pointer to a reference dataset to be tracked. */
+ inline PointCloudInConstPtr const
+ getReferenceCloud()
+ {
+ return ref_;
+ }
+
+ /** \brief Set the PointCloudCoherence as likelihood.
+ * \param[in] coherence a pointer to PointCloudCoherence.
+ */
+ inline void
+ setCloudCoherence(const CloudCoherencePtr& coherence)
+ {
+ coherence_ = coherence;
+ }
+
+ /** \brief Get the PointCloudCoherence to compute likelihood. */
+ inline CloudCoherencePtr
+ getCloudCoherence() const
+ {
+ return coherence_;
+ }
+
+ /** \brief Set the covariance of step noise.
+ * \param[in] step_noise_covariance the diagonal elements of covariance matrix
+ * of step noise.
+ */
+ inline void
+ setStepNoiseCovariance(const std::vector<double>& step_noise_covariance)
+ {
+ step_noise_covariance_ = step_noise_covariance;
+ }
+
+ /** \brief Set the covariance of the initial noise. It will be used when
+ * initializing the particles.
+ * \param[in] initial_noise_covariance the diagonal elements of covariance matrix of
+ * initial noise.
+ */
+ inline void
+ setInitialNoiseCovariance(const std::vector<double>& initial_noise_covariance)
+ {
+ initial_noise_covariance_ = initial_noise_covariance;
+ }
+
+ /** \brief Set the mean of the initial noise. It will be used when
+ * initializing the particles.
+ * \param[in] initial_noise_mean the mean values of initial noise.
+ */
+ inline void
+ setInitialNoiseMean(const std::vector<double>& initial_noise_mean)
+ {
+ initial_noise_mean_ = initial_noise_mean;
+ }
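
A hedged example of how these three setters are commonly filled in (six entries, one per state dimension; the magnitudes are placeholders):

  // Illustrative only:
  //   std::vector<double> step_noise (6, 0.01);   // per-frame process noise
  //   std::vector<double> init_noise (6, 0.02);   // spread of the initial particles
  //   std::vector<double> init_mean  (6, 0.0);    // centered on the initial pose
  //   tracker.setStepNoiseCovariance(step_noise);
  //   tracker.setInitialNoiseCovariance(init_noise);
  //   tracker.setInitialNoiseMean(init_mean);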
+
+ /** \brief Set the threshold to re-initialize the particles.
+ * \param[in] resample_likelihood_thr threshold to re-initialize.
+ */
+ inline void
+ setResampleLikelihoodThr(const double resample_likelihood_thr)
+ {
+ resample_likelihood_thr_ = resample_likelihood_thr;
+ }
+
+ /** \brief Set the threshold of angle to be considered occlusion (default:
+ * pi/2). ParticleFilterTracker does not take the occluded points into account
+ * according to the angle between the normal and the position.
+ * \param[in] occlusion_angle_thr threshold of angle to be considered occlusion.
+ */
+ inline void
+ setOcclusionAngleThe(const double occlusion_angle_thr)
+ {
+ occlusion_angle_thr_ = occlusion_angle_thr;
+ }
+
+ /** \brief Set the minimum number of indices (default: 1).
+   * ParticleFilterTracker ignores hypotheses whose number of points is
+   * smaller than the minimum number of indices.
+ * \param[in] min_indices the minimum number of indices.
+ */
+ inline void
+ setMinIndices(const int min_indices)
+ {
+ min_indices_ = min_indices;
+ }
+
+ /** \brief Set the transformation from the world coordinates to the frame of
+ * the particles.
+   * \param[in] trans Affine transformation from the world coordinates to the frame of
+ * the particles.
+ */
+ inline void
+ setTrans(const Eigen::Affine3f& trans)
+ {
+ trans_ = trans;
+ }
+
+ /** \brief Get the transformation from the world coordinates to the frame of
+ * the particles. */
+ inline Eigen::Affine3f
+ getTrans() const
+ {
+ return trans_;
+ }
+
+ /** \brief Get an instance of the result of tracking.
+ * This function returns the particle that represents the transform between
+ * the reference point cloud at the beginning and the best guess about its
+ * location in the most recent frame.
+ */
+ inline StateT
+ getResult() const override
+ {
+ return representative_state_;
+ }
+
+ /** \brief Convert a state to affine transformation from the world coordinates
+ * frame.
+ * \param[in] particle an instance of StateT.
+ */
+ Eigen::Affine3f
+ toEigenMatrix(const StateT& particle)
+ {
+ return particle.toEigenMatrix();
+ }
+
+ /** \brief Get a pointer to a pointcloud of the particles. */
+ inline PointCloudStatePtr
+ getParticles() const
+ {
+ return particles_;
+ }
+
+  /** \brief Normalize the weight of a particle using
+   * \f$ \exp(1 - \alpha (w - w_{min}) / (w_{max} - w_{min})) \f$.
+   * \note This method is described in [P. Azad et al., ICRA11].
+ * \param[in] w the weight to be normalized
+ * \param[in] w_min the minimum weight of the particles
+ * \param[in] w_max the maximum weight of the particles
+ */
+ inline double
+ normalizeParticleWeight(double w, double w_min, double w_max)
+ {
+ return std::exp(1.0 - alpha_ * (w - w_min) / (w_max - w_min));
+ }
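
A quick worked instance of this normalization (the numbers are illustrative only):

  // Illustrative arithmetic: with alpha_ = 15, w_min = 0.1 and w_max = 0.3,
  //   w = 0.3  maps to  std::exp(1 - 15 * 1.0) = std::exp(-14)
  //   w = 0.2  maps to  std::exp(1 - 15 * 0.5) = std::exp(-6.5)
  //   w = 0.1  maps to  std::exp(1 - 15 * 0.0) = std::exp(1)
  // so the relative spread between particle weights is stretched exponentially.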
+
+ /** \brief Set the value of alpha.
+ * \param[in] alpha the value of alpha
+ */
+ inline void
+ setAlpha(double alpha)
+ {
+ alpha_ = alpha;
+ }
+
+ /** \brief Get the value of alpha. */
+ inline double
+ getAlpha()
+ {
+ return alpha_;
+ }
+
+ /** \brief Set the value of use_normal_.
+ * \param[in] use_normal the value of use_normal_.
+ */
+ inline void
+ setUseNormal(bool use_normal)
+ {
+ use_normal_ = use_normal;
+ }
+
+ /** \brief Get the value of use_normal_. */
+ inline bool
+ getUseNormal()
+ {
+ return use_normal_;
+ }
+
+ /** \brief Set the value of use_change_detector_.
+ * \param[in] use_change_detector the value of use_change_detector_.
+ */
+ inline void
+ setUseChangeDetector(bool use_change_detector)
+ {
+ use_change_detector_ = use_change_detector;
+ }
+
+ /** \brief Get the value of use_change_detector_. */
+ inline bool
+ getUseChangeDetector()
+ {
+ return use_change_detector_;
+ }
+
+ /** \brief Set the motion ratio
+ * \param[in] motion_ratio the ratio of hypothesis to use motion model.
+ */
+ inline void
+ setMotionRatio(double motion_ratio)
+ {
+ motion_ratio_ = motion_ratio;
+ }
+
+ /** \brief Get the motion ratio. */
+ inline double
+ getMotionRatio()
+ {
+ return motion_ratio_;
+ }
+
+ /** \brief Set the number of interval frames to run change detection.
+ * \param[in] change_detector_interval the number of interval frames.
+ */
+ inline void
+ setIntervalOfChangeDetection(unsigned int change_detector_interval)
+ {
+ change_detector_interval_ = change_detector_interval;
+ }
+
+ /** \brief Get the number of interval frames to run change detection. */
+ inline unsigned int
+ getIntervalOfChangeDetection()
+ {
+ return change_detector_interval_;
+ }
+
+ /** \brief Set the minimum amount of points required within leaf node to
+ * become serialized in change detection
+ * \param[in] change_detector_filter the minimum amount of points required within leaf
+ * node
+ */
+ inline void
+ setMinPointsOfChangeDetection(unsigned int change_detector_filter)
+ {
+ change_detector_filter_ = change_detector_filter;
+ }
+
+ /** \brief Set the resolution of change detection.
+ * \param[in] resolution resolution of change detection octree
+ */
+ inline void
+ setResolutionOfChangeDetection(double resolution)
+ {
+ change_detector_resolution_ = resolution;
+ }
+
+ /** \brief Get the resolution of change detection. */
+ inline double
+ getResolutionOfChangeDetection()
+ {
+ return change_detector_resolution_;
+ }
+
+ /** \brief Get the minimum amount of points required within leaf node to
+ * become serialized in change detection. */
+ inline unsigned int
+ getMinPointsOfChangeDetection()
+ {
+ return change_detector_filter_;
+ }
+
+ /** \brief Get the adjustment ratio. */
+ inline double
+ getFitRatio() const
+ {
+ return fit_ratio_;
+ }
+
+ /** \brief Reset the particles to restart tracking. */
+ virtual inline void
+ resetTracking()
+ {
+ if (particles_)
+ particles_->points.clear();
+ }
+
+protected:
+ /** \brief Compute the parameters for the bounding box of hypothesis
+ * pointclouds.
+ * \param[out] x_min the minimum value of x axis.
+ * \param[out] x_max the maximum value of x axis.
+ * \param[out] y_min the minimum value of y axis.
+ * \param[out] y_max the maximum value of y axis.
+ * \param[out] z_min the minimum value of z axis.
+ * \param[out] z_max the maximum value of z axis.
+ */
+ void
+ calcBoundingBox(double& x_min,
+ double& x_max,
+ double& y_min,
+ double& y_max,
+ double& z_min,
+ double& z_max);
+
+ /** \brief Crop the pointcloud by the bounding box calculated from hypothesis
+ * and the reference pointcloud.
+ * \param[in] cloud a pointer to the pointcloud to be cropped.
+ * \param[out] output the resultant cropped pointcloud.
+ */
+ void
+ cropInputPointCloud(const PointCloudInConstPtr& cloud, PointCloudIn& output);
+
+ /** \brief Compute a reference pointcloud transformed to the pose that hypothesis
+ * represents.
+ * \param[in] hypothesis a particle which represents a hypothesis.
+ * \param[in] indices the indices which should be taken into account.
+ * \param[out] cloud the resultant point cloud model dataset which is transformed to
+ * hypothesis.
+ **/
+ void
+ computeTransformedPointCloud(const StateT& hypothesis,
+ std::vector<int>& indices,
+ PointCloudIn& cloud);
+
+ /** \brief Compute a reference pointcloud transformed to the pose that hypothesis
+ * represents and calculate indices taking occlusion into account.
+ * \param[in] hypothesis a particle which represents a hypothesis.
+ * \param[in] indices the indices which should be taken into account.
+ * \param[out] cloud the resultant point cloud model dataset which is transformed to
+ * hypothesis.
+ **/
+ void
+ computeTransformedPointCloudWithNormal(const StateT& hypothesis,
+ std::vector<int>& indices,
+ PointCloudIn& cloud);
+
+ /** \brief Compute a reference pointcloud transformed to the pose that hypothesis
+ * represents and calculate indices without taking occlusion into account.
+ * \param[in] hypothesis a particle which represents a hypothesis.
+ * \param[out] cloud the resultant point cloud model dataset which is transformed to
+ * hypothesis.
+ **/
+ void
+ computeTransformedPointCloudWithoutNormal(const StateT& hypothesis,
+ PointCloudIn& cloud);
+
+ /** \brief This method should get called before starting the actual computation. */
+ bool
+ initCompute() override;
+
+ /** \brief Weighting phase of particle filter method. Calculate the likelihood
+ * of all of the particles and set the weights. */
+ virtual void
+ weight();
+
+ /** \brief Resampling phase of the particle filter method. Samples the particles
+ * according to the weights calculated in the weight method. In particular,
+ * "sample with replacement" is achieved by Walker's alias method.
+ */
+ virtual void
+ resample();
+
+ /** \brief Calculate the weighted mean of the particles and set it as the result. */
+ virtual void
+ update();
+
+ /** \brief Normalize the weights of all the particles. */
+ virtual void
+ normalizeWeight();
+
+ /** \brief Initialize the particles. initial_noise_covariance_ and
+ * initial_noise_mean_ are used for Gaussian sampling. */
+ void
+ initParticles(bool reset);
+
+ /** \brief Track the pointcloud using particle filter method. */
+ void
+ computeTracking() override;
+
+ /** \brief Implementation of "sample with replacement" using Walker's alias method.
+ * For details on Walker's alias method, see:
+ * Walker, Alastair J., "An Efficient Method for Generating Discrete Random
+ * Variables with General Distributions", ACM Trans. Math. Softw., vol. 3,
+ * no. 3, pp. 253--256, ACM, New York, NY, USA, 1977, ISSN 0098-3500,
+ * doi: http://doi.acm.org/10.1145/355744.355749.
+ * \param a an alias table, which is generated by genAliasTable.
+ * \param q a table of weights, which is generated by genAliasTable.
+ */
+ int
+ sampleWithReplacement(const std::vector<int>& a, const std::vector<double>& q);
+
+ /** \brief Generate the tables for Walker's alias method. */
+ void
+ genAliasTable(std::vector<int>& a,
+ std::vector<double>& q,
+ const PointCloudStateConstPtr& particles);
+
+ /** \brief Resample the particles with replacement. */
+ void
+ resampleWithReplacement();
+
+ /** \brief Resample the particles in a deterministic way. */
+ void
+ resampleDeterministic();
+
+ /** \brief Run change detection and return true if there is a change.
+ * \param[in] input a pointer to the input pointcloud.
+ */
+ bool
+ testChangeDetection(const PointCloudInConstPtr& input);
+
+ /** \brief The number of iterations of the particle filter. */
+ int iteration_num_;
+
+ /** \brief The number of the particles. */
+ int particle_num_;
+
+ /** \brief The minimum number of points which the hypothesis should have. */
+ int min_indices_;
+
+ /** \brief Adjustment of the particle filter. */
+ double fit_ratio_;
+
+ /** \brief A pointer to reference point cloud. */
+ PointCloudInConstPtr ref_;
+
+ /** \brief A pointer to the particles */
+ PointCloudStatePtr particles_;
+
+ /** \brief A pointer to PointCloudCoherence. */
+ CloudCoherencePtr coherence_;
+
+ /** \brief The diagonal elements of the covariance matrix of the step noise. The
+ * covariance matrix is used in every call to the resample method.
+ */
+ std::vector<double> step_noise_covariance_;
+
+ /** \brief The diagonal elements of the covariance matrix of the initial noise.
+ * The covariance matrix is used when initializing the particles.
+ */
+ std::vector<double> initial_noise_covariance_;
+
+ /** \brief The mean values of initial noise. */
+ std::vector<double> initial_noise_mean_;
+
+ /** \brief The threshold for the particles to be re-initialized. */
+ double resample_likelihood_thr_;
+
+ /** \brief The threshold for the points to be considered as occluded. */
+ double occlusion_angle_thr_;
+
+ /** \brief The weight to be used in normalization of the weights of the
+ * particles. */
+ double alpha_;
+
+ /** \brief The result of tracking. */
+ StateT representative_state_;
+
+ /** \brief An affine transformation from the world coordinates frame to the
+ * origin of the particles. */
+ Eigen::Affine3f trans_;
+
+ /** \brief A flag indicating whether to use normals or not. Defaults to false. */
+ bool use_normal_;
+
+ /** \brief Difference between the result in t and t-1. */
+ StateT motion_;
+
+ /** \brief Ratio of hypotheses that use the motion model. */
+ double motion_ratio_;
+
+ /** \brief Pass through filter to crop the pointclouds within the hypothesis
+ * bounding box. */
+ pcl::PassThrough<PointInT> pass_x_;
+ /** \brief Pass through filter to crop the pointclouds within the hypothesis
+ * bounding box. */
+ pcl::PassThrough<PointInT> pass_y_;
+ /** \brief Pass through filter to crop the pointclouds within the hypothesis
+ * bounding box. */
+ pcl::PassThrough<PointInT> pass_z_;
+
+ /** \brief A list of the pointers to pointclouds. */
+ std::vector<PointCloudInPtr> transed_reference_vector_;
+
+ /** \brief Change detector used as a trigger to track. */
+ typename pcl::octree::OctreePointCloudChangeDetector<PointInT>::Ptr change_detector_;
+
+ /** \brief A flag to be true when change of pointclouds is detected. */
+ bool changed_;
+
+ /** \brief A counter to skip change detection. */
+ unsigned int change_counter_;
+
+ /** \brief Minimum number of points in a leaf when calling the change detector.
+ * Defaults to 10. */
+ unsigned int change_detector_filter_;
+
+ /** \brief The number of interval frames to run change detection.
+ * Defaults to 10. */
+ unsigned int change_detector_interval_;
+
+ /** \brief Resolution of the change detector. Defaults to 0.01. */
+ double change_detector_resolution_;
-namespace pcl
-{
- namespace tracking
- {
- /** \brief @b ParticleFilterTracker tracks the PointCloud which is given by
- setReferenceCloud within the measured PointCloud using particle filter method.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT, typename StateT>
- class ParticleFilterTracker: public Tracker<PointInT, StateT>
- {
- protected:
- using Tracker<PointInT, StateT>::deinitCompute;
-
- public:
- using Tracker<PointInT, StateT>::tracker_name_;
- using Tracker<PointInT, StateT>::search_;
- using Tracker<PointInT, StateT>::input_;
- using Tracker<PointInT, StateT>::indices_;
- using Tracker<PointInT, StateT>::getClassName;
-
- using Ptr = shared_ptr<ParticleFilterTracker<PointInT, StateT>>;
- using ConstPtr = shared_ptr<const ParticleFilterTracker<PointInT, StateT>>;
-
- using BaseClass = Tracker<PointInT, StateT>;
-
- using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
-
- using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
- using PointCloudStatePtr = typename PointCloudState::Ptr;
- using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
-
- using Coherence = PointCoherence<PointInT>;
- using CoherencePtr = typename Coherence::Ptr;
- using CoherenceConstPtr = typename Coherence::ConstPtr;
-
- using CloudCoherence = PointCloudCoherence<PointInT>;
- using CloudCoherencePtr = typename CloudCoherence::Ptr;
- using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
-
- /** \brief Empty constructor. */
- ParticleFilterTracker ()
- : iteration_num_ (1)
- , particle_num_ ()
- , min_indices_ (1)
- , ref_ ()
- , particles_ ()
- , coherence_ ()
- , resample_likelihood_thr_ (0.0)
- , occlusion_angle_thr_ (M_PI / 2.0)
- , alpha_ (15.0)
- , representative_state_ ()
- , use_normal_ (false)
- , motion_ ()
- , motion_ratio_ (0.25)
- , pass_x_ ()
- , pass_y_ ()
- , pass_z_ ()
- , transed_reference_vector_ ()
- , change_detector_ ()
- , changed_ (false)
- , change_counter_ (0)
- , change_detector_filter_ (10)
- , change_detector_interval_ (10)
- , change_detector_resolution_ (0.01)
- , use_change_detector_ (false)
- {
- tracker_name_ = "ParticleFilterTracker";
- pass_x_.setFilterFieldName ("x");
- pass_y_.setFilterFieldName ("y");
- pass_z_.setFilterFieldName ("z");
- pass_x_.setKeepOrganized (false);
- pass_y_.setKeepOrganized (false);
- pass_z_.setKeepOrganized (false);
- }
-
- /** \brief Set the number of iteration.
- * \param[in] iteration_num the number of iteration.
- */
- inline void
- setIterationNum (const int iteration_num) { iteration_num_ = iteration_num; }
-
- /** \brief Get the number of iteration. */
- inline int
- getIterationNum () const { return iteration_num_; }
-
- /** \brief Set the number of the particles.
- * \param[in] particle_num the number of the particles.
- */
- inline void
- setParticleNum (const int particle_num) { particle_num_ = particle_num; }
-
- /** \brief Get the number of the particles. */
- inline int
- getParticleNum () const { return particle_num_; }
-
- /** \brief Set a pointer to a reference dataset to be tracked.
- * \param[in] ref a pointer to a PointCloud message
- */
- inline void
- setReferenceCloud (const PointCloudInConstPtr &ref) { ref_ = ref; }
-
- /** \brief Get a pointer to a reference dataset to be tracked. */
- inline PointCloudInConstPtr const
- getReferenceCloud () { return ref_; }
-
- /** \brief Set the PointCloudCoherence as likelihood.
- * \param[in] coherence a pointer to PointCloudCoherence.
- */
- inline void
- setCloudCoherence (const CloudCoherencePtr &coherence) { coherence_ = coherence; }
-
- /** \brief Get the PointCloudCoherence to compute likelihood. */
- inline CloudCoherencePtr
- getCloudCoherence () const { return coherence_; }
-
-
- /** \brief Set the covariance of step noise.
- * \param[in] step_noise_covariance the diagonal elements of covariance matrix of step noise.
- */
- inline void
- setStepNoiseCovariance (const std::vector<double> &step_noise_covariance)
- {
- step_noise_covariance_ = step_noise_covariance;
- }
-
- /** \brief Set the covariance of the initial noise. It will be used when initializing the particles.
- * \param[in] initial_noise_covariance the diagonal elements of covariance matrix of initial noise.
- */
- inline void
- setInitialNoiseCovariance (const std::vector<double> &initial_noise_covariance)
- {
- initial_noise_covariance_ = initial_noise_covariance;
- }
-
- /** \brief Set the mean of the initial noise. It will be used when initializing the particles.
- * \param[in] initial_noise_mean the mean values of initial noise.
- */
- inline void
- setInitialNoiseMean (const std::vector<double> &initial_noise_mean)
- {
- initial_noise_mean_ = initial_noise_mean;
- }
-
- /** \brief Set the threshold to re-initialize the particles.
- * \param[in] resample_likelihood_thr threshold to re-initialize.
- */
- inline void
- setResampleLikelihoodThr (const double resample_likelihood_thr)
- {
- resample_likelihood_thr_ = resample_likelihood_thr;
- }
-
- /** \brief Set the threshold of angle to be considered occlusion (default: pi/2).
- * ParticleFilterTracker does not take the occluded points into account according to the angle
- * between the normal and the position.
- * \param[in] occlusion_angle_thr threshold of angle to be considered occlusion.
- */
- inline void
- setOcclusionAngleThe (const double occlusion_angle_thr)
- {
- occlusion_angle_thr_ = occlusion_angle_thr;
- }
-
- /** \brief Set the minimum number of indices (default: 1).
- * ParticleFilterTracker does not take into account the hypothesis
- * whose the number of points is smaller than the minimum indices.
- * \param[in] min_indices the minimum number of indices.
- */
- inline void
- setMinIndices (const int min_indices) { min_indices_ = min_indices; }
-
- /** \brief Set the transformation from the world coordinates to the frame of the particles.
- * \param[in] trans Affine transformation from the worldcoordinates to the frame of the particles.
- */
- inline void setTrans (const Eigen::Affine3f &trans) { trans_ = trans; }
-
- /** \brief Get the transformation from the world coordinates to the frame of the particles. */
- inline Eigen::Affine3f getTrans () const { return trans_; }
-
- /** \brief Get an instance of the result of tracking.
- * This function returns the particle that represents the transform between the reference point cloud at the
- * beginning and the best guess about its location in the most recent frame.
- */
- inline StateT getResult () const override { return representative_state_; }
-
- /** \brief Convert a state to affine transformation from the world coordinates frame.
- * \param[in] particle an instance of StateT.
- */
- Eigen::Affine3f toEigenMatrix (const StateT& particle)
- {
- return particle.toEigenMatrix ();
- }
-
- /** \brief Get a pointer to a pointcloud of the particles. */
- inline PointCloudStatePtr getParticles () const { return particles_; }
-
- /** \brief Normalize the weight of a particle using \f$ std::exp(1- alpha ( w - w_{min}) / (w_max - w_min)) \f$
- * \note This method is described in [P.Azad et. al, ICRA11].
- * \param[in] w the weight to be normalized
- * \param[in] w_min the minimum weight of the particles
- * \param[in] w_max the maximum weight of the particles
- */
- inline double normalizeParticleWeight (double w, double w_min, double w_max)
- {
- return std::exp (1.0 - alpha_ * (w - w_min) / (w_max - w_min));
- }
-
- /** \brief Set the value of alpha.
- * \param[in] alpha the value of alpha
- */
- inline void setAlpha (double alpha) { alpha_ = alpha; }
-
- /** \brief Get the value of alpha. */
- inline double getAlpha () { return alpha_; }
-
- /** \brief Set the value of use_normal_.
- * \param[in] use_normal the value of use_normal_.
- */
- inline void setUseNormal (bool use_normal) { use_normal_ = use_normal; }
-
- /** \brief Get the value of use_normal_. */
- inline bool getUseNormal () { return use_normal_; }
-
- /** \brief Set the value of use_change_detector_.
- * \param[in] use_change_detector the value of use_change_detector_.
- */
- inline void setUseChangeDetector (bool use_change_detector) { use_change_detector_ = use_change_detector; }
-
- /** \brief Get the value of use_change_detector_. */
- inline bool getUseChangeDetector () { return use_change_detector_; }
-
- /** \brief Set the motion ratio
- * \param[in] motion_ratio the ratio of hypothesis to use motion model.
- */
- inline void setMotionRatio (double motion_ratio) { motion_ratio_ = motion_ratio; }
-
- /** \brief Get the motion ratio. */
- inline double getMotionRatio () { return motion_ratio_;}
-
- /** \brief Set the number of interval frames to run change detection.
- * \param[in] change_detector_interval the number of interval frames.
- */
- inline void setIntervalOfChangeDetection (unsigned int change_detector_interval)
- {
- change_detector_interval_ = change_detector_interval;
- }
-
- /** \brief Get the number of interval frames to run change detection. */
- inline unsigned int getIntervalOfChangeDetection ()
- {
- return change_detector_interval_;
- }
-
- /** \brief Set the minimum amount of points required within leaf node to become serialized in change detection
- * \param[in] change_detector_filter the minimum amount of points required within leaf node
- */
- inline void setMinPointsOfChangeDetection (unsigned int change_detector_filter)
- {
- change_detector_filter_ = change_detector_filter;
- }
-
- /** \brief Set the resolution of change detection.
- * \param[in] resolution resolution of change detection octree
- */
- inline void setResolutionOfChangeDetection (double resolution) { change_detector_resolution_ = resolution; }
-
- /** \brief Get the resolution of change detection. */
- inline double getResolutionOfChangeDetection () { return change_detector_resolution_; }
-
- /** \brief Get the minimum amount of points required within leaf node to become serialized in change detection. */
- inline unsigned int getMinPointsOfChangeDetection ()
- {
- return change_detector_filter_;
- }
-
- /** \brief Get the adjustment ratio. */
- inline double
- getFitRatio() const { return fit_ratio_; }
-
- /** \brief Reset the particles to restart tracking*/
- virtual inline void resetTracking ()
- {
- if (particles_)
- particles_->points.clear ();
- }
-
- protected:
-
- /** \brief Compute the parameters for the bounding box of hypothesis pointclouds.
- * \param[out] x_min the minimum value of x axis.
- * \param[out] x_max the maximum value of x axis.
- * \param[out] y_min the minimum value of y axis.
- * \param[out] y_max the maximum value of y axis.
- * \param[out] z_min the minimum value of z axis.
- * \param[out] z_max the maximum value of z axis.
- */
- void calcBoundingBox (double &x_min, double &x_max,
- double &y_min, double &y_max,
- double &z_min, double &z_max);
-
- /** \brief Crop the pointcloud by the bounding box calculated from hypothesis and the reference pointcloud.
- * \param[in] cloud a pointer to pointcloud to be cropped.
- * \param[out] output a pointer to be assigned the cropped pointcloud.
- */
- void cropInputPointCloud (const PointCloudInConstPtr &cloud, PointCloudIn &output);
-
-
-
- /** \brief Compute a reference pointcloud transformed to the pose that hypothesis represents.
- * \param[in] hypothesis a particle which represents a hypothesis.
- * \param[in] indices the indices which should be taken into account.
- * \param[out] cloud the resultant point cloud model dataset which is transformed to hypothesis.
- **/
- void computeTransformedPointCloud (const StateT& hypothesis,
- std::vector<int>& indices,
- PointCloudIn &cloud);
-
- /** \brief Compute a reference pointcloud transformed to the pose that hypothesis represents and calculate
- * indices taking occlusion into account.
- * \param[in] hypothesis a particle which represents a hypothesis.
- * \param[in] indices the indices which should be taken into account.
- * \param[out] cloud the resultant point cloud model dataset which is transformed to hypothesis.
- **/
- void computeTransformedPointCloudWithNormal (const StateT& hypothesis,
- std::vector<int>& indices,
- PointCloudIn &cloud);
-
- /** \brief Compute a reference pointcloud transformed to the pose that hypothesis represents and calculate
- * indices without taking occlusion into account.
- * \param[in] hypothesis a particle which represents a hypothesis.
- * \param[out] cloud the resultant point cloud model dataset which is transformed to hypothesis.
- **/
- void computeTransformedPointCloudWithoutNormal (const StateT& hypothesis,
- PointCloudIn &cloud);
-
-
- /** \brief This method should get called before starting the actual computation. */
- bool initCompute () override;
-
- /** \brief Weighting phase of particle filter method. Calculate the likelihood of all of the particles and set the weights. */
- virtual void weight ();
-
- /** \brief Resampling phase of particle filter method. Sampling the particles according to the weights calculated
- * in weight method. In particular, "sample with replacement" is archieved by walker's alias method.
- */
- virtual void resample ();
-
- /** \brief Calculate the weighted mean of the particles and set it as the result. */
- virtual void update ();
-
- /** \brief Normalize the weights of all the particels. */
- virtual void normalizeWeight ();
-
- /** \brief Initialize the particles. initial_noise_covariance_ and initial_noise_mean_ are used for Gaussian sampling. */
- void initParticles (bool reset);
-
- /** \brief Track the pointcloud using particle filter method. */
- void computeTracking () override;
-
- /** \brief Implementation of "sample with replacement" using Walker's alias method.
- about Walker's alias method, you can check the paper below:
- article{355749},
- author = {Walker, Alastair J.},
- title = {An Efficient Method for Generating Discrete
- Random Variables with General Distributions},
- journal = {ACM Trans. Math. Softw.},
- volume = {3},
- number = {3},
- year = {1977},
- issn = {0098-3500},
- pages = {253--256},
- doi = {http://doi.acm.org/10.1145/355744.355749},
- publisher = {ACM},
- address = {New York, NY, USA},
- }
- \param a an alias table, which generated by genAliasTable.
- \param q a table of weight, which generated by genAliasTable.
- */
- int sampleWithReplacement (const std::vector<int>& a, const std::vector<double>& q);
-
- /** \brief Generate the tables for walker's alias method. */
- void genAliasTable (std::vector<int> &a, std::vector<double> &q, const PointCloudStateConstPtr &particles);
-
- /** \brief Resampling the particle with replacement. */
- void
- resampleWithReplacement ();
-
- /** \brief Resampling the particle in deterministic way. */
- void
- resampleDeterministic ();
-
- /** \brief Run change detection and return true if there is a change.
- * \param[in] input a pointer to the input pointcloud.
- */
- bool
- testChangeDetection (const PointCloudInConstPtr &input);
-
- /** \brief The number of iteration of particlefilter. */
- int iteration_num_;
-
- /** \brief The number of the particles. */
- int particle_num_;
-
- /** \brief The minimum number of points which the hypothesis should have. */
- int min_indices_;
-
- /** \brief Adjustment of the particle filter. */
- double fit_ratio_;
-
- /** \brief A pointer to reference point cloud. */
- PointCloudInConstPtr ref_;
-
- /** \brief A pointer to the particles */
- PointCloudStatePtr particles_;
-
- /** \brief A pointer to PointCloudCoherence. */
- CloudCoherencePtr coherence_;
-
- /** \brief The diagonal elements of covariance matrix of the step noise. the covariance matrix is used
- * at every resample method.
- */
- std::vector<double> step_noise_covariance_;
-
- /** \brief The diagonal elements of covariance matrix of the initial noise. the covariance matrix is used
- * when initialize the particles.
- */
- std::vector<double> initial_noise_covariance_;
-
- /** \brief The mean values of initial noise. */
- std::vector<double> initial_noise_mean_;
-
- /** \brief The threshold for the particles to be re-initialized. */
- double resample_likelihood_thr_;
-
- /** \brief The threshold for the points to be considered as occluded. */
- double occlusion_angle_thr_;
-
- /** \brief The weight to be used in normalization of the weights of the particles. */
- double alpha_;
-
- /** \brief The result of tracking. */
- StateT representative_state_;
-
- /** \brief An affine transformation from the world coordinates frame to the origin of the particles. */
- Eigen::Affine3f trans_;
-
- /** \brief A flag to use normal or not. defaults to false. */
- bool use_normal_;
-
- /** \brief Difference between the result in t and t-1. */
- StateT motion_;
-
- /** \brief Ratio of hypothesis to use motion model. */
- double motion_ratio_;
-
- /** \brief Pass through filter to crop the pointclouds within the hypothesis bounding box. */
- pcl::PassThrough<PointInT> pass_x_;
- /** \brief Pass through filter to crop the pointclouds within the hypothesis bounding box. */
- pcl::PassThrough<PointInT> pass_y_;
- /** \brief Pass through filter to crop the pointclouds within the hypothesis bounding box. */
- pcl::PassThrough<PointInT> pass_z_;
-
- /** \brief A list of the pointers to pointclouds. */
- std::vector<PointCloudInPtr> transed_reference_vector_;
-
- /** \brief Change detector used as a trigger to track. */
- typename pcl::octree::OctreePointCloudChangeDetector<PointInT>::Ptr change_detector_;
-
- /** \brief A flag to be true when change of pointclouds is detected. */
- bool changed_;
-
- /** \brief A counter to skip change detection. */
- unsigned int change_counter_;
-
- /** \brief Minimum points in a leaf when calling change detector. defaults to 10. */
- unsigned int change_detector_filter_;
-
- /** \brief The number of interval frame to run change detection. defaults to 10. */
- unsigned int change_detector_interval_;
-
- /** \brief Resolution of change detector. defaults to 0.01. */
- double change_detector_resolution_;
-
- /** \brief The flag which will be true if using change detection. */
- bool use_change_detector_;
- };
- }
-}
+ /** \brief The flag which will be true if using change detection. */
+ bool use_change_detector_;
+};
+} // namespace tracking
+} // namespace pcl
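// The sampleWithReplacement()/genAliasTable() pair documented above implements
// Walker's alias method. For reference, here is a minimal standalone sketch of
// the same technique (not part of PCL; AliasTable, buildAliasTable and
// sampleAlias are illustrative names only):
#include <random>
#include <vector>

struct AliasTable {
  std::vector<int> alias;   // alias index per bucket (the "a" table)
  std::vector<double> prob; // acceptance probability per bucket (the "q" table)
};

// Build the alias/probability tables from arbitrary non-negative weights.
inline AliasTable
buildAliasTable(const std::vector<double>& weights)
{
  const int n = static_cast<int>(weights.size());
  double sum = 0.0;
  for (double w : weights)
    sum += w;

  AliasTable t{std::vector<int>(n, 0), std::vector<double>(n, 1.0)};
  std::vector<double> scaled(n);
  std::vector<int> small, large;
  for (int i = 0; i < n; ++i) {
    scaled[i] = weights[i] * n / sum; // rescale so the average bucket weight is 1
    (scaled[i] < 1.0 ? small : large).push_back(i);
  }
  while (!small.empty() && !large.empty()) {
    const int s = small.back(); small.pop_back();
    const int l = large.back(); large.pop_back();
    t.prob[s] = scaled[s]; // bucket s keeps its own index with probability scaled[s]
    t.alias[s] = l;        // ...and falls back to l otherwise
    scaled[l] -= 1.0 - scaled[s]; // l donated the missing mass
    (scaled[l] < 1.0 ? small : large).push_back(l);
  }
  return t; // leftover buckets keep prob = 1.0
}

// Draw one index in O(1): pick a bucket uniformly, then accept it or take its alias.
inline int
sampleAlias(const AliasTable& t, std::mt19937& rng)
{
  std::uniform_int_distribution<int> bucket(0, static_cast<int>(t.prob.size()) - 1);
  std::uniform_real_distribution<double> coin(0.0, 1.0);
  const int i = bucket(rng);
  return coin(rng) < t.prob[i] ? i : t.alias[i];
}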
// #include <pcl/tracking/impl/particle_filter.hpp>
#ifdef PCL_NO_PRECOMPILE
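// End of the particle_filter.h excerpt. Below is a minimal configuration sketch
// for the tracker declared above; it is illustrative only. It assumes
// pcl::PointXYZ input, the ParticleXYZRPY state type and the
// ApproxNearestPairPointCloudCoherence / DistanceCoherence classes from
// elsewhere in the tracking module; trackOnce is a hypothetical helper, and the
// search-method / maximum-distance setup of the coherence is omitted.
#include <pcl/memory.h>
#include <pcl/point_types.h>
#include <pcl/tracking/approx_nearest_pair_point_cloud_coherence.h>
#include <pcl/tracking/distance_coherence.h>
#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/tracking.h> // ParticleXYZRPY

#include <vector>

inline Eigen::Affine3f
trackOnce(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& reference,
          const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& scene)
{
  using StateT = pcl::tracking::ParticleXYZRPY;
  pcl::tracking::ParticleFilterTracker<pcl::PointXYZ, StateT> tracker;

  // Noise parameters: one entry per state dimension (x, y, z, roll, pitch, yaw).
  tracker.setStepNoiseCovariance(std::vector<double>(6, 0.015));
  tracker.setInitialNoiseCovariance(std::vector<double>(6, 0.0001));
  tracker.setInitialNoiseMean(std::vector<double>(6, 0.0));
  tracker.setIterationNum(1);
  tracker.setParticleNum(400);
  tracker.setResampleLikelihoodThr(0.0);

  // Likelihood: a nearest-pair cloud coherence with a simple distance term.
  auto coherence = pcl::make_shared<
      pcl::tracking::ApproxNearestPairPointCloudCoherence<pcl::PointXYZ>>();
  coherence->addPointCoherence(
      pcl::make_shared<pcl::tracking::DistanceCoherence<pcl::PointXYZ>>());
  tracker.setCloudCoherence(coherence);

  tracker.setReferenceCloud(reference); // model to track
  tracker.setInputCloud(scene);         // measured cloud for the current frame
  tracker.compute();

  return tracker.toEigenMatrix(tracker.getResult());
}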
#pragma once
-#include <pcl/tracking/tracking.h>
-#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/coherence.h>
+#include <pcl/tracking/particle_filter.h>
+#include <pcl/tracking/tracking.h>
-namespace pcl
-{
- namespace tracking
- {
- /** \brief @b ParticleFilterOMPTracker tracks the PointCloud which is given by
- setReferenceCloud within the measured PointCloud using particle filter method
- in parallel, using the OpenMP standard.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT, typename StateT>
- class ParticleFilterOMPTracker: public ParticleFilterTracker<PointInT, StateT>
- {
- public:
- using Tracker<PointInT, StateT>::tracker_name_;
- using Tracker<PointInT, StateT>::search_;
- using Tracker<PointInT, StateT>::input_;
- using Tracker<PointInT, StateT>::indices_;
- using Tracker<PointInT, StateT>::getClassName;
- using ParticleFilterTracker<PointInT, StateT>::particles_;
- using ParticleFilterTracker<PointInT, StateT>::change_detector_;
- using ParticleFilterTracker<PointInT, StateT>::change_counter_;
- using ParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
- using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
- using ParticleFilterTracker<PointInT, StateT>::alpha_;
- using ParticleFilterTracker<PointInT, StateT>::changed_;
- using ParticleFilterTracker<PointInT, StateT>::coherence_;
- using ParticleFilterTracker<PointInT, StateT>::use_normal_;
- using ParticleFilterTracker<PointInT, StateT>::particle_num_;
- using ParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
- using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
- //using ParticleFilterTracker<PointInT, StateT>::calcLikelihood;
- using ParticleFilterTracker<PointInT, StateT>::normalizeWeight;
- using ParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
- using ParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
-
- using BaseClass = Tracker<PointInT, StateT>;
+namespace pcl {
+namespace tracking {
+/** \brief @b ParticleFilterOMPTracker tracks the PointCloud which is given by
+ * setReferenceCloud within the measured PointCloud using particle filter method in
+ * parallel, using the OpenMP standard.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT, typename StateT>
+class ParticleFilterOMPTracker : public ParticleFilterTracker<PointInT, StateT> {
+public:
+ using Tracker<PointInT, StateT>::tracker_name_;
+ using Tracker<PointInT, StateT>::search_;
+ using Tracker<PointInT, StateT>::input_;
+ using Tracker<PointInT, StateT>::indices_;
+ using Tracker<PointInT, StateT>::getClassName;
+ using ParticleFilterTracker<PointInT, StateT>::particles_;
+ using ParticleFilterTracker<PointInT, StateT>::change_detector_;
+ using ParticleFilterTracker<PointInT, StateT>::change_counter_;
+ using ParticleFilterTracker<PointInT, StateT>::change_detector_interval_;
+ using ParticleFilterTracker<PointInT, StateT>::use_change_detector_;
+ using ParticleFilterTracker<PointInT, StateT>::alpha_;
+ using ParticleFilterTracker<PointInT, StateT>::changed_;
+ using ParticleFilterTracker<PointInT, StateT>::coherence_;
+ using ParticleFilterTracker<PointInT, StateT>::use_normal_;
+ using ParticleFilterTracker<PointInT, StateT>::particle_num_;
+ using ParticleFilterTracker<PointInT, StateT>::change_detector_filter_;
+ using ParticleFilterTracker<PointInT, StateT>::transed_reference_vector_;
+ // using ParticleFilterTracker<PointInT, StateT>::calcLikelihood;
+ using ParticleFilterTracker<PointInT, StateT>::normalizeWeight;
+ using ParticleFilterTracker<PointInT, StateT>::normalizeParticleWeight;
+ using ParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
- using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+ using BaseClass = Tracker<PointInT, StateT>;
- using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
- using PointCloudStatePtr = typename PointCloudState::Ptr;
- using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
+ using PointCloudIn = typename Tracker<PointInT, StateT>::PointCloudIn;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
- using Coherence = PointCoherence<PointInT>;
- using CoherencePtr = typename Coherence::Ptr;
- using CoherenceConstPtr = typename Coherence::ConstPtr;
+ using PointCloudState = typename Tracker<PointInT, StateT>::PointCloudState;
+ using PointCloudStatePtr = typename PointCloudState::Ptr;
+ using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
- using CloudCoherence = PointCloudCoherence<PointInT>;
- using CloudCoherencePtr = typename CloudCoherence::Ptr;
- using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
+ using Coherence = PointCoherence<PointInT>;
+ using CoherencePtr = typename Coherence::Ptr;
+ using CoherenceConstPtr = typename Coherence::ConstPtr;
- /** \brief Initialize the scheduler and set the number of threads to use.
- * \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
- ParticleFilterOMPTracker (unsigned int nr_threads = 0)
- : ParticleFilterTracker<PointInT, StateT> ()
- {
- tracker_name_ = "ParticleFilterOMPTracker";
+ using CloudCoherence = PointCloudCoherence<PointInT>;
+ using CloudCoherencePtr = typename CloudCoherence::Ptr;
+ using CloudCoherenceConstPtr = typename CloudCoherence::ConstPtr;
- setNumberOfThreads(nr_threads);
- }
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value
+ * back to automatic)
+ */
+ ParticleFilterOMPTracker(unsigned int nr_threads = 0)
+ : ParticleFilterTracker<PointInT, StateT>()
+ {
+ tracker_name_ = "ParticleFilterOMPTracker";
- /** \brief Initialize the scheduler and set the number of threads to use.
- * \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
- void
- setNumberOfThreads (unsigned int nr_threads = 0);
+ setNumberOfThreads(nr_threads);
+ }
- protected:
- /** \brief The number of threads the scheduler should use. */
- unsigned int threads_;
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value
+ * back to automatic)
+ */
+ void
+ setNumberOfThreads(unsigned int nr_threads = 0);
- /** \brief weighting phase of particle filter method.
- calculate the likelihood of all of the particles and set the weights.
- */
- void weight () override;
+protected:
+ /** \brief The number of threads the scheduler should use. */
+ unsigned int threads_;
- };
- }
-}
+ /** \brief Weighting phase of the particle filter method. Calculate the likelihood of
+ * all of the particles and set the weights.
+ */
+ void
+ weight() override;
+};
+} // namespace tracking
+} // namespace pcl
//#include <pcl/tracking/impl/particle_filter_omp.hpp>
#ifdef PCL_NO_PRECOMPILE
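// End of the particle_filter_omp.h excerpt. ParticleFilterOMPTracker only
// overrides the weighting phase, so it is a drop-in replacement for
// ParticleFilterTracker; the one extra knob is setNumberOfThreads(). Sketch
// only, reusing the illustrative PointXYZ / ParticleXYZRPY choices from the
// previous example (useOmpTracker is a hypothetical helper):
#include <pcl/point_types.h>
#include <pcl/tracking/particle_filter_omp.h>
#include <pcl/tracking/tracking.h> // ParticleXYZRPY

inline void
useOmpTracker(unsigned int threads = 0) // 0 lets the implementation decide
{
  pcl::tracking::ParticleFilterOMPTracker<pcl::PointXYZ, pcl::tracking::ParticleXYZRPY>
      tracker;
  tracker.setNumberOfThreads(threads);
  // The remaining setup (coherence, noise covariances, reference cloud,
  // setInputCloud() and compute()) is identical to the plain tracker above.
}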
#pragma once
+#include <pcl/common/intensity.h>
+#include <pcl/common/transformation_from_correspondences.h>
+#include <pcl/tracking/tracker.h>
#include <pcl/memory.h>
#include <pcl/pcl_macros.h>
#include <pcl/point_types.h>
-#include <pcl/tracking/tracker.h>
-#include <pcl/common/intensity.h>
-#include <pcl/common/transformation_from_correspondences.h>
-namespace pcl
-{
- namespace tracking
+namespace pcl {
+namespace tracking {
+/** Pyramidal Kanade Lucas Tomasi tracker.
+ * This is an implementation of the Pyramidal Kanade Lucas Tomasi tracker that
+ * operates on organized 3D keypoints with color/intensity information (this is
+ * the default behaviour, but you can change it by providing another operator
+ * as the second template argument). It is an affine tracker that iteratively
+ * computes the optical flow to find the best guess for a point p at t given its
+ * location at t-1. The user is advised to respect the Tomasi condition: the
+ * response computed is the maximum eigenvalue of the second moment matrix, but
+ * no restriction is applied to the points to track, so you can use a detector of
+ * your choice to indicate points to track.
+ *
+ * \author Nizar Sallem
+ */
+template <typename PointInT,
+ typename IntensityT = pcl::common::IntensityFieldAccessor<PointInT>>
+class PyramidalKLTTracker : public Tracker<PointInT, Eigen::Affine3f> {
+public:
+ using TrackerBase = pcl::tracking::Tracker<PointInT, Eigen::Affine3f>;
+ using PointCloudIn = typename TrackerBase::PointCloudIn;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+ using FloatImage = pcl::PointCloud<float>;
+ using FloatImagePtr = FloatImage::Ptr;
+ using FloatImageConstPtr = FloatImage::ConstPtr;
+ using Ptr = shared_ptr<PyramidalKLTTracker<PointInT, IntensityT>>;
+ using ConstPtr = shared_ptr<const PyramidalKLTTracker<PointInT, IntensityT>>;
+
+ using TrackerBase::indices_;
+ using TrackerBase::input_;
+ using TrackerBase::tracker_name_;
+
+ /** Constructor */
+ PyramidalKLTTracker(int nb_levels = 5,
+ int tracking_window_width = 7,
+ int tracking_window_height = 7)
+ : ref_()
+ , nb_levels_(nb_levels)
+ , track_width_(tracking_window_width)
+ , track_height_(tracking_window_height)
+ , threads_(0)
+ , initialized_(false)
+ {
+ tracker_name_ = "PyramidalKLTTracker";
+ accuracy_ = 0.1;
+ epsilon_ = 1e-3;
+ max_iterations_ = 10;
+ keypoints_nbr_ = 100;
+ min_eigenvalue_threshold_ = 1e-4;
+ kernel_ << 1.f / 16, 1.f / 4, 3.f / 8, 1.f / 4, 1.f / 16;
+ kernel_size_2_ = kernel_.size() / 2;
+ kernel_last_ = kernel_.size() - 1;
+ }
+
+ /** Destructor */
+ ~PyramidalKLTTracker() {}
+
+ /** \brief Set the number of pyramid levels
+ * \param levels desired number of pyramid levels
+ */
+ inline void
+ setNumberOfPyramidLevels(int levels)
+ {
+ nb_levels_ = levels;
+ }
+
+ /** \return the number of pyramid levels */
+ inline int
+ getNumberOfPyramidLevels() const
+ {
+ return (nb_levels_);
+ }
+
+ /** Set accuracy
+ * \param[in] accuracy desired accuracy.
+ */
+ inline void
+ setAccuracy(float accuracy)
{
- /** Pyramidal Kanade Lucas Tomasi tracker.
- * This is an implementation of the Pyramidal Kanade Lucas Tomasi tracker that operates on
- * organized 3D keypoints with color/intensity information (this is the default behaviour but you
- * can alterate it by providing another operator as second template argument). It is an affine
- * tracker that iteratively computes the optical flow to find the best guess for a point p at t
- * given its location at t-1.
- * User is advised to respect the Tomasi condition: the response computed is the maximum eigenvalue
- * of the second moment matrix but no restrictin are applied to points to track so you can use a
- * detector of your choice to indicate points to track.
- *
- * \author Nizar Sallem
- */
- template<typename PointInT, typename IntensityT = pcl::common::IntensityFieldAccessor<PointInT> >
- class PyramidalKLTTracker : public Tracker<PointInT, Eigen::Affine3f>
- {
- public:
- using TrackerBase = pcl::tracking::Tracker<PointInT, Eigen::Affine3f>;
- using PointCloudIn = typename TrackerBase::PointCloudIn;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
- using FloatImage = pcl::PointCloud<float>;
- using FloatImagePtr = FloatImage::Ptr;
- using FloatImageConstPtr = FloatImage::ConstPtr;
- using Ptr = shared_ptr<PyramidalKLTTracker<PointInT, IntensityT> >;
- using ConstPtr = shared_ptr<const PyramidalKLTTracker<PointInT, IntensityT> >;
-
- using TrackerBase::tracker_name_;
- using TrackerBase::input_;
- using TrackerBase::indices_;
-
- /// Constructor
- PyramidalKLTTracker (int nb_levels = 5, int tracking_window_width = 7, int tracking_window_height = 7)
- : ref_ ()
- , nb_levels_ (nb_levels)
- , track_width_ (tracking_window_width)
- , track_height_ (tracking_window_height)
- , threads_ (0)
- , initialized_ (false)
- {
- tracker_name_ = "PyramidalKLTTracker";
- accuracy_ = 0.1;
- epsilon_ = 1e-3;
- max_iterations_ = 10;
- keypoints_nbr_ = 100;
- min_eigenvalue_threshold_ = 1e-4;
- kernel_ << 1.f/16 ,1.f/4 ,3.f/8 ,1.f/4 ,1.f/16;
- kernel_size_2_ = kernel_.size () / 2;
- kernel_last_ = kernel_.size () -1;
- }
-
- /// Destructor
- ~PyramidalKLTTracker () {}
-
- /** \brief Set the number of pyramid levels
- * \param levels desired number of pyramid levels
- */
- inline void
- setNumberOfPyramidLevels (int levels) { nb_levels_ = levels; }
-
- /// \brief \return the number of pyramid levels
- inline int
- getNumberOfPyramidLevels () const { return (nb_levels_); }
-
- /** Set accuracy
- * \param[in] accuracy desired accuracy.
- */
- inline void
- setAccuracy (float accuracy) { accuracy_ = accuracy; }
-
- /// \return the accuracy
- inline float
- getAccuracy () const { return (accuracy_); }
-
- /** Set epsilon
- * \param[in] epsilon desired epsilon.
- */
- inline void
- setEpsilon (float epsilon) { epsilon_ = epsilon; }
-
- /// \return the epsilon
- inline float
- getEpsilon () const { return (epsilon_); }
-
- /** \brief Set the maximum number of points to track. Only the first keypoints_nbr_
- * are used as points to track after sorting detected keypoints according to their
- * response measure.
- * \param[in] number the desired number of points to detect.
- */
- inline void
- setNumberOfKeypoints (std::size_t number) { keypoints_nbr_ = number; }
-
- /// \return the maximum number of keypoints to keep
- inline std::size_t
- getNumberOfKeypoints () { return (keypoints_nbr_); }
-
- /** \brief set the tracking window size
- * \param[in] width the tracking window width
- * \param[in] height the tracking window height
- */
- inline void
- setTrackingWindowSize (int width, int height);
-
- /// \brief Set tracking window width
- inline void
- setTrackingWindowWidth (int width) {track_width_ = width; };
-
- /// \brief \return the tracking window size
- inline int
- getTrackingWindowWidth () { return (track_width_); }
-
- /// \brief Set tracking window height
- inline void
- setTrackingWindowHeight (int height) {track_height_ = height; };
-
- /// \brief \return the tracking window size
- inline int
- getTrackingWindowHeight () { return (track_height_); }
-
- /** \brief Initialize the scheduler and set the number of threads to use.
- * \param nr_threads the number of hardware threads to use (0 sets the value back to
- * automatic).
- */
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
-
- /** \brief Get a pointer of the cloud at t-1. */
- inline PointCloudInConstPtr
- getReferenceCloud () const { return (ref_); }
-
- /** \brief Set the maximum number of iterations in the Lucas Kanade loop.
- * \param[in] max the desired maximum number of iterations
- */
- inline void
- setMaxIterationsNumber (unsigned int max) { max_iterations_ = max; }
-
- /// \brief \return the maximum iterations number
- inline unsigned int
- getMaxIterationsNumber () const { return (max_iterations_); }
-
- /** \brief Provide a pointer to points to track.
- * \param points the const boost shared pointer to a PointIndices message
- */
- inline void
- setPointsToTrack (const pcl::PointIndicesConstPtr& points);
-
- /** \brief Provide a pointer to points to track.
- * \param points the const boost shared pointer to a PointIndices message
- */
- inline void
- setPointsToTrack (const pcl::PointCloud<pcl::PointUV>::ConstPtr& points);
-
- /// \brief \return a pointer to the points successfully tracked.
- inline pcl::PointCloud<pcl::PointUV>::ConstPtr
- getTrackedPoints () const { return (keypoints_); };
-
- /** \brief \return the status of points to track.
- * Status == 0 --> points successfully tracked;
- * Status < 0 --> point is lost;
- * Status == -1 --> point is out of bond;
- * Status == -2 --> optical flow can not be computed for this point.
- */
- inline pcl::PointIndicesConstPtr
- getPointsToTrackStatus () const { return (keypoints_status_); }
-
- /** \brief Return the computed transformation from tracked points. */
- Eigen::Affine3f
- getResult () const override { return (motion_); }
-
- /// \brief \return initialization state
- bool
- getInitialized () const { return (initialized_); }
-
- protected:
- bool
- initCompute () override;
-
- /** \brief compute Scharr derivatives of a source cloud.
- * \param[in] src the image for which gradients are to be computed
- * \param[out] grad_x image gradient along X direction
- * \param[out] grad_y image gradient along Y direction
- */
- void
- derivatives (const FloatImage& src, FloatImage& grad_x, FloatImage& grad_y) const;
-
- /** \brief downsample input
- * \param[in] input the image to downsample
- * \param[out] output the downsampled image
- */
- void
- downsample (const FloatImageConstPtr& input, FloatImageConstPtr& output) const;
-
- /** \brief downsample input and compute output gradients.
- * \param[in] input the image to downsample
- * \param[out] output the downsampled image
- * \param[out] output_grad_x downsampled image gradient along X direction
- * \param[out] output_grad_y downsampled image gradient along Y direction
- */
- void
- downsample (const FloatImageConstPtr& input, FloatImageConstPtr& output,
- FloatImageConstPtr& output_grad_x, FloatImageConstPtr& output_grad_y) const;
-
- /** \brief Separately convolve image with decomposable convolution kernel.
- * \param[in] input input the image to convolve
- * \param[out] output output the convolved image
- */
- void
- convolve (const FloatImageConstPtr& input, FloatImage& output) const;
-
- /** \brief Convolve image columns.
- * \param[in] input input the image to convolve
- * \param[out] output output the convolved image
- */
- void
- convolveCols (const FloatImageConstPtr& input, FloatImage& output) const;
-
- /** \brief Convolve image rows.
- * \param[in] input input the image to convolve
- * \param[out] output output the convolved image
- */
- void
- convolveRows (const FloatImageConstPtr& input, FloatImage& output) const;
-
- /** \brief extract the patch from the previous image, previous image gradients surrounding
- * pixel alocation while interpolating image and gradients data and compute covariation
- * matrix of derivatives.
- * \param[in] img original image
- * \param[in] grad_x original image gradient along X direction
- * \param[in] grad_y original image gradient along Y direction
- * \param[in] location pixel at the center of the patch
- * \param[in] weights bilinear interpolation weights at this location computed from subpixel
- * location
- * \param[out] win patch with interpolated intensity values
- * \param[out] grad_x_win patch with interpolated gradient along X values
- * \param[out] grad_y_win patch with interpolated gradient along Y values
- * \param[out] covariance covariance matrix coefficients
- */
- virtual void
- spatialGradient (const FloatImage& img,
- const FloatImage& grad_x,
- const FloatImage& grad_y,
- const Eigen::Array2i& location,
- const Eigen::Array4f& weights,
- Eigen::ArrayXXf& win,
- Eigen::ArrayXXf& grad_x_win,
- Eigen::ArrayXXf& grad_y_win,
- Eigen::Array3f & covariance) const;
- void
- mismatchVector (const Eigen::ArrayXXf& prev,
- const Eigen::ArrayXXf& prev_grad_x,
- const Eigen::ArrayXXf& prev_grad_y,
- const FloatImage& next,
- const Eigen::Array2i& location,
- const Eigen::Array4f& weights,
- Eigen::Array2f &b) const;
-
- /** \brief Compute the pyramidal representation of an image.
- * \param[in] input the input cloud
- * \param[out] pyramid computed pyramid levels along with their respective gradients
- * \param[in] border_type
- */
- virtual void
- computePyramids (const PointCloudInConstPtr& input,
- std::vector<FloatImageConstPtr>& pyramid,
- pcl::InterpolationType border_type) const;
-
- virtual void
- track (const PointCloudInConstPtr& previous_input,
- const PointCloudInConstPtr& current_input,
- const std::vector<FloatImageConstPtr>& previous_pyramid,
- const std::vector<FloatImageConstPtr>& current_pyramid,
- const pcl::PointCloud<pcl::PointUV>::ConstPtr& previous_keypoints,
- pcl::PointCloud<pcl::PointUV>::Ptr& current_keypoints,
- std::vector<int>& status,
- Eigen::Affine3f& motion) const;
-
- void
- computeTracking () override;
-
- /// \brief input pyranid at t-1
- std::vector<FloatImageConstPtr> ref_pyramid_;
- /// \brief point cloud at t-1
- PointCloudInConstPtr ref_;
- /// \brief number of pyramid levels
- int nb_levels_;
- /// \brief detected keypoints 2D coordinates
- pcl::PointCloud<pcl::PointUV>::ConstPtr keypoints_;
- /// \brief status of keypoints of t-1 at t
- pcl::PointIndicesPtr keypoints_status_;
- /// \brief number of points to detect
- std::size_t keypoints_nbr_;
- /// \brief tracking width
- int track_width_;
- /// \brief half of tracking window width
- int track_width_2_;
- /// \brief tracking height
- int track_height_;
- /// \brief half of tracking window height
- int track_height_2_;
- /// \brief maximum number of iterations
- unsigned int max_iterations_;
- /// \brief accuracy criterion to stop iterating
- float accuracy_;
- float min_eigenvalue_threshold_;
- /// \brief epsilon for subpixel computation
- float epsilon_;
- float max_residue_;
- /// \brief number of hardware threads
- unsigned int threads_;
- /// \brief intensity accessor
- IntensityT intensity_;
- /// \brief is the tracker initialized ?
- bool initialized_;
- /// \brief compute transformation from successfully tracked points
- pcl::TransformationFromCorrespondences transformation_computer_;
- /// \brief computed transformation between tracked points
- Eigen::Affine3f motion_;
- /// \brief smoothing kernel
- Eigen::Array<float, 5, 1> kernel_;
- /// \brief smoothing kernel half size
- int kernel_size_2_;
- /// \brief index of last element in kernel
- int kernel_last_;
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ accuracy_ = accuracy;
}
-}
+
+ /** \return the accuracy */
+ inline float
+ getAccuracy() const
+ {
+ return (accuracy_);
+ }
+
+ /** Set epsilon
+ * \param[in] epsilon desired epsilon.
+ */
+ inline void
+ setEpsilon(float epsilon)
+ {
+ epsilon_ = epsilon;
+ }
+
+ /** \return the epsilon */
+ inline float
+ getEpsilon() const
+ {
+ return (epsilon_);
+ }
+
+ /** \brief Set the maximum number of points to track. Only the first keypoints_nbr_
+ * detected keypoints are kept, after sorting them according to their response measure.
+ * \param[in] number the desired number of points to detect.
+ */
+ inline void
+ setNumberOfKeypoints(std::size_t number)
+ {
+ keypoints_nbr_ = number;
+ }
+
+ /** \return the maximum number of keypoints to keep */
+ inline std::size_t
+ getNumberOfKeypoints()
+ {
+ return (keypoints_nbr_);
+ }
+
+ /** \brief set the tracking window size
+ * \param[in] width the tracking window width
+ * \param[in] height the tracking window height
+ */
+ inline void
+ setTrackingWindowSize(int width, int height);
+
+ /** \brief Set tracking window width */
+ inline void
+ setTrackingWindowWidth(int width)
+ {
+ track_width_ = width;
+ }
+
+ /** \return the tracking window size */
+ inline int
+ getTrackingWindowWidth()
+ {
+ return (track_width_);
+ }
+
+ /** \brief Set tracking window height */
+ inline void
+ setTrackingWindowHeight(int height)
+ {
+ track_height_ = height;
+ }
+
+ /** \return the tracking window size */
+ inline int
+ getTrackingWindowHeight()
+ {
+ return (track_height_);
+ }
+
+ /** \brief Initialize the scheduler and set the number of threads to use.
+ * \param nr_threads the number of hardware threads to use (0 sets the value
+ * back to automatic).
+ */
+ inline void
+ setNumberOfThreads(unsigned int nr_threads = 0)
+ {
+ threads_ = nr_threads;
+ }
+
+ /** \brief Get a pointer of the cloud at t-1. */
+ inline PointCloudInConstPtr
+ getReferenceCloud() const
+ {
+ return (ref_);
+ }
+
+ /** \brief Set the maximum number of iterations in the Lucas Kanade loop.
+ * \param[in] max the desired maximum number of iterations
+ */
+ inline void
+ setMaxIterationsNumber(unsigned int max)
+ {
+ max_iterations_ = max;
+ }
+
+ /** \return the maximum iterations number */
+ inline unsigned int
+ getMaxIterationsNumber() const
+ {
+ return (max_iterations_);
+ }
+
+ /** \brief Provide a pointer to points to track.
+ * \param points the const boost shared pointer to a PointIndices message
+ */
+ inline void
+ setPointsToTrack(const pcl::PointIndicesConstPtr& points);
+
+ /** \brief Provide a pointer to points to track.
+ * \param points the const shared pointer to a PointCloud of pcl::PointUV keypoints
+ */
+ inline void
+ setPointsToTrack(const pcl::PointCloud<pcl::PointUV>::ConstPtr& points);
+
+ /** \return a pointer to the points successfully tracked. */
+ inline pcl::PointCloud<pcl::PointUV>::ConstPtr
+ getTrackedPoints() const
+ {
+ return (keypoints_);
+ }
+
+ /** \return the status of points to track.
+ * Status == 0 --> points successfully tracked;
+ * Status < 0 --> point is lost;
+ * Status == -1 --> point is out of bounds;
+ * Status == -2 --> optical flow cannot be computed for this point.
+ */
+ inline pcl::PointIndicesConstPtr
+ getPointsToTrackStatus() const
+ {
+ return (keypoints_status_);
+ }
+
+ /** \brief Return the computed transformation from tracked points. */
+ Eigen::Affine3f
+ getResult() const override
+ {
+ return (motion_);
+ }
+
+ /** \return initialization state */
+ bool
+ getInitialized() const
+ {
+ return (initialized_);
+ }
+
+protected:
+ bool
+ initCompute() override;
+
+ /** \brief compute Scharr derivatives of a source cloud.
+ * \param[in] src the image for which gradients are to be computed
+ * \param[out] grad_x image gradient along X direction
+ * \param[out] grad_y image gradient along Y direction
+ */
+ void
+ derivatives(const FloatImage& src, FloatImage& grad_x, FloatImage& grad_y) const;
+
+ /** \brief downsample input
+ * \param[in] input the image to downsample
+ * \param[out] output the downsampled image
+ */
+ void
+ downsample(const FloatImageConstPtr& input, FloatImageConstPtr& output) const;
+
+ /** \brief downsample input and compute output gradients.
+ * \param[in] input the image to downsample
+ * \param[out] output the downsampled image
+ * \param[out] output_grad_x downsampled image gradient along X direction
+ * \param[out] output_grad_y downsampled image gradient along Y direction
+ */
+ void
+ downsample(const FloatImageConstPtr& input,
+ FloatImageConstPtr& output,
+ FloatImageConstPtr& output_grad_x,
+ FloatImageConstPtr& output_grad_y) const;
+
+ /** \brief Separately convolve image with decomposable convolution kernel.
+ * \param[in] input the image to convolve
+ * \param[out] output the convolved image
+ */
+ void
+ convolve(const FloatImageConstPtr& input, FloatImage& output) const;
+
+ /** \brief Convolve image columns.
+ * \param[in] input the image to convolve
+ * \param[out] output the convolved image
+ */
+ void
+ convolveCols(const FloatImageConstPtr& input, FloatImage& output) const;
+
+ /** \brief Convolve image rows.
+ * \param[in] input the image to convolve
+ * \param[out] output the convolved image
+ */
+ void
+ convolveRows(const FloatImageConstPtr& input, FloatImage& output) const;
+
+ /** \brief Extract the patch from the previous image and the previous image
+ * gradients surrounding a pixel location, interpolating the image and gradient
+ * data, and compute the covariance matrix of the derivatives.
+ * \param[in] img original image
+ * \param[in] grad_x original image gradient along X direction
+ * \param[in] grad_y original image gradient along Y direction
+ * \param[in] location pixel at the center of the patch
+ * \param[in] weights bilinear interpolation weights at this location computed from
+ * subpixel location
+ * \param[out] win patch with interpolated intensity values
+ * \param[out] grad_x_win patch with interpolated gradient along X values
+ * \param[out] grad_y_win patch with interpolated gradient along Y values
+ * \param[out] covariance covariance matrix coefficients
+ */
+ virtual void
+ spatialGradient(const FloatImage& img,
+ const FloatImage& grad_x,
+ const FloatImage& grad_y,
+ const Eigen::Array2i& location,
+ const Eigen::Array4f& weights,
+ Eigen::ArrayXXf& win,
+ Eigen::ArrayXXf& grad_x_win,
+ Eigen::ArrayXXf& grad_y_win,
+ Eigen::Array3f& covariance) const;
+ void
+ mismatchVector(const Eigen::ArrayXXf& prev,
+ const Eigen::ArrayXXf& prev_grad_x,
+ const Eigen::ArrayXXf& prev_grad_y,
+ const FloatImage& next,
+ const Eigen::Array2i& location,
+ const Eigen::Array4f& weights,
+ Eigen::Array2f& b) const;
+
+ /** \brief Compute the pyramidal representation of an image.
+ * \param[in] input the input cloud
+ * \param[out] pyramid computed pyramid levels along with their respective
+ * gradients
+ * \param[in] border_type
+ */
+ virtual void
+ computePyramids(const PointCloudInConstPtr& input,
+ std::vector<FloatImageConstPtr>& pyramid,
+ pcl::InterpolationType border_type) const;
+
+ virtual void
+ track(const PointCloudInConstPtr& previous_input,
+ const PointCloudInConstPtr& current_input,
+ const std::vector<FloatImageConstPtr>& previous_pyramid,
+ const std::vector<FloatImageConstPtr>& current_pyramid,
+ const pcl::PointCloud<pcl::PointUV>::ConstPtr& previous_keypoints,
+ pcl::PointCloud<pcl::PointUV>::Ptr& current_keypoints,
+ std::vector<int>& status,
+ Eigen::Affine3f& motion) const;
+
+ void
+ computeTracking() override;
+
+ /** \brief input pyramid at t-1 */
+ std::vector<FloatImageConstPtr> ref_pyramid_;
+ /** \brief point cloud at t-1 */
+ PointCloudInConstPtr ref_;
+ /** \brief number of pyramid levels */
+ int nb_levels_;
+ /** \brief detected keypoints 2D coordinates */
+ pcl::PointCloud<pcl::PointUV>::ConstPtr keypoints_;
+ /** \brief status of keypoints of t-1 at t */
+ pcl::PointIndicesPtr keypoints_status_;
+ /** \brief number of points to detect */
+ std::size_t keypoints_nbr_;
+ /** \brief tracking width */
+ int track_width_;
+ /** \brief half of tracking window width */
+ int track_width_2_;
+ /** \brief tracking height */
+ int track_height_;
+ /** \brief half of tracking window height */
+ int track_height_2_;
+ /** \brief maximum number of iterations */
+ unsigned int max_iterations_;
+ /** \brief accuracy criterion to stop iterating */
+ float accuracy_;
+ float min_eigenvalue_threshold_;
+ /** \brief epsilon for subpixel computation */
+ float epsilon_;
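+ /** \brief maximum residue tolerated before a tracked point is marked as lost */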
+ float max_residue_;
+ /** \brief number of hardware threads */
+ unsigned int threads_;
+ /** \brief intensity accessor */
+ IntensityT intensity_;
+ /** \brief is the tracker initialized? */
+ bool initialized_;
+ /** \brief compute transformation from successfully tracked points */
+ pcl::TransformationFromCorrespondences transformation_computer_;
+ /** \brief computed transformation between tracked points */
+ Eigen::Affine3f motion_;
+ /** \brief smoothing kernel */
+ Eigen::Array<float, 5, 1> kernel_;
+ /** \brief smoothing kernel half size */
+ int kernel_size_2_;
+ /** \brief index of last element in kernel */
+ int kernel_last_;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace tracking
+} // namespace pcl
#include <pcl/tracking/impl/pyramidal_klt.hpp>
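// A minimal sketch, not part of the header above, of how the declared helpers are
// expected to combine in one Lucas-Kanade iteration: spatialGradient() fills the
// interpolated patch and the three coefficients of the symmetric 2x2 gradient
// matrix G, mismatchVector() fills the 2-vector b, and the per-iteration
// displacement is d = G^-1 * b. The coefficient order (gxx, gxy, gyy) is an
// assumption for illustration only; the fragment below is commented because it is
// not self-contained (it refers to members such as accuracy_ and max_iterations_).
//
//   Eigen::Array3f g;  // gxx, gxy, gyy filled by spatialGradient()
//   Eigen::Array2f b;  // filled by mismatchVector()
//   const float det = g[0] * g[2] - g[1] * g[1];
//   if (std::abs (det) > std::numeric_limits<float>::epsilon ())
//   {
//     const Eigen::Array2f d ((g[2] * b[0] - g[1] * b[1]) / det,
//                             (g[0] * b[1] - g[1] * b[0]) / det);
//     // update the keypoint estimate by d and iterate until |d| < accuracy_
//     // or max_iterations_ is reached
//   }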
#pragma once
+#include <pcl/search/search.h>
#include <pcl/tracking/tracking.h>
#include <pcl/memory.h>
#include <pcl/pcl_base.h>
#include <pcl/pcl_macros.h>
-#include <pcl/search/search.h>
-namespace pcl
-{
- namespace tracking
+namespace pcl {
+namespace tracking {
+/** \brief @b Tracker represents the base tracker class.
+ * \author Ryohei Ueda
+ * \ingroup tracking
+ */
+template <typename PointInT, typename StateT>
+class Tracker : public PCLBase<PointInT> {
+protected:
+ using PCLBase<PointInT>::deinitCompute;
+
+public:
+ using PCLBase<PointInT>::indices_;
+ using PCLBase<PointInT>::input_;
+
+ using BaseClass = PCLBase<PointInT>;
+ using Ptr = shared_ptr<Tracker<PointInT, StateT>>;
+ using ConstPtr = shared_ptr<const Tracker<PointInT, StateT>>;
+
+ using SearchPtr = typename pcl::search::Search<PointInT>::Ptr;
+ using SearchConstPtr = typename pcl::search::Search<PointInT>::ConstPtr;
+
+ using PointCloudIn = pcl::PointCloud<PointInT>;
+ using PointCloudInPtr = typename PointCloudIn::Ptr;
+ using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
+
+ using PointCloudState = pcl::PointCloud<StateT>;
+ using PointCloudStatePtr = typename PointCloudState::Ptr;
+ using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
+
+public:
+ /** \brief Empty constructor. */
+ Tracker() : search_() {}
+
+ /** \brief Base method for tracking all points given via
+ * <setInputCloud (), setIndices ()>, using the indices provided by setIndices ().
+ */
+ void
+ compute();
+
+protected:
+ /** \brief The tracker name. */
+ std::string tracker_name_;
+
+ /** \brief A pointer to the spatial search object. */
+ SearchPtr search_;
+
+ /** \brief Get a string representation of the name of this class. */
+ inline const std::string&
+ getClassName() const
+ {
+ return (tracker_name_);
+ }
+
+ /** \brief This method should get called before starting the actual
+ * computation. */
+ virtual bool
+ initCompute();
+
+ /** \brief Provide a pointer to a dataset to add additional information
+ * to estimate the features for every point in the input dataset. This
+ * is optional; if it is not set, only the data in the input cloud is used
+ * to estimate the features. This is useful when you only need to compute
+ * the features for a downsampled cloud.
+ * \param search a pointer to the spatial search object
+ */
+ inline void
+ setSearchMethod(const SearchPtr& search)
{
- /** \brief @b Tracker represents the base tracker class.
- * \author Ryohei Ueda
- * \ingroup tracking
- */
- template <typename PointInT, typename StateT>
- class Tracker: public PCLBase<PointInT>
- {
- protected:
- using PCLBase<PointInT>::deinitCompute;
-
- public:
- using PCLBase<PointInT>::indices_;
- using PCLBase<PointInT>::input_;
-
- using BaseClass = PCLBase<PointInT>;
- using Ptr = shared_ptr< Tracker<PointInT, StateT> >;
- using ConstPtr = shared_ptr< const Tracker<PointInT, StateT> >;
-
- using SearchPtr = typename pcl::search::Search<PointInT>::Ptr;
- using SearchConstPtr = typename pcl::search::Search<PointInT>::ConstPtr;
-
- using PointCloudIn = pcl::PointCloud<PointInT>;
- using PointCloudInPtr = typename PointCloudIn::Ptr;
- using PointCloudInConstPtr = typename PointCloudIn::ConstPtr;
-
- using PointCloudState = pcl::PointCloud<StateT>;
- using PointCloudStatePtr = typename PointCloudState::Ptr;
- using PointCloudStateConstPtr = typename PointCloudState::ConstPtr;
-
- public:
- /** \brief Empty constructor. */
- Tracker (): search_ () {}
-
- /** \brief Base method for tracking for all points given in
- * <setInputCloud (), setIndices ()> using the indices in setIndices ()
- */
- void
- compute ();
-
- protected:
- /** \brief The tracker name. */
- std::string tracker_name_;
-
- /** \brief A pointer to the spatial search object. */
- SearchPtr search_;
-
- /** \brief Get a string representation of the name of this class. */
- inline const std::string&
- getClassName () const { return (tracker_name_); }
-
- /** \brief This method should get called before starting the actual computation. */
- virtual bool
- initCompute ();
-
- /** \brief Provide a pointer to a dataset to add additional information
- * to estimate the features for every point in the input dataset. This
- * is optional, if this is not set, it will only use the data in the
- * input cloud to estimate the features. This is useful when you only
- * need to compute the features for a downsampled cloud.
- * \param search a pointer to a PointCloud message
- */
- inline void
- setSearchMethod (const SearchPtr &search) { search_ = search; }
-
- /** \brief Get a pointer to the point cloud dataset. */
- inline SearchPtr
- getSearchMethod () { return (search_); }
-
- /** \brief Get an instance of the result of tracking. */
- virtual StateT
- getResult () const = 0;
-
- private:
- /** \brief Abstract tracking method. */
- virtual void
- computeTracking () = 0;
-
- public:
- PCL_MAKE_ALIGNED_OPERATOR_NEW
- };
+ search_ = search;
}
-}
+
+ /** \brief Get a pointer to the spatial search object. */
+ inline SearchPtr
+ getSearchMethod()
+ {
+ return (search_);
+ }
+
+ /** \brief Get an instance of the result of tracking. */
+ virtual StateT
+ getResult() const = 0;
+
+private:
+ /** \brief Abstract tracking method. */
+ virtual void
+ computeTracking() = 0;
+
+public:
+ PCL_MAKE_ALIGNED_OPERATOR_NEW
+};
+} // namespace tracking
+} // namespace pcl
#include <pcl/tracking/impl/tracker.hpp>
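// A minimal sketch, assuming nothing beyond the interface declared above, of the
// contract Tracker defines: a concrete tracker implements computeTracking() and
// getResult(), and is driven through setInputCloud()/compute(). MinimalTracker and
// its member result_ are illustrative placeholders, not PCL types.
#include <pcl/tracking/tracker.h>

template <typename PointInT, typename StateT>
class MinimalTracker : public pcl::tracking::Tracker<PointInT, StateT> {
public:
  MinimalTracker() { this->tracker_name_ = "MinimalTracker"; }

  /** \brief Expose the last computed state. */
  StateT
  getResult() const override
  {
    return (result_);
  }

protected:
  /** \brief Called by compute(); estimate the new state from this->input_. */
  void
  computeTracking() override
  {
    result_ = StateT();
  }

  StateT result_;
};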
#include <pcl/point_types.h>
-namespace pcl
-{
- namespace tracking
- {
- /* state definition */
- struct ParticleXYZRPY;
- struct ParticleXYR;
+namespace pcl {
+namespace tracking {
+/* state definition */
+struct ParticleXYZRPY;
+struct ParticleXYR;
- /* \brief return the value of normal distribution */
- PCL_EXPORTS double
- sampleNormal (double mean, double sigma);
- }
-}
+/** \brief Return a sample drawn from a normal distribution with the given mean and variance. */
+PCL_EXPORTS double
+sampleNormal(double mean, double sigma);
+} // namespace tracking
+} // namespace pcl
#include <pcl/tracking/impl/tracking.hpp>
// ==============================
// =====POINT_CLOUD_REGISTER=====
// ==============================
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::tracking::_ParticleXYZRPY,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, roll, roll)
- (float, pitch, pitch)
- (float, yaw, yaw)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYZRPY, pcl::tracking::_ParticleXYZRPY)
+// clang-format off
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::tracking::_ParticleXYZRPY,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, roll, roll)
+ (float, pitch, pitch)
+ (float, yaw, yaw))
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYZRPY,
+ pcl::tracking::_ParticleXYZRPY)
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::tracking::_ParticleXYRPY,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, roll, roll)
- (float, pitch, pitch)
- (float, yaw, yaw)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYRPY, pcl::tracking::_ParticleXYRPY)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::tracking::_ParticleXYRPY,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, roll, roll)
+ (float, pitch, pitch)
+ (float, yaw, yaw))
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYRPY,
+ pcl::tracking::_ParticleXYRPY)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::tracking::_ParticleXYRP,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, roll, roll)
+ (float, pitch, pitch)
+ (float, yaw, yaw))
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYRP,
+ pcl::tracking::_ParticleXYRP)
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::tracking::_ParticleXYRP,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, roll, roll)
- (float, pitch, pitch)
- (float, yaw, yaw)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYRP, pcl::tracking::_ParticleXYRP)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::tracking::_ParticleXYR,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, roll, roll)
+ (float, pitch, pitch)
+ (float, yaw, yaw))
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYR,
+ pcl::tracking::_ParticleXYR)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::tracking::_ParticleXYR,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, roll, roll)
- (float, pitch, pitch)
- (float, yaw, yaw)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYR, pcl::tracking::_ParticleXYR)
-
-POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::tracking::_ParticleXYZR,
- (float, x, x)
- (float, y, y)
- (float, z, z)
- (float, roll, roll)
- (float, pitch, pitch)
- (float, yaw, yaw)
-)
-POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYZR, pcl::tracking::_ParticleXYZR)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::tracking::_ParticleXYZR,
+ (float, x, x)
+ (float, y, y)
+ (float, z, z)
+ (float, roll, roll)
+ (float, pitch, pitch)
+ (float, yaw, yaw))
+POINT_CLOUD_REGISTER_POINT_WRAPPER(pcl::tracking::ParticleXYZR,
+ pcl::tracking::_ParticleXYZR)
+// clang-format on
#ifdef PCL_NO_PRECOMPILE
#include <pcl/tracking/impl/tracking.hpp>
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
+
+// clang-format off
PCL_INSTANTIATE(ApproxNearestPairPointCloudCoherence, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(DistanceCoherence, PCL_XYZ_POINT_TYPES)
-PCL_INSTANTIATE(HSVColorCoherence, (pcl::PointXYZRGB)(pcl::PointXYZRGBNormal)(pcl::PointXYZRGBA))
+PCL_INSTANTIATE(HSVColorCoherence,
+ (pcl::PointXYZRGB)
+ (pcl::PointXYZRGBNormal)
+ (pcl::PointXYZRGBA))
PCL_INSTANTIATE(NearestPairPointCloudCoherence, PCL_XYZ_POINT_TYPES)
PCL_INSTANTIATE(NormalCoherence, PCL_NORMAL_POINT_TYPES)
-#endif // PCL_NO_PRECOMPILE
-
+// clang-format on
+#endif // PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
#define PCL_TRACKING_NORMAL_SUPPORTED
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker, ((pcl::PointNormal) (pcl::PointXYZINormal) (pcl::PointXYZRGBNormal))(PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker, ((pcl::PointNormal) (pcl::PointXYZINormal) (pcl::PointXYZRGBNormal))(PCL_STATE_POINT_TYPES))
-#undef PCL_TRACKING_NORMAL_SUPPORTED
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker, ((pcl::PointXYZ) (pcl::PointXYZI) (pcl::PointXYZRGBA) (pcl::PointXYZRGB) (pcl::InterestPoint) (pcl::PointWithRange) (pcl::PointWithViewpoint) (pcl::PointWithScale))(PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker, ((pcl::PointXYZ) (pcl::PointXYZI) (pcl::PointXYZRGBA) (pcl::PointXYZRGB) (pcl::InterestPoint) (pcl::PointWithRange) (pcl::PointWithViewpoint) (pcl::PointWithScale))(PCL_STATE_POINT_TYPES))
-#endif // PCL_NO_PRECOMPILE
+// clang-format off
+PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker,
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal))
+ (PCL_STATE_POINT_TYPES))
+PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker,
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal))
+ (PCL_STATE_POINT_TYPES))
+// clang-format on
+#undef PCL_TRACKING_NORMAL_SUPPORTED
+// clang-format off
+PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterOMPTracker,
+ ((pcl::PointXYZ)
+ (pcl::PointXYZI)
+ (pcl::PointXYZRGBA)
+ (pcl::PointXYZRGB)
+ (pcl::InterestPoint)
+ (pcl::PointWithRange)
+ (pcl::PointWithViewpoint)
+ (pcl::PointWithScale))
+ (PCL_STATE_POINT_TYPES))
+PCL_INSTANTIATE_PRODUCT(KLDAdaptiveParticleFilterTracker,
+ ((pcl::PointXYZ)
+ (pcl::PointXYZI)
+ (pcl::PointXYZRGBA)
+ (pcl::PointXYZRGB)
+ (pcl::InterestPoint)
+ (pcl::PointWithRange)
+ (pcl::PointWithViewpoint)
+ (pcl::PointWithScale))
+ (PCL_STATE_POINT_TYPES))
+// clang-format on
+#endif // PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
#define PCL_TRACKING_NORMAL_SUPPORTED
-PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker, ((pcl::PointNormal) (pcl::PointXYZINormal) (pcl::PointXYZRGBNormal))(PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker, ((pcl::PointNormal) (pcl::PointXYZINormal) (pcl::PointXYZRGBNormal))(PCL_STATE_POINT_TYPES))
-#undef PCL_TRACKING_NORMAL_SUPPORTED
-PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker, ((pcl::PointXYZ) (pcl::PointXYZI) (pcl::PointXYZRGBA) (pcl::PointXYZRGB) (pcl::InterestPoint) (pcl::PointWithRange) (pcl::PointWithViewpoint) (pcl::PointWithScale))(PCL_STATE_POINT_TYPES))
-PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker, ((pcl::PointXYZ) (pcl::PointXYZI) (pcl::PointXYZRGBA) (pcl::PointXYZRGB) (pcl::InterestPoint) (pcl::PointWithRange) (pcl::PointWithViewpoint) (pcl::PointWithScale))(PCL_STATE_POINT_TYPES))
-#endif // PCL_NO_PRECOMPILE
+// clang-format off
+PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker,
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal))
+ (PCL_STATE_POINT_TYPES))
+PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker,
+ ((pcl::PointNormal)
+ (pcl::PointXYZINormal)
+ (pcl::PointXYZRGBNormal))
+ (PCL_STATE_POINT_TYPES))
+// clang-format on
+#undef PCL_TRACKING_NORMAL_SUPPORTED
+// clang-format off
+PCL_INSTANTIATE_PRODUCT(ParticleFilterTracker,
+ ((pcl::PointXYZ)
+ (pcl::PointXYZI)
+ (pcl::PointXYZRGBA)
+ (pcl::PointXYZRGB)
+ (pcl::InterestPoint)
+ (pcl::PointWithRange)
+ (pcl::PointWithViewpoint)
+ (pcl::PointWithScale))
+ (PCL_STATE_POINT_TYPES))
+PCL_INSTANTIATE_PRODUCT(ParticleFilterOMPTracker,
+ ((pcl::PointXYZ)
+ (pcl::PointXYZI)
+ (pcl::PointXYZRGBA)
+ (pcl::PointXYZRGB)
+ (pcl::InterestPoint)
+ (pcl::PointWithRange)
+ (pcl::PointWithViewpoint)
+ (pcl::PointWithScale))
+ (PCL_STATE_POINT_TYPES))
+// clang-format on
+#endif // PCL_NO_PRECOMPILE
#include <random>
double
-pcl::tracking::sampleNormal (double mean, double sigma)
+pcl::tracking::sampleNormal(double mean, double sigma)
{
- static std::mt19937 rng([] { std::random_device rd; return rd(); } ());
- std::normal_distribution<> nd (mean, sqrt (sigma));
-
- return (nd (rng));
+ static std::mt19937 rng([] {
+ std::random_device rd;
+ return rd();
+ }());
+ std::normal_distribution<> nd(mean, sqrt(sigma));
+
+ return (nd(rng));
}
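// Usage note: the second argument of sampleNormal() is treated as a variance, not a
// standard deviation, since the implementation passes sqrt(sigma) to
// std::normal_distribution. For example:
//
//   double noise = pcl::tracking::sampleNormal (0.0, 0.01); // stddev 0.1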
vtkSmartPointer<vtkPoints> poly_points = vtkSmartPointer<vtkPoints>::New ();
vtkSmartPointer<vtkPolygon> polygon = vtkSmartPointer<vtkPolygon>::New ();
- poly_points->SetNumberOfPoints (cloud->points.size ());
- polygon->GetPointIds ()->SetNumberOfIds (cloud->points.size ());
+ poly_points->SetNumberOfPoints (cloud->size ());
+ polygon->GetPointIds ()->SetNumberOfIds (cloud->size ());
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- poly_points->SetPoint (i, cloud->points[i].x, cloud->points[i].y, cloud->points[i].z);
+ poly_points->SetPoint (i, (*cloud)[i].x, (*cloud)[i].y, (*cloud)[i].z);
polygon->GetPointIds ()->SetId (i, i);
}
#pragma once
-#include <pcl/ModelCoefficients.h>
#include <pcl/point_cloud.h>
#include <pcl/visualization/eigen.h>
-#include <pcl/geometry/planar_polygon.h>
template <typename T> class vtkSmartPointer;
class vtkDataSet;
/*@{*/
namespace pcl
{
+ struct ModelCoefficients;
+ template <typename PointT> class PlanarPolygon;
+
namespace visualization
{
/** \brief Create a 3d poly line from a set of points.
for (int d = 0; d < hsize; ++d)
{
xy[0] = d;
- xy[1] = cloud.points[0].histogram[d];
+ xy[1] = cloud[0].histogram[d];
xy_array->SetTuple (d, xy);
}
RenWinInteract renwinint;
const int index,
const std::string &id, int win_width, int win_height)
{
- if (index < 0 || index >= cloud.points.size ())
+ if (index < 0 || index >= cloud.size ())
{
PCL_ERROR ("[addFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
for (std::uint32_t d = 0; d < fields[field_idx].count; ++d)
{
xy[0] = d;
- //xy[1] = cloud.points[index].histogram[d];
+ //xy[1] = cloud[index].histogram[d];
float data;
- memcpy (&data, reinterpret_cast<const char*> (&cloud.points[index]) + fields[field_idx].offset + d * sizeof (float), sizeof (float));
+ memcpy (&data, reinterpret_cast<const char*> (&cloud[index]) + fields[field_idx].offset + d * sizeof (float), sizeof (float));
xy[1] = data;
xy_array->SetTuple (d, xy);
}
for (int d = 0; d < hsize; ++d)
{
xy[0] = d;
- xy[1] = cloud.points[0].histogram[d];
+ xy[1] = cloud[0].histogram[d];
xy_array->SetTuple (d, xy);
}
reCreateActor (xy_array, renwinupd, hsize);
const pcl::PointCloud<PointT> &cloud, const std::string &field_name, const int index,
const std::string &id)
{
- if (index < 0 || index >= cloud.points.size ())
+ if (index < 0 || index >= cloud.size ())
{
PCL_ERROR ("[updateFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
for (std::uint32_t d = 0; d < fields[field_idx].count; ++d)
{
xy[0] = d;
- //xy[1] = cloud.points[index].histogram[d];
+ //xy[1] = cloud[index].histogram[d];
float data;
- memcpy (&data, reinterpret_cast<const char*> (&cloud.points[index]) + fields[field_idx].offset + d * sizeof (float), sizeof (float));
+ memcpy (&data, reinterpret_cast<const char*> (&cloud[index]) + fields[field_idx].offset + d * sizeof (float), sizeof (float));
xy[1] = data;
xy_array->SetTuple (d, xy);
}
boost::shared_array<unsigned char> &data)
{
int j = 0;
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (const auto& point: cloud)
{
- data[j++] = cloud.points[i].r;
- data[j++] = cloud.points[i].g;
- data[j++] = cloud.points[i].b;
+ data[j++] = point.r;
+ data[j++] = point.g;
+ data[j++] = point.b;
}
}
// Construct a search object to get the camera parameters
pcl::search::OrganizedNeighbor<T> search;
search.setInputCloud (image);
- std::vector<pcl::PointXY> pp_2d (mask.points.size ());
- for (std::size_t i = 0; i < mask.points.size (); ++i)
- search.projectPoint (mask.points[i], pp_2d[i]);
+ std::vector<pcl::PointXY> pp_2d (mask.size ());
+ for (std::size_t i = 0; i < mask.size (); ++i)
+ search.projectPoint (mask[i], pp_2d[i]);
pcl::PointXY min_pt_2d, max_pt_2d;
min_pt_2d.x = min_pt_2d.y = std::numeric_limits<float>::max ();
for (int i = 0; i < hsize; ++i)
{
array_x[i] = i;
- array_y[i] = cloud.points[0].histogram[i];
+ array_y[i] = cloud[0].histogram[i];
}
this->addPlotData(array_x, array_y, id.c_str(), vtkChart::LINE);
const int index,
const std::string &id, int win_width, int win_height)
{
- if (index < 0 || index >= cloud.points.size ())
+ if (index < 0 || index >= cloud.size ())
{
PCL_ERROR ("[addFeatureHistogram] Invalid point index (%d) given!\n", index);
return (false);
array_x[i] = i;
float data;
// TODO: replace float with the real data type
- memcpy (&data, reinterpret_cast<const char*> (&cloud.points[index]) + fields[field_idx].offset + i * sizeof (float), sizeof (float));
+ memcpy (&data, reinterpret_cast<const char*> (&cloud[index]) + fields[field_idx].offset + i * sizeof (float), sizeof (float));
array_y[i] = data;
}
if (!vertices)
vertices = vtkSmartPointer<vtkCellArray>::New ();
- vtkIdType nr_points = cloud->points.size ();
+ vtkIdType nr_points = cloud->size ();
// Create the point set
vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
if (!points)
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
}
else
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!std::isfinite (cloud->points[i].x) ||
- !std::isfinite (cloud->points[i].y) ||
- !std::isfinite (cloud->points[i].z))
+ if (!std::isfinite ((*cloud)[i].x) ||
+ !std::isfinite ((*cloud)[i].y) ||
+ !std::isfinite ((*cloud)[i].z))
continue;
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
int level, float scale,
const std::string &id, int viewport)
{
- if (normals->points.size () != cloud->points.size ())
+ if (normals->size () != cloud->size ())
{
PCL_ERROR ("[addPointCloudNormals] The number of points differs from the number of normals!\n");
return (false);
}
else
{
- nr_normals = (cloud->points.size () - 1) / level + 1 ;
+ nr_normals = (cloud->size () - 1) / level + 1 ;
pts = new float[2 * nr_normals * 3];
for (vtkIdType i = 0, j = 0; j < nr_normals; j++, i = j * level)
{
- PointT p = cloud->points[i];
- p.x += normals->points[i].normal[0] * scale;
- p.y += normals->points[i].normal[1] * scale;
- p.z += normals->points[i].normal[2] * scale;
-
- pts[2 * j * 3 + 0] = cloud->points[i].x;
- pts[2 * j * 3 + 1] = cloud->points[i].y;
- pts[2 * j * 3 + 2] = cloud->points[i].z;
+ PointT p = (*cloud)[i];
+ p.x += (*normals)[i].normal[0] * scale;
+ p.y += (*normals)[i].normal[1] * scale;
+ p.z += (*normals)[i].normal[2] * scale;
+
+ pts[2 * j * 3 + 0] = (*cloud)[i].x;
+ pts[2 * j * 3 + 1] = (*cloud)[i].y;
+ pts[2 * j * 3 + 2] = (*cloud)[i].z;
pts[2 * j * 3 + 3] = p.x;
pts[2 * j * 3 + 4] = p.y;
pts[2 * j * 3 + 5] = p.z;
int level, float scale,
const std::string &id, int viewport)
{
- if (pcs->points.size () != cloud->points.size () || normals->points.size () != cloud->points.size ())
+ if (pcs->size () != cloud->size () || normals->size () != cloud->size ())
{
pcl::console::print_error ("[addPointCloudPrincipalCurvatures] The number of points differs from the number of principal curvatures/normals!\n");
return (false);
line_2_colors->SetName ("Colors");
// Create the first sets of lines
- for (std::size_t i = 0; i < cloud->points.size (); i+=level)
+ for (std::size_t i = 0; i < cloud->size (); i+=level)
{
- PointT p = cloud->points[i];
- p.x += (pcs->points[i].pc1 * pcs->points[i].principal_curvature[0]) * scale;
- p.y += (pcs->points[i].pc1 * pcs->points[i].principal_curvature[1]) * scale;
- p.z += (pcs->points[i].pc1 * pcs->points[i].principal_curvature[2]) * scale;
+ PointT p = (*cloud)[i];
+ p.x += ((*pcs)[i].pc1 * (*pcs)[i].principal_curvature[0]) * scale;
+ p.y += ((*pcs)[i].pc1 * (*pcs)[i].principal_curvature[1]) * scale;
+ p.z += ((*pcs)[i].pc1 * (*pcs)[i].principal_curvature[2]) * scale;
vtkSmartPointer<vtkLineSource> line_1 = vtkSmartPointer<vtkLineSource>::New ();
- line_1->SetPoint1 (cloud->points[i].x, cloud->points[i].y, cloud->points[i].z);
+ line_1->SetPoint1 ((*cloud)[i].x, (*cloud)[i].y, (*cloud)[i].z);
line_1->SetPoint2 (p.x, p.y, p.z);
line_1->Update ();
polydata_1->AddInputData (line_1->GetOutput ());
line_1_data->GetCellData ()->SetScalars (line_1_colors);
// Create the second sets of lines
- for (std::size_t i = 0; i < cloud->points.size (); i += level)
- {
- Eigen::Vector3f pc (pcs->points[i].principal_curvature[0],
- pcs->points[i].principal_curvature[1],
- pcs->points[i].principal_curvature[2]);
- Eigen::Vector3f normal (normals->points[i].normal[0],
- normals->points[i].normal[1],
- normals->points[i].normal[2]);
+ for (std::size_t i = 0; i < cloud->size (); i += level)
+ {
+ Eigen::Vector3f pc ((*pcs)[i].principal_curvature[0],
+ (*pcs)[i].principal_curvature[1],
+ (*pcs)[i].principal_curvature[2]);
+ Eigen::Vector3f normal ((*normals)[i].normal[0],
+ (*normals)[i].normal[1],
+ (*normals)[i].normal[2]);
Eigen::Vector3f pc_c = pc.cross (normal);
- PointT p = cloud->points[i];
- p.x += (pcs->points[i].pc2 * pc_c[0]) * scale;
- p.y += (pcs->points[i].pc2 * pc_c[1]) * scale;
- p.z += (pcs->points[i].pc2 * pc_c[2]) * scale;
+ PointT p = (*cloud)[i];
+ p.x += ((*pcs)[i].pc2 * pc_c[0]) * scale;
+ p.y += ((*pcs)[i].pc2 * pc_c[1]) * scale;
+ p.z += ((*pcs)[i].pc2 * pc_c[2]) * scale;
vtkSmartPointer<vtkLineSource> line_2 = vtkSmartPointer<vtkLineSource>::New ();
- line_2->SetPoint1 (cloud->points[i].x, cloud->points[i].y, cloud->points[i].z);
+ line_2->SetPoint1 ((*cloud)[i].x, (*cloud)[i].y, (*cloud)[i].z);
line_2->SetPoint2 (p.x, p.y, p.z);
line_2->Update ();
polydata_2->AddInputData (line_2->GetOutput ());
int level, double scale,
const std::string &id, int viewport)
{
- if (gradients->points.size () != cloud->points.size ())
+ if (gradients->size () != cloud->size ())
{
PCL_ERROR ("[addPointCloudGradients] The number of points differs from the number of gradients!\n");
return (false);
vtkSmartPointer<vtkFloatArray> data = vtkSmartPointer<vtkFloatArray>::New ();
data->SetNumberOfComponents (3);
- vtkIdType nr_gradients = (cloud->points.size () - 1) / level + 1 ;
+ vtkIdType nr_gradients = (cloud->size () - 1) / level + 1 ;
float* pts = new float[2 * nr_gradients * 3];
for (vtkIdType i = 0, j = 0; j < nr_gradients; j++, i = j * level)
{
- PointT p = cloud->points[i];
- p.x += gradients->points[i].gradient[0] * scale;
- p.y += gradients->points[i].gradient[1] * scale;
- p.z += gradients->points[i].gradient[2] * scale;
+ PointT p = (*cloud)[i];
+ p.x += (*gradients)[i].gradient[0] * scale;
+ p.y += (*gradients)[i].gradient[1] * scale;
+ p.z += (*gradients)[i].gradient[2] * scale;
- pts[2 * j * 3 + 0] = cloud->points[i].x;
- pts[2 * j * 3 + 1] = cloud->points[i].y;
- pts[2 * j * 3 + 2] = cloud->points[i].z;
+ pts[2 * j * 3 + 0] = (*cloud)[i].x;
+ pts[2 * j * 3 + 1] = (*cloud)[i].y;
+ pts[2 * j * 3 + 2] = (*cloud)[i].z;
pts[2 * j * 3 + 3] = p.x;
pts[2 * j * 3 + 4] = p.y;
pts[2 * j * 3 + 5] = p.z;
continue;
}
- PointT p_src (source_points->points[correspondences[i].index_query]);
- PointT p_tgt (target_points->points[correspondences[i].index_match]);
+ PointT p_src ((*source_points)[correspondences[i].index_query]);
+ PointT p_tgt ((*target_points)[correspondences[i].index_match]);
p_src.getVector3fMap () = source_transformation * p_src.getVector3fMap ();
p_tgt.getVector3fMap () = target_transformation * p_tgt.getVector3fMap ();
vtkSmartPointer<vtkCellArray> vertices = polydata->GetVerts ();
vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
// Copy the new point array in
- vtkIdType nr_points = cloud->points.size ();
+ vtkIdType nr_points = cloud->size ();
points->SetNumberOfPoints (nr_points);
// Get a pointer to the beginning of the data array
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, pts += 3)
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[pts]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[pts]);
}
else
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[pts]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[pts]);
pts += 3;
j++;
}
std::uint32_t offset = fields[rgb_idx].offset;
for (std::size_t i = 0; i < cloud->size (); ++i)
{
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
- const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&cloud->points[i]) + offset);
+ const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&(*cloud)[i]) + offset);
unsigned char color[3];
color[0] = rgb_data->r;
color[1] = rgb_data->g;
// Create points from polyMesh.cloud
vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New ();
- vtkIdType nr_points = cloud->points.size ();
+ vtkIdType nr_points = cloud->size ();
points->SetNumberOfPoints (nr_points);
vtkSmartPointer<vtkLODActor> actor;
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
}
else
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
lookup[i] = static_cast<int> (j);
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
return (false);
vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
// Copy the new point array in
- vtkIdType nr_points = cloud->points.size ();
+ vtkIdType nr_points = cloud->size ();
points->SetNumberOfPoints (nr_points);
// Get a pointer to the beginning of the data array
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
}
else
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
lookup [i] = static_cast<int> (j);
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
std::uint32_t offset = fields[rgb_idx].offset;
for (std::size_t i = 0; i < cloud->size (); ++i)
{
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
- const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&cloud->points[i]) + offset);
+ const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&(*cloud)[i]) + offset);
unsigned char color[3];
color[0] = rgb_data->r;
color[1] = rgb_data->g;
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
// Get a random color
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
// Get a random color
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
unsigned char* colors = scalars->GetPointer (0);
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite (cloud_->points[cp].x) ||
- !std::isfinite (cloud_->points[cp].y) ||
- !std::isfinite (cloud_->points[cp].z))
+ if (!std::isfinite ((*cloud_)[cp].x) ||
+ !std::isfinite ((*cloud_)[cp].y) ||
+ !std::isfinite ((*cloud_)[cp].z))
continue;
- memcpy (&rgb, (reinterpret_cast<const char *> (&cloud_->points[cp])) + rgba_offset, sizeof (pcl::RGB));
+ memcpy (&rgb, (reinterpret_cast<const char *> (&(*cloud_)[cp])) + rgba_offset, sizeof (pcl::RGB));
colors[j ] = rgb.r;
colors[j + 1] = rgb.g;
colors[j + 2] = rgb.b;
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
int idx = static_cast<int> (cp) * 3;
- memcpy (&rgb, (reinterpret_cast<const char *> (&cloud_->points[cp])) + rgba_offset, sizeof (pcl::RGB));
+ memcpy (&rgb, (reinterpret_cast<const char *> (&(*cloud_)[cp])) + rgba_offset, sizeof (pcl::RGB));
colors[idx ] = rgb.r;
colors[idx + 1] = rgb.g;
colors[idx + 2] = rgb.b;
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
unsigned char* colors = scalars->GetPointer (0);
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite (cloud_->points[cp].x) ||
- !std::isfinite (cloud_->points[cp].y) ||
- !std::isfinite (cloud_->points[cp].z))
+ if (!std::isfinite ((*cloud_)[cp].x) ||
+ !std::isfinite ((*cloud_)[cp].y) ||
+ !std::isfinite ((*cloud_)[cp].z))
continue;
///@todo do this with the point_types_conversion in common, first template it!
- float h = cloud_->points[cp].h;
- float v = cloud_->points[cp].v;
- float s = cloud_->points[cp].s;
+ float h = (*cloud_)[cp].h;
+ float v = (*cloud_)[cp].v;
+ float s = (*cloud_)[cp].s;
// Fill color data with HSV here:
// restrict the hue value to [0,360[
// Color every point
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
- float h = cloud_->points[cp].h;
- float v = cloud_->points[cp].v;
- float s = cloud_->points[cp].s;
+ float h = (*cloud_)[cp].h;
+ float v = (*cloud_)[cp].v;
+ float s = (*cloud_)[cp].s;
// Fill color data with HSV here:
// restrict the hue value to [0,360[
auto scalars = vtkSmartPointer<vtkFloatArray>::New ();
scalars->SetNumberOfComponents (1);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
using FieldList = typename pcl::traits::fieldList<PointT>::type;
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite (cloud_->points[cp].x) || !std::isfinite (cloud_->points[cp].y) || !std::isfinite (cloud_->points[cp].z))
+ if (!std::isfinite ((*cloud_)[cp].x) || !std::isfinite ((*cloud_)[cp].y) || !std::isfinite ((*cloud_)[cp].z))
continue;
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud_->points[cp]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud_)[cp]);
memcpy (&field_data, pt_data + fields_[field_idx_].offset, pcl::getFieldSize (fields_[field_idx_].datatype));
colors[j] = field_data;
// Color every point
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud_->points[cp]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud_)[cp]);
memcpy (&field_data, pt_data + fields_[field_idx_].offset, pcl::getFieldSize (fields_[field_idx_].datatype));
if (!std::isfinite (field_data))
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (4);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
unsigned char* colors = scalars->GetPointer (0);
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
// Copy the value at the specified field
- if (!std::isfinite (cloud_->points[cp].x) ||
- !std::isfinite (cloud_->points[cp].y) ||
- !std::isfinite (cloud_->points[cp].z))
+ if (!std::isfinite ((*cloud_)[cp].x) ||
+ !std::isfinite ((*cloud_)[cp].y) ||
+ !std::isfinite ((*cloud_)[cp].z))
continue;
- colors[j ] = cloud_->points[cp].r;
- colors[j + 1] = cloud_->points[cp].g;
- colors[j + 2] = cloud_->points[cp].b;
- colors[j + 3] = cloud_->points[cp].a;
+ colors[j ] = (*cloud_)[cp].r;
+ colors[j + 1] = (*cloud_)[cp].g;
+ colors[j + 2] = (*cloud_)[cp].b;
+ colors[j + 3] = (*cloud_)[cp].a;
j += 4;
}
}
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
int idx = static_cast<int> (cp) * 4;
- colors[idx ] = cloud_->points[cp].r;
- colors[idx + 1] = cloud_->points[cp].g;
- colors[idx + 2] = cloud_->points[cp].b;
- colors[idx + 3] = cloud_->points[cp].a;
+ colors[idx ] = (*cloud_)[cp].r;
+ colors[idx + 1] = (*cloud_)[cp].g;
+ colors[idx + 2] = (*cloud_)[cp].b;
+ colors[idx + 3] = (*cloud_)[cp].a;
}
}
return scalars;
auto scalars = vtkSmartPointer<vtkUnsignedCharArray>::New ();
scalars->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
scalars->SetNumberOfTuples (nr_points);
unsigned char* colors = scalars->GetPointer (0);
std::set<std::uint32_t> labels;
// First pass: find unique labels
for (vtkIdType i = 0; i < nr_points; ++i)
- labels.insert (cloud_->points[i].label);
+ labels.insert ((*cloud_)[i].label);
// Assign Glasbey colors in ascending order of labels
std::size_t color = 0;
int j = 0;
for (vtkIdType cp = 0; cp < nr_points; ++cp)
{
- if (pcl::isFinite (cloud_->points[cp]))
+ if (pcl::isFinite ((*cloud_)[cp]))
{
- const pcl::RGB& color = static_mapping_ ? GlasbeyLUT::at (cloud_->points[cp].label % GlasbeyLUT::size ()) : colormap[cloud_->points[cp].label];
+ const pcl::RGB& color = static_mapping_ ? GlasbeyLUT::at ((*cloud_)[cp].label % GlasbeyLUT::size ()) : colormap[(*cloud_)[cp].label];
colors[j ] = color.r;
colors[j + 1] = color.g;
colors[j + 2] = color.b;
: PointCloudGeometryHandler<PointT>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex<PointT> ("x", fields_);
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex<PointT> ("y", fields_);
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex<PointT> ("z", fields_);
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
capable_ = true;
}
vtkSmartPointer<vtkFloatArray> data = vtkSmartPointer<vtkFloatArray>::New ();
data->SetNumberOfComponents (3);
- vtkIdType nr_points = cloud_->points.size ();
+ vtkIdType nr_points = cloud_->size ();
// Add all points
vtkIdType j = 0; // true point index
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
- pts[i * 3 + 0] = cloud_->points[i].x;
- pts[i * 3 + 1] = cloud_->points[i].y;
- pts[i * 3 + 2] = cloud_->points[i].z;
+ pts[i * 3 + 0] = (*cloud_)[i].x;
+ pts[i * 3 + 1] = (*cloud_)[i].y;
+ pts[i * 3 + 2] = (*cloud_)[i].z;
}
data->SetArray (&pts[0], nr_points * 3, 0);
points->SetData (data);
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!std::isfinite (cloud_->points[i].x) || !std::isfinite (cloud_->points[i].y) || !std::isfinite (cloud_->points[i].z))
+ if (!std::isfinite ((*cloud_)[i].x) || !std::isfinite ((*cloud_)[i].y) || !std::isfinite ((*cloud_)[i].z))
continue;
- pts[j * 3 + 0] = cloud_->points[i].x;
- pts[j * 3 + 1] = cloud_->points[i].y;
- pts[j * 3 + 2] = cloud_->points[i].z;
+ pts[j * 3 + 0] = (*cloud_)[i].x;
+ pts[j * 3 + 1] = (*cloud_)[i].y;
+ pts[j * 3 + 2] = (*cloud_)[i].z;
// Set j and increment
j++;
}
: PointCloudGeometryHandler<PointT>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex<PointT> ("normal_x", fields_);
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex<PointT> ("normal_y", fields_);
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex<PointT> ("normal_z", fields_);
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
capable_ = true;
}
if (!points)
points = vtkSmartPointer<vtkPoints>::New ();
points->SetDataTypeToFloat ();
- points->SetNumberOfPoints (cloud_->points.size ());
+ points->SetNumberOfPoints (cloud_->size ());
// Add all points
double p[3];
- for (vtkIdType i = 0; i < static_cast<vtkIdType> (cloud_->points.size ()); ++i)
+ for (vtkIdType i = 0; i < static_cast<vtkIdType> (cloud_->size ()); ++i)
{
- p[0] = cloud_->points[i].normal[0];
- p[1] = cloud_->points[i].normal[1];
- p[2] = cloud_->points[i].normal[2];
+ p[0] = (*cloud_)[i].normal[0];
+ p[1] = (*cloud_)[i].normal[1];
+ p[2] = (*cloud_)[i].normal[2];
points->SetPoint (i, p);
}
/** \brief Constructor. */
PointCloudGeometryHandler (const PointCloudConstPtr &cloud) :
cloud_ (cloud), capable_ (false),
- field_x_idx_ (-1), field_y_idx_ (-1), field_z_idx_ (-1),
+ field_x_idx_ (UNAVAILABLE), field_y_idx_ (UNAVAILABLE), field_z_idx_ (UNAVAILABLE),
fields_ ()
{}
bool capable_;
/** \brief The index of the field holding the X data. */
- int field_x_idx_;
+ index_t field_x_idx_;
/** \brief The index of the field holding the Y data. */
- int field_y_idx_;
+ index_t field_y_idx_;
/** \brief The index of the field holding the Z data. */
- int field_z_idx_;
+ index_t field_z_idx_;
/** \brief The list of fields available for this PointCloud. */
std::vector<pcl::PCLPointField> fields_;
: pcl::visualization::PointCloudGeometryHandler<PointT>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex<PointT> (x_field_name, fields_);
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex<PointT> (y_field_name, fields_);
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex<PointT> (z_field_name, fields_);
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
field_name_ = x_field_name + y_field_name + z_field_name;
capable_ = true;
if (!points)
points = vtkSmartPointer<vtkPoints>::New ();
points->SetDataTypeToFloat ();
- points->SetNumberOfPoints (cloud_->points.size ());
+ points->SetNumberOfPoints (cloud_->size ());
float data;
// Add all points
double p[3];
- for (vtkIdType i = 0; i < static_cast<vtkIdType> (cloud_->points.size ()); ++i)
+ for (vtkIdType i = 0; i < static_cast<vtkIdType> (cloud_->size ()); ++i)
{
// Copy the value at the specified field
- const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&cloud_->points[i]);
+ const std::uint8_t* pt_data = reinterpret_cast<const std::uint8_t*> (&(*cloud_)[i]);
memcpy (&data, pt_data + fields_[field_x_idx_].offset, sizeof (float));
p[0] = data;
PointCloudGeometryHandler (const PointCloudConstPtr &cloud, const Eigen::Vector4f & = Eigen::Vector4f::Zero ())
: cloud_ (cloud)
, capable_ (false)
- , field_x_idx_ (-1)
- , field_y_idx_ (-1)
- , field_z_idx_ (-1)
+ , field_x_idx_ (UNAVAILABLE)
+ , field_y_idx_ (UNAVAILABLE)
+ , field_z_idx_ (UNAVAILABLE)
, fields_ (cloud_->fields)
{
}
bool capable_;
/** \brief The index of the field holding the X data. */
- int field_x_idx_;
+ index_t field_x_idx_;
/** \brief The index of the field holding the Y data. */
- int field_y_idx_;
+ index_t field_y_idx_;
/** \brief The index of the field holding the Z data. */
- int field_z_idx_;
+ index_t field_z_idx_;
/** \brief The list of fields available for this PointCloud. */
std::vector<pcl::PCLPointField> fields_;
{
for(int i = 0 ; i < nb_values_ ; ++i)
{
- cloud_.points[0].histogram[i] = values_[i];
+ cloud_[0].histogram[i] = values_[i];
}
}
#include <pcl/pcl_macros.h>
#include <vtkImageCanvasSource2D.h>
+class vtkImageData;
namespace pcl
{
class vtkOpenGLExtensionManager;
class vtkRenderWindow;
-PCL_DEPRECATED(1, 13, "The OpenGL backend of VTK is deprecated. Please switch to the OpenGL2 backend.")
-class PCL_EXPORTS vtkVertexBufferObject : public vtkObject
+class PCL_DEPRECATED(1, 13, "The OpenGL backend of VTK is deprecated. Please switch to the OpenGL2 backend.")
+PCL_EXPORTS vtkVertexBufferObject : public vtkObject
{
public:
class vtkShaderProgram2;
class vtkVertexBufferObject;
-PCL_DEPRECATED(1, 13, "The OpenGL backend of VTK is deprecated. Please switch to the OpenGL2 backend.")
-class PCL_EXPORTS vtkVertexBufferObjectMapper : public vtkMapper
+class PCL_DEPRECATED(1, 13, "The OpenGL backend of VTK is deprecated. Please switch to the OpenGL2 backend.")
+PCL_EXPORTS vtkVertexBufferObjectMapper : public vtkMapper
{
public:
static vtkVertexBufferObjectMapper *New();
remove (const std::string &key)
{
std::lock_guard<std::mutex> lock (c_mtx);
- if (callables.find (key) != callables.end ())
- callables.erase (key);
+ callables.erase (key);
}
std::string window_name_;
{
double p[3];
src->GetPoint (i, p);
- cloud.points[i].x = static_cast<float> (p[0]);
- cloud.points[i].y = static_cast<float> (p[1]);
- cloud.points[i].z = static_cast<float> (p[2]);
+ cloud[i].x = static_cast<float> (p[0]);
+ cloud[i].y = static_cast<float> (p[1]);
+ cloud[i].z = static_cast<float> (p[2]);
}
// Compute a kd-tree for tgt
* POSSIBILITY OF SUCH DAMAGE.
*
*/
+#include <pcl/ModelCoefficients.h>
#include <pcl/visualization/common/shapes.h>
#include <pcl/common/angles.h>
#include <vtkLineSource.h>
#include <thread>
-#include <pcl/common/common_headers.h>
+#include <pcl/common/time.h> // for DO_EVERY
#include <pcl/visualization/common/common.h>
#include <vtkRenderWindowInteractor.h>
#include <pcl/visualization/histogram_visualizer.h>
#include <vtkTable.h>
#include <fstream>
-#include <sstream>
#include <pcl/visualization/pcl_plotter.h>
-#include <pcl/common/common_headers.h>
#define VTK_CREATE(type, name) \
vtkSmartPointer<type> name = vtkSmartPointer<type>::New()
char const *filename,
int type)
{
- using namespace std;
- ifstream fin(filename);
+ std::ifstream fin(filename);
//getting the no of column
- string line;
+ std::string line;
getline (fin, line);
- stringstream ss(line);
+ std::stringstream ss(line);
- std::vector<string> pnames; //plot names
- string xname, temp; //plot name of X axis
+ std::vector<std::string> pnames; //plot names
+ std::string xname, temp; //plot name of X axis
//checking X axis name
ss >> xname;
#include <boost/uuid/sha1.hpp>
#endif
#include <boost/filesystem.hpp>
+#include <pcl/common/utils.h> // pcl::utils::ignore
#include <pcl/console/parse.h>
// Support for VTK 7.1 upwards
vtkSmartPointer<vtkPoints> poly_points = vtkSmartPointer<vtkPoints>::New ();
pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud (new pcl::PointCloud<pcl::PointXYZ> ());
pcl::fromPCLPointCloud2 (poly_mesh.cloud, *point_cloud);
- poly_points->SetNumberOfPoints (point_cloud->points.size ());
+ poly_points->SetNumberOfPoints (point_cloud->size ());
- for (std::size_t i = 0; i < point_cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < point_cloud->size (); ++i)
{
- const pcl::PointXYZ& p = point_cloud->points[i];
+ const pcl::PointXYZ& p = (*point_cloud)[i];
poly_points->InsertPoint (i, p.x, p.y, p.z);
}
return (false);
vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
// Copy the new point array in
- vtkIdType nr_points = cloud->points.size ();
+ vtkIdType nr_points = cloud->size ();
points->SetNumberOfPoints (nr_points);
// Get a pointer to the beginning of the data array
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
}
else
{
for (vtkIdType i = 0; i < nr_points; ++i)
{
// Check if the point is invalid
- if (!isFinite (cloud->points[i]))
+ if (!isFinite ((*cloud)[i]))
continue;
lookup[i] = static_cast<int> (j);
- std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
+ std::copy (&(*cloud)[i].x, &(*cloud)[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
vtkSmartPointer<vtkPoints> poly_points = vtkSmartPointer<vtkPoints>::New ();
pcl::PointCloud<pcl::PointXYZ> point_cloud;
pcl::fromPCLPointCloud2 (polymesh.cloud, point_cloud);
- poly_points->SetNumberOfPoints (point_cloud.points.size ());
+ poly_points->SetNumberOfPoints (point_cloud.size ());
- for (std::size_t i = 0; i < point_cloud.points.size (); ++i)
- poly_points->InsertPoint (i, point_cloud.points[i].x, point_cloud.points[i].y, point_cloud.points[i].z);
+ for (std::size_t i = 0; i < point_cloud.size (); ++i)
+ poly_points->InsertPoint (i, point_cloud[i].x, point_cloud[i].y, point_cloud[i].z);
// Create a cell array to store the lines in and add the lines to it
vtkSmartPointer <vtkCellArray> cells = vtkSmartPointer<vtkCellArray>::New ();
colors->SetNumberOfComponents (3);
colors->SetName ("Colors");
poly_points->SetNumberOfPoints (cloud.size ());
- for (std::size_t i = 0; i < cloud.points.size (); ++i)
+ for (std::size_t i = 0; i < cloud.size (); ++i)
{
- const pcl::PointXYZRGB &p = cloud.points[i];
+ const pcl::PointXYZRGB &p = cloud[i];
poly_points->InsertPoint (i, p.x, p.y, p.z);
const unsigned char color[3] = { p.r, p.g, p.b };
colors->InsertNextTupleValue (color);
return (false);
}
convertToVtkMatrix (cloud->sensor_origin_, cloud->sensor_orientation_, transformation);
- poly_points->SetNumberOfPoints (cloud->points.size ());
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ poly_points->SetNumberOfPoints (cloud->size ());
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- const pcl::PointXYZ &p = cloud->points[i];
+ const pcl::PointXYZ &p = (*cloud)[i];
poly_points->InsertPoint (i, p.x, p.y, p.z);
}
}
worldPicker->Pick (static_cast<double> (x), static_cast<double> (y), value, renderer);
worldPicker->GetPickPosition (coords);
- cloud->points[count_valid_depth_pixels].x = static_cast<float> (coords[0]);
- cloud->points[count_valid_depth_pixels].y = static_cast<float> (coords[1]);
- cloud->points[count_valid_depth_pixels].z = static_cast<float> (coords[2]);
- cloud->points[count_valid_depth_pixels].getVector4fMap () = backToRealScale_eigen
- * cloud->points[count_valid_depth_pixels].getVector4fMap ();
+ (*cloud)[count_valid_depth_pixels].x = static_cast<float> (coords[0]);
+ (*cloud)[count_valid_depth_pixels].y = static_cast<float> (coords[1]);
+ (*cloud)[count_valid_depth_pixels].z = static_cast<float> (coords[2]);
+ (*cloud)[count_valid_depth_pixels].getVector4fMap () = backToRealScale_eigen
+ * (*cloud)[count_valid_depth_pixels].getVector4fMap ();
count_valid_depth_pixels++;
}
}
style_->setUseVbos (use_vbos_);
#else
PCL_WARN ("[PCLVisualizer::setUseVbos] Has no effect when OpenGL version is ≥ 2\n");
- (void) use_vbos;
+ pcl::utils::ignore(use_vbos);
#endif
}
: pcl::visualization::PointCloudGeometryHandler<pcl::PCLPointCloud2>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex (*cloud, "x");
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex (*cloud, "y");
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex (*cloud, "z");
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
capable_ = true;
}
: pcl::visualization::PointCloudGeometryHandler<pcl::PCLPointCloud2>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex (*cloud, "normal_x");
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex (*cloud, "normal_y");
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex (*cloud, "normal_z");
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
capable_ = true;
}
: pcl::visualization::PointCloudGeometryHandler<pcl::PCLPointCloud2>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex (*cloud, x_field_name);
- if (field_x_idx_ == -1)
+ if (field_x_idx_ == UNAVAILABLE)
return;
field_y_idx_ = pcl::getFieldIndex (*cloud, y_field_name);
- if (field_y_idx_ == -1)
+ if (field_y_idx_ == UNAVAILABLE)
return;
field_z_idx_ = pcl::getFieldIndex (*cloud, z_field_name);
- if (field_z_idx_ == -1)
+ if (field_z_idx_ == UNAVAILABLE)
return;
field_name_ = x_field_name + y_field_name + z_field_name;
capable_ = true;
{
for (std::size_t x=0; x<range_image.width; ++x)
{
- const pcl::BorderDescription& border_description = border_descriptions.points[y*range_image.width + x];
+ const pcl::BorderDescription& border_description = border_descriptions[y*range_image.width + x];
const pcl::BorderTraits& border_traits = border_description.traits;
if (border_traits[pcl::BORDER_TRAIT__OBSTACLE_BORDER])
{
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
cloud->points.resize (5);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- cloud->points[i].x = float (i);
- cloud->points[i].y = float (i / 2);
- cloud->points[i].z = 0.0f;
+ (*cloud)[i].x = float (i);
+ (*cloud)[i].y = float (i / 2);
+ (*cloud)[i].z = 0.0f;
}
// Start the visualizer
p.addPolygon<PointXYZ> (cloud, 1.0, 0.0, 0.0, "polygon", 0);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 10, "polygon");
- p.addLine<PointXYZ, PointXYZ> (cloud->points[0], cloud->points[1], 0.0, 1.0, 0.0);
+ p.addLine<PointXYZ, PointXYZ> ((*cloud)[0], (*cloud)[1], 0.0, 1.0, 0.0);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 50, "line");
- p.addSphere<PointXYZ> (cloud->points[0], 1, 0.0, 1.0, 0.0);
+ p.addSphere<PointXYZ> ((*cloud)[0], 1, 0.0, 1.0, 0.0);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 5, "sphere");
// p.removePolygon ("poly");
p.addText ("text", 200, 200, 1.0, 0, 0, "text");
- p.addText3D ("text3D", cloud->points[0], 1.0, 1.0, 0.0, 0.0);
+ p.addText3D ("text3D", (*cloud)[0], 1.0, 1.0, 0.0, 0.0);
p.spin ();
p.removeCoordinateSystem ("first", 0);
p.spin ();
PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
cloud->points.resize (5);
- for (std::size_t i = 0; i < cloud->points.size (); ++i)
+ for (std::size_t i = 0; i < cloud->size (); ++i)
{
- cloud->points[i].x = float (i);
- cloud->points[i].y = float (i / 2);
- cloud->points[i].z = 0.0f;
+ (*cloud)[i].x = float (i);
+ (*cloud)[i].y = float (i / 2);
+ (*cloud)[i].z = 0.0f;
}
// Start the visualizer
p.addPolygon<PointXYZ> (cloud, 1.0, 0.0, 0.0, "polygon", leftPort);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 10, "polygon", leftPort);
- p.addLine<PointXYZ, PointXYZ> (cloud->points[0], cloud->points[1], 0.0, 1.0, 0.0, "line", leftPort);
+ p.addLine<PointXYZ, PointXYZ> ((*cloud)[0], (*cloud)[1], 0.0, 1.0, 0.0, "line", leftPort);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 50, "line", leftPort);
- p.addSphere<PointXYZ> (cloud->points[0], 1, 0.0, 1.0, 0.0, "sphere", leftPort);
+ p.addSphere<PointXYZ> ((*cloud)[0], 1, 0.0, 1.0, 0.0, "sphere", leftPort);
p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 5, "sphere", leftPort);
// p.removePolygon ("poly");
p.addText ("text", 200, 200, 1.0, 0, 0, "text", leftPort);
- p.addText3D ("text3D", cloud->points[0], 1.0, 1.0, 0.0, 0.0, "", rightPort);
+ p.addText3D ("text3D", (*cloud)[0], 1.0, 1.0, 0.0, 0.0, "", rightPort);
p.spin ();
p.removeCoordinateSystem ("first", 0);
p.spin ();
p.spin ();
p.removeCoordinateSystem ("second", 0);
p.spin ();
- p.addText3D ("text3D_to_remove", cloud->points[1], 1.0, 0.0, 1.0, 0.0, "", rightPort);
+ p.addText3D ("text3D_to_remove", (*cloud)[1], 1.0, 0.0, 1.0, 0.0, "", rightPort);
p.spin ();
p.removeText3D ("text3D_to_remove", rightPort);
p.spin ();