--- /dev/null
+cache:
+ - C:\Tools\vcpkg\installed\
+ - C:\Tools\ninja\ninja.exe
+
+configuration: Release
+
+environment:
+ VCPKG_DIR: C:\Tools\vcpkg
+ NINJA_DIR: C:\Tools\ninja
+ GENERATOR: Ninja
+ matrix:
+ - APPVEYOR_BUILD_WORKER_IMAGE: 'Visual Studio 2015'
+ PLATFORM: x86
+ VCVARSALL: '%ProgramFiles(x86)%\Microsoft Visual Studio 14.0\VC\vcvarsall.bat'
+ ARCHITECTURE: x86
+ - APPVEYOR_BUILD_WORKER_IMAGE: 'Visual Studio 2015'
+ PLATFORM: x64
+ VCVARSALL: '%ProgramFiles(x86)%\Microsoft Visual Studio 14.0\VC\vcvarsall.bat'
+ ARCHITECTURE: x86_amd64
+ #- APPVEYOR_BUILD_WORKER_IMAGE: 'Visual Studio 2017'
+ # PLATFORM: x86
+ # VCVARSALL: '%ProgramFiles(x86)%\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat'
+ # ARCHITECTURE: x86
+ #- APPVEYOR_BUILD_WORKER_IMAGE: 'Visual Studio 2017'
+ # PLATFORM: x64
+ # VCVARSALL: '%ProgramFiles(x86)%\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat'
+ # ARCHITECTURE: x86_amd64
+
+init:
+ - cd %VCPKG_DIR%
+ - git pull
+ - echo.set(VCPKG_BUILD_TYPE release)>> %VCPKG_DIR%\triplets\%PLATFORM%-windows.cmake
+ - .\bootstrap-vcpkg.bat
+ - vcpkg version
+ - cd %APPVEYOR_BUILD_FOLDER%
+
+install:
+ - vcpkg install
+ boost-system
+ boost-filesystem
+ boost-thread
+ boost-date-time
+ boost-iostreams
+ boost-chrono
+ boost-asio
+ boost-dynamic-bitset
+ boost-foreach
+ boost-graph
+ boost-interprocess
+ boost-multi-array
+ boost-ptr-container
+ boost-random
+ boost-signals2
+ eigen3
+ flann
+ qhull
+ gtest
+ --triplet %PLATFORM%-windows
+ - vcpkg list
+ - set PATH=%VCPKG_DIR%\installed\%PLATFORM%-windows\bin;%PATH%
+ - if not exist %NINJA_DIR%\ mkdir %NINJA_DIR%
+ - cd %NINJA_DIR%
+ - if not exist ninja.exe appveyor DownloadFile https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-win.zip
+ - if not exist ninja.exe 7z x ninja-win.zip
+ - set PATH=%NINJA_DIR%;%PATH%
+
+build:
+ parallel: true
+
+build_script:
+ # These tests will fails on Windows.
+ # Therefore, these tests are disabled until fixed.
+ # AppVeyor Log : https://ci.appveyor.com/project/PointCloudLibrary/pcl/build/1.0.267
+ # * common_eigen
+ # * feature_rift_estimation
+ # * feature_cppf_estimation
+ # * feature_pfh_estimation
+ - call "%VCVARSALL%" %ARCHITECTURE%
+ - cd %APPVEYOR_BUILD_FOLDER%
+ - mkdir build
+ - cd build
+ - cmake -G"%GENERATOR%"
+ -DCMAKE_C_COMPILER=cl.exe
+ -DCMAKE_CXX_COMPILER=cl.exe
+ -DCMAKE_TOOLCHAIN_FILE=%VCPKG_DIR%\scripts\buildsystems\vcpkg.cmake
+ -DVCPKG_APPLOCAL_DEPS=OFF
+ -DPCL_BUILD_WITH_BOOST_DYNAMIC_LINKING_WIN32=ON
+ -DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON
+ -DPCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32=ON
+ -DPCL_NO_PRECOMPILE=OFF
+ -DBUILD_apps=OFF
+ -DBUILD_examples=OFF
+ -DBUILD_global_tests=ON
+ -DBUILD_simulation=OFF
+ -DBUILD_tools=OFF
+ ..
+ - cmake --build . --config %CONFIGURATION%
+ - ctest -C %CONFIGURATION% -V
+
--- /dev/null
+#! /usr/bin/env python3
+
+"""
+Software License Agreement (BSD License)
+
+ Point Cloud Library (PCL) - www.pointclouds.org
+ Copyright (c) 2018-, Open Perception, Inc.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the copyright holder(s) nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+"""
+
+import argparse
+from argparse import ArgumentParser
+from collections import defaultdict
+import getpass
+import json
+import os
+import pathlib
+import re
+import subprocess
+import sys
+
+import requests
+
+
+def find_pcl_folder():
+ folder = os.path.dirname(os.path.abspath(__file__))
+ folder = pathlib.Path(folder).parent
+ return str(folder)
+
+
+def find_pr_list(start: str, end: str):
+ """Returns all PR ids from a certain commit range. Inspired in
+ http://joey.aghion.com/find-the-github-pull-request-for-a-commit/
+ https://stackoverflow.com/questions/36433572/how-does-ancestry-path-work-with-git-log#36437843
+ """
+
+ # Let git generate the proper pr history
+ cmd = "git log --oneline " + start + ".." + end
+ cmd = cmd.split()
+ output = subprocess.run(cmd, cwd=FOLDER, stdout=subprocess.PIPE)
+ pr_commits = output.stdout.split(b"\n")
+
+ # Fetch ids for all merge requests from PRS
+ merge_re = re.compile("\S+ Merge pull request #(\d+) from \S+")
+ squash_re = re.compile("\(#(\d+)\)")
+
+ ids = []
+ for pr in pr_commits:
+
+ pr_s = str(pr)
+
+ # Match agains usual pattern
+ uid = None
+ match = merge_re.fullmatch(pr_s)
+
+ # Match agains squash pattern
+ if not match:
+ match = squash_re.search(pr_s)
+
+ # Abort
+ if not match:
+ continue
+
+ # Extract PR uid
+ uid = int(match.group(1))
+ ids.append(uid)
+
+ return ids
+
+
+def fetch_pr_info(pr_ids, auth):
+
+ prs_url = "https://api.github.com/repos/PointCloudLibrary/pcl/pulls/"
+ pr_info = []
+
+ sys.stdout.write("Fetching PR Info: {}%".format(0))
+ sys.stdout.flush()
+
+ for i, pr_id in enumerate(pr_ids):
+
+ # Fetch GitHub info
+ response = requests.get(prs_url + str(pr_id), auth=auth)
+ data = response.json()
+
+ if response.status_code != 200:
+ print(
+ "\nError: Failed to fetch PR info. Server reported '"
+ + data["message"]
+ + "'",
+ file=sys.stderr,
+ )
+ exit(code=1)
+
+ d = {"id": pr_id, "title": data["title"], "labels": data["labels"]}
+ pr_info.append(d)
+
+ # import pdb; pdb.set_trace()
+ sys.stdout.write(
+ "\rFetching PR Info: {:0.2f}%".format(100 * (i + 1) / len(pr_ids))
+ )
+ sys.stdout.flush()
+
+ print("")
+ return pr_info
+
+
+def extract_version(tag):
+ """Finds the corresponding version from a provided tag.
+ If the tag does not correspond to a suitable version tag, the original tag
+ is returned
+ """
+ version_re = re.compile("pcl-\S+")
+ res = version_re.fullmatch(tag)
+
+ # Not a usual version tag
+ if not res:
+ return tag
+
+ return tag[4:]
+
+
+def generate_text_content(tag, pr_info):
+
+ module_order = (
+ None,
+ "cmake",
+ "2d",
+ "common",
+ "cuda",
+ "features",
+ "filters",
+ "geometry",
+ "gpu",
+ "io",
+ "kdtree",
+ "keypoints",
+ "ml",
+ "octree",
+ "outofcore",
+ "people",
+ "recognition",
+ "registration",
+ "sample_consensus",
+ "search",
+ "segmentation",
+ "simulation",
+ "stereo",
+ "surface",
+ "apps",
+ "docs",
+ "tutorials",
+ "examples",
+ "tests",
+ "tools",
+ "ci",
+ )
+
+ module_titles = {
+ None: "Uncategorized",
+ "2d": "libpcl_2d",
+ "apps": "PCL Apps",
+ "cmake": "CMake",
+ "ci": "CI",
+ "common": "libpcl_common",
+ "cuda": "libpcl_cuda",
+ "docs": "PCL Docs",
+ "examples": "PCL Examples",
+ "features": "libpcl_features",
+ "filters": "libpcl_filters",
+ "gpu": "libpcl_gpu",
+ "io": "libpcl_io",
+ "kdtree": "libpcl_kdtree",
+ "keypoints": "libpcl_keypoints",
+ "ml": "libpcl_ml",
+ "octree": "libpcl_octree",
+ "outofcore": "libpcl_outofcore",
+ "people": "libpcl_people",
+ "recognition": "libpcl_recognition",
+ "registration": "libpcl_registration",
+ "sample_consensus": "libpcl_sample_consensus",
+ "search": "libpcl_search",
+ "segmentation": "libpcl_segmentation",
+ "simulation": "libpcl_simulation",
+ "stereo": "libpcl_stereo",
+ "surface": "libpcl_surface",
+ "tests": "PCL Tests",
+ "tools": "PCL Tools",
+ "tutorials": "PCL Tutorials",
+ "visualization": "libpcl_visualization",
+ }
+
+ changes_order = ("new-feature", "deprecation", "removal", "behavior", "api", "abi")
+
+ changes_titles = {
+ "new-feature": "New Features",
+ "deprecation": "Deprecated",
+ "removal": "Removed",
+ "behavior": "Behavioral changes",
+ "api": "API changes",
+ "abi": "ABI changes",
+ }
+
+ changes_description = {
+ "new-feature": "Newly added functionalities.",
+ "deprecation": "Deprecated code scheduled to be removed after two minor releases.",
+ "removal": "Removal of deprecated code.",
+ "behavior": "Changes in the expected default behavior.",
+ "api": "Changes to the API which didn't went through the proper deprecation and removal cycle.",
+ "abi": "Changes that cause ABI incompatibility but are still API compatible.",
+ }
+
+ changes_labels = {
+ "breaks API": "api",
+ "breaks ABI": "abi",
+ "behavior": "behavior",
+ "deprecation": "deprecation",
+ "removal": "removal",
+ }
+
+ # change_log content
+ clog = []
+
+ # Infer version from tag
+ version = extract_version(tag)
+
+ # Find the commit date for writting the Title
+ cmd = ("git log -1 --format=%ai " + tag).split()
+ output = subprocess.run(cmd, cwd=FOLDER, stdout=subprocess.PIPE)
+ date = output.stdout.split()[0].decode()
+ tokens = date.split("-")
+ clog += [
+ "## *= "
+ + version
+ + " ("
+ + tokens[2]
+ + "."
+ + tokens[1]
+ + "."
+ + tokens[0]
+ + ") =*"
+ ]
+
+ # Map each PR into the approriate module and changes
+ modules = defaultdict(list)
+ changes = defaultdict(list)
+ module_re = re.compile("module: \S+")
+ changes_re = re.compile("changes: ")
+ feature_re = re.compile("new feature")
+
+ for pr in pr_info:
+
+ pr["modules"] = []
+ pr["changes"] = []
+
+ for label in pr["labels"]:
+ if module_re.fullmatch(label["name"]):
+ module = label["name"][8:]
+ pr["modules"].append(module)
+ modules[module].append(pr)
+
+ elif changes_re.match(label["name"]):
+ key = changes_labels[label["name"][9:]]
+ pr["changes"].append(key)
+ changes[key].append(pr)
+
+ elif feature_re.fullmatch(label["name"]):
+ pr["changes"].append("new-feature")
+ changes["new-feature"].append(pr)
+
+ # No labels defaults to section None
+ if not pr["modules"]:
+ modules[None].append(pr)
+ continue
+
+ # Generate Changes Summary
+ for key in changes_order:
+
+ # Skip empty sections
+ if not changes[key]:
+ continue
+
+ clog += ["\n### `" + changes_titles[key] + ":`\n"]
+
+ clog += ["*" + changes_description[key] + "*\n"]
+
+ for pr in changes[key]:
+ prefix = "".join(["[" + k + "]" for k in pr["modules"]])
+ if prefix:
+ prefix = "**" + prefix + "** "
+ clog += [
+ "* "
+ + prefix
+ + pr["title"]
+ + " [[#"
+ + str(pr["id"])
+ + "]]"
+ + "(https://github.com/PointCloudLibrary/pcl/pull/"
+ + str(pr["id"])
+ + ")"
+ ]
+
+ # Traverse Modules and generate each section's content
+ clog += ["\n### `Modules:`"]
+ for key in module_order:
+
+ # Skip empty sections
+ if not modules[key]:
+ continue
+
+ # if key:
+ clog += ["\n#### `" + module_titles[key] + ":`\n"]
+
+ for pr in modules[key]:
+ prefix = "".join(["[" + k + "]" for k in pr["changes"]])
+ if prefix:
+ prefix = "**" + prefix + "** "
+ clog += [
+ "* "
+ + prefix
+ + pr["title"]
+ + " [[#"
+ + str(pr["id"])
+ + "]]"
+ + "(https://github.com/PointCloudLibrary/pcl/pull/"
+ + str(pr["id"])
+ + ")"
+ ]
+
+ return clog
+
+
+def parse_arguments():
+
+ parser = ArgumentParser(
+ description="Generate a change log between two "
+ "revisions.\n\nCheck https://github.com/PointCloudLibrary/pcl/wiki/Preparing-Releases#creating-the-change-log "
+ "for some additional examples on how to use the tool."
+ )
+ parser.add_argument(
+ "start",
+ help="The start (exclusive) " "revision/commit/tag to generate the log.",
+ )
+ parser.add_argument(
+ "end",
+ nargs="?",
+ default="HEAD",
+ help="The end "
+ "(inclusive) revision/commit/tag to generate the log. "
+ "(Defaults to HEAD)",
+ )
+ parser.add_argument(
+ "--username",
+ help="GitHub Account user name. If "
+ "specified it will perform requests with the provided credentials. "
+ "This is often required in order to overcome GitHub API's request "
+ "limits.",
+ )
+ meg = parser.add_mutually_exclusive_group()
+ meg.add_argument(
+ "--cache",
+ nargs="?",
+ const="pr_info.json",
+ metavar="FILE",
+ help="Caches the PR info extracted from GitHub into a JSON file. "
+ "(Defaults to 'pr_info.json')",
+ )
+ meg.add_argument(
+ "--from-cache",
+ nargs="?",
+ const="pr_info.json",
+ metavar="FILE",
+ help="Uses a previously generated PR info JSON cache "
+ "file to generate the change log. (Defaults to 'pr_info.json')",
+ )
+
+ # Parse arguments
+ args = parser.parse_args()
+ args.auth = None
+
+ if args.username:
+ password = getpass.getpass(prompt="Password for " + args.username + ": ")
+ args.auth = (args.username, password)
+
+ return args
+
+
+##
+## 'main'
+##
+
+FOLDER = find_pcl_folder()
+
+# Parse arguments
+args = parse_arguments()
+
+pr_info = None
+if not args.from_cache:
+
+ # Find all PRs since tag
+ prs = find_pr_list(start=args.start, end=args.end)
+
+ # Generate pr objects with title, labels from ids
+ pr_info = fetch_pr_info(prs, auth=args.auth)
+ if args.cache:
+ with open(args.cache, "w") as fp:
+ d = {"start": args.start, "end": args.end, "pr_info": pr_info}
+ fp.write(json.dumps(d))
+else:
+ # Load previously cached info
+ with open(args.from_cache) as fp:
+ d = json.loads(fp.read())
+ pr_info = d["pr_info"]
+ args.start = d["start"]
+ args.end = d["start"]
+
+
+# Generate text content based on changes
+clog = generate_text_content(tag=args.end, pr_info=pr_info)
+print("\n".join(clog))
--- /dev/null
+#! /bin/bash
+
+if ! hash cmake 2>/dev/null \
+ || ! hash abi-dumper 2>/dev/null \
+ || ! hash abi-compliance-checker 2>/dev/null; then
+ echo "This script requires cmake, abi-dumper and abi-compliance-checker"
+ echo "On Ubuntu: apt-get install cmake abi-dumper abi-compliance-checker"
+ exit 1
+fi
+
+usage ()
+{
+ echo "Usage: $0 [-o <output_folder> ] [-j <number of workers>] <old_rev> <new_rev>" 1>&2;
+ exit 1;
+}
+
+# Set defaults
+ROOT_FOLDER="$(pwd)/abi-reports"
+N_WORKERS=4
+
+# Parse arguments
+while getopts ":o:j:" o; do
+ case "${o}" in
+ o)
+ ROOT_FOLDER=${OPTARG}
+ ;;
+ j)
+ N_WORKERS=${OPTARG}
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+
+# Shift positional arguments
+shift $((OPTIND-1))
+
+# Check if positional arguments were parsed
+OLD_VERSION=$1
+NEW_VERSION=$2
+
+if [ -z "${OLD_VERSION}" ] || [ -z "${NEW_VERSION}" ]; then
+ usage
+ exit 1
+fi
+
+# create the top folders
+mkdir -p "$ROOT_FOLDER"
+cd "$ROOT_FOLDER"
+PCL_FOLDER="$ROOT_FOLDER/pcl"
+
+if [ ! -d "$PCL_FOLDER" ]; then
+ # Clone if needed
+ git clone https://github.com/PointCloudLibrary/pcl.git
+fi
+
+function build_and_dump
+{
+ # Store current folder
+ local PWD=$(pwd)
+
+ # If it is a version prepend pcl-
+ local TAG=$1
+ if [ $(echo "$1" | grep "^[0-9]\+\.[0-9]\+\.[0-9]\+$") ]; then
+ TAG="pcl-$TAG"
+ fi
+
+ # Checkout correct version
+ cd "$PCL_FOLDER"
+ git checkout $TAG
+
+ # Escape version
+ local VERSION_ESCAPED=$(echo "$1" | sed -e 's/\./_/g')
+
+ # Create the install folders
+ local INSTALL_FOLDER="$ROOT_FOLDER/install/$VERSION_ESCAPED"
+ mkdir -p "$INSTALL_FOLDER"
+
+ # create the build folders
+ local BUILD_FOLDER="$ROOT_FOLDER/build/$VERSION_ESCAPED"
+ mkdir -p "$BUILD_FOLDER"
+ cd "$BUILD_FOLDER"
+
+ # Build
+ cmake -DCMAKE_BUILD_TYPE="Debug" \
+ -DCMAKE_CXX_FLAGS="-Og" \
+ -DCMAKE_C_FLAGS="-Og" \
+ -DCMAKE_INSTALL_PREFIX="$INSTALL_FOLDER" \
+ -DPCL_ONLY_CORE_POINT_TYPES=ON \
+ -DBUILD_tools=OFF \
+ -DBUILD_global_tests=OFF \
+ "$PCL_FOLDER"
+ # -DBUILD_2d=OFF \
+ # -DBUILD_features=OFF \
+ # -DBUILD_filters=OFF \
+ # -DBUILD_geometry=OFF \
+ # -DBUILD_io=OFF \
+ # -DBUILD_kdtree=OFF \
+ # -DBUILD_keypoints=OFF \
+ # -DBUILD_ml=OFF \
+ # -DBUILD_octree=OFF \
+ # -DBUILD_outofcore=OFF \
+ # -DBUILD_people=OFF \
+ # -DBUILD_recognition=OFF \
+ # -DBUILD_registration=OFF \
+ # -DBUILD_sample_consensus=OFF \
+ # -DBUILD_search=OFF \
+ # -DBUILD_segmentation=OFF \
+ # -DBUILD_stereo=OFF \
+ # -DBUILD_surface=OFF \
+ # -DBUILD_tracking=OFF \
+ # -DBUILD_visualization=OFF \
+ make -j${N_WORKERS} install
+
+ # create ABI dump folder
+ local ABI_DUMP_FOLDER="$ROOT_FOLDER/abi_dump/$VERSION_ESCAPED"
+ mkdir -p "$ABI_DUMP_FOLDER"
+
+ # Sweep through available modules
+ cd "$INSTALL_FOLDER/lib"
+ ls *.so | sed -e "s/^libpcl_//" -e "s/.so//" \
+ | awk '{print "libpcl_"$1".so -o '"$ABI_DUMP_FOLDER"'/"$1".dump -lver '"$1"'"}' \
+ | xargs -n5 -P${N_WORKERS} abi-dumper
+
+ # Restore folder
+ cd $PWD
+}
+
+build_and_dump "$OLD_VERSION"
+build_and_dump "$NEW_VERSION"
+
+
+# Move to root folder and generate reports
+OLD_VERSION_ESCAPED=$(echo "$OLD_VERSION" | sed -e 's/\./_/g')
+NEW_VERSION_ESCAPED=$(echo "$NEW_VERSION" | sed -e 's/\./_/g')
+OLD_INSTALL_FOLDER="$ROOT_FOLDER/install/$OLD_VERSION_ESCAPED"
+OLD_ABI_DUMP_FOLDER="$ROOT_FOLDER/abi_dump/$OLD_VERSION_ESCAPED"
+NEW_ABI_DUMP_FOLDER="$ROOT_FOLDER/abi_dump/$NEW_VERSION_ESCAPED"
+
+cd "$ROOT_FOLDER"
+find $OLD_INSTALL_FOLDER/lib -name '*.so' -printf '%P\n' | sed -e "s/^libpcl_//" -e "s/.so//" \
+ | awk '{print "-lib "$1" -old '"$OLD_ABI_DUMP_FOLDER"'/"$1".dump -new '"$NEW_ABI_DUMP_FOLDER"'/"$1".dump"}' \
+ | xargs -n6 -P${N_WORKERS} abi-compliance-checker
-:warning: This is a issue tracker, please use our mailing list for questions: www.pcl-users.org. :warning:
+<!--- WARNING: This is an issue tracker, please use our mailing list for questions: www.pcl-users.org. -->
<!--- Provide a general summary of the issue in the Title above -->
* Compiler:
* PCL Version:
+## Context
+<!--- How has this issue affected you? What are you trying to accomplish? -->
+<!--- Providing context helps us come up with a solution that is most useful in the real world -->
+
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
-## Possible Solution
-<!--- Not obligatory, but suggest a fix/reason for the bug, -->
-<!--- or ideas how to implement the addition or change -->
-
## Code to Reproduce
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
-## Context
-<!--- How has this issue affected you? What are you trying to accomplish? -->
-<!--- Providing context helps us come up with a solution that is most useful in the real world -->
-
+## Possible Solution
+<!--- Not obligatory, but suggest a fix/reason for the bug, -->
+<!--- or ideas how to implement the addition or change -->
--- /dev/null
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 60
+# Number of days of inactivity before a stale Issue or Pull Request is closed
+# False means never close
+daysUntilClose: false
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+ - "status: needs review"
+ - "status: needs testing"
+ - "status: needs decision"
+ - "status: ready to merge"
+# Label to use when marking as stale
+staleLabel: "status: stale"
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: |
+ This pull request has been automatically marked as stale because it hasn't had
+ any activity in the past 60 days. Commenting or adding a new commit to the
+ pull request will revert this.
+
+ Come back whenever you have time. We look forward to your contribution.
+# Comment to post when removing the stale label. Set to `false` to disable
+unmarkComment: false
+# Comment to post when closing a stale Issue or Pull Request. Set to `false` to disable
+closeComment: false
+# Limit to only `issues` or `pulls`
+only: pulls
fi
}
-function build ()
-{
- case $CC in
- clang ) build_lib;;
- gcc ) build_lib_core;;
- esac
-}
-
-function build_lib ()
+function build_all ()
{
# A complete build
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_QT_VERSION=4 \
- -DBUILD_simulation=ON \
- -DBUILD_global_tests=OFF \
- -DBUILD_examples=OFF \
- -DBUILD_tools=OFF \
- -DBUILD_apps=OFF \
- -DBUILD_apps_3d_rec_framework=OFF \
- -DBUILD_apps_cloud_composer=OFF \
- -DBUILD_apps_in_hand_scanner=OFF \
- -DBUILD_apps_modeler=OFF \
- -DBUILD_apps_optronic_viewer=OFF \
- -DBUILD_apps_point_cloud_editor=OFF \
- $PCL_DIR
- # Build
- make -j2
-}
-
-function build_examples ()
-{
- # A complete build
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_QT_VERSION=4 \
+ -DPCL_QT_VERSION=5 \
-DBUILD_simulation=ON \
-DBUILD_global_tests=OFF \
-DBUILD_examples=ON \
- -DBUILD_tools=OFF \
- -DBUILD_apps=OFF \
- $PCL_DIR
- # Build
- make -j2
-}
-
-function build_tools ()
-{
- # A complete build
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_QT_VERSION=4 \
- -DBUILD_simulation=ON \
- -DBUILD_global_tests=OFF \
- -DBUILD_examples=OFF \
-DBUILD_tools=ON \
- -DBUILD_apps=OFF \
- $PCL_DIR
- # Build
- make -j2
-}
-
-function build_apps ()
-{
- # A complete build
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_QT_VERSION=4 \
- -DBUILD_simulation=OFF \
- -DBUILD_outofcore=OFF \
- -DBUILD_people=OFF \
- -DBUILD_global_tests=OFF \
- -DBUILD_examples=OFF \
- -DBUILD_tools=OFF \
-DBUILD_apps=ON \
-DBUILD_apps_3d_rec_framework=ON \
-DBUILD_apps_cloud_composer=ON \
-DBUILD_apps_in_hand_scanner=ON \
-DBUILD_apps_modeler=ON \
- -DBUILD_apps_optronic_viewer=OFF \
-DBUILD_apps_point_cloud_editor=ON \
$PCL_DIR
# Build
make -j2
}
-function build_lib_core ()
-{
- # A reduced build, only pcl_common
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DBUILD_2d=OFF \
- -DBUILD_features=OFF \
- -DBUILD_filters=OFF \
- -DBUILD_geometry=OFF \
- -DBUILD_global_tests=OFF \
- -DBUILD_io=OFF \
- -DBUILD_kdtree=OFF \
- -DBUILD_keypoints=OFF \
- -DBUILD_ml=OFF \
- -DBUILD_octree=OFF \
- -DBUILD_outofcore=OFF \
- -DBUILD_people=OFF \
- -DBUILD_recognition=OFF \
- -DBUILD_registration=OFF \
- -DBUILD_sample_consensus=OFF \
- -DBUILD_search=OFF \
- -DBUILD_segmentation=OFF \
- -DBUILD_stereo=OFF \
- -DBUILD_surface=OFF \
- -DBUILD_tools=OFF \
- -DBUILD_tracking=OFF \
- -DBUILD_visualization=OFF \
- $PCL_DIR
- # Build
- make -j2
-}
-
-function test_core ()
+function test_all ()
{
+ # A complete build
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
-DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_NO_PRECOMPILE=ON \
- -DBUILD_tools=OFF \
- -DBUILD_examples=OFF \
- -DBUILD_apps=OFF \
- -DBUILD_2d=ON \
- -DBUILD_features=ON \
- -DBUILD_filters=ON \
- -DBUILD_geometry=ON \
- -DBUILD_io=ON \
- -DBUILD_kdtree=ON \
- -DBUILD_keypoints=ON \
- -DBUILD_ml=OFF \
- -DBUILD_octree=ON \
- -DBUILD_outofcore=OFF \
- -DBUILD_people=OFF \
- -DBUILD_recognition=OFF \
- -DBUILD_registration=OFF \
- -DBUILD_sample_consensus=ON \
- -DBUILD_search=ON \
- -DBUILD_segmentation=OFF \
- -DBUILD_simulation=OFF \
- -DBUILD_stereo=OFF \
- -DBUILD_surface=OFF \
- -DBUILD_tracking=OFF \
- -DBUILD_visualization=OFF \
+ -DPCL_QT_VERSION=4 \
+ -DBUILD_simulation=ON \
-DBUILD_global_tests=ON \
- -DBUILD_tests_2d=ON \
- -DBUILD_tests_common=ON \
- -DBUILD_tests_features=ON \
- -DBUILD_tests_filters=OFF \
- -DBUILD_tests_geometry=ON \
- -DBUILD_tests_io=OFF \
- -DBUILD_tests_kdtree=ON \
- -DBUILD_tests_keypoints=ON \
- -DBUILD_tests_octree=ON \
- -DBUILD_tests_outofcore=OFF \
- -DBUILD_tests_people=OFF \
- -DBUILD_tests_recognition=OFF \
- -DBUILD_tests_registration=OFF \
- -DBUILD_tests_sample_consensus=ON \
- -DBUILD_tests_search=ON \
- -DBUILD_tests_segmentation=OFF \
- -DBUILD_tests_surface=OFF \
- -DBUILD_tests_visualization=OFF \
- $PCL_DIR
- # Build and run tests
- make -j2 tests
-}
-
-function test_ext_1 ()
-{
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_NO_PRECOMPILE=ON \
- -DBUILD_tools=OFF \
-DBUILD_examples=OFF \
- -DBUILD_apps=OFF \
- -DBUILD_2d=ON \
- -DBUILD_features=ON \
- -DBUILD_filters=ON \
- -DBUILD_geometry=ON \
- -DBUILD_io=ON \
- -DBUILD_kdtree=ON \
- -DBUILD_keypoints=OFF \
- -DBUILD_ml=OFF \
- -DBUILD_octree=ON \
- -DBUILD_outofcore=ON \
- -DBUILD_people=OFF \
- -DBUILD_recognition=OFF \
- -DBUILD_registration=ON \
- -DBUILD_sample_consensus=ON \
- -DBUILD_search=ON \
- -DBUILD_segmentation=OFF \
- -DBUILD_simulation=OFF \
- -DBUILD_stereo=OFF \
- -DBUILD_surface=ON \
- -DBUILD_tracking=OFF \
- -DBUILD_visualization=ON \
- -DBUILD_global_tests=ON \
- -DBUILD_tests_2d=OFF \
- -DBUILD_tests_common=OFF \
- -DBUILD_tests_features=OFF \
- -DBUILD_tests_filters=OFF \
- -DBUILD_tests_geometry=OFF \
- -DBUILD_tests_io=ON \
- -DBUILD_tests_kdtree=OFF \
- -DBUILD_tests_keypoints=OFF \
- -DBUILD_tests_octree=OFF \
- -DBUILD_tests_outofcore=ON \
- -DBUILD_tests_people=OFF \
- -DBUILD_tests_recognition=OFF \
- -DBUILD_tests_registration=ON \
- -DBUILD_tests_sample_consensus=OFF \
- -DBUILD_tests_search=OFF \
- -DBUILD_tests_segmentation=OFF \
- -DBUILD_tests_surface=ON \
- -DBUILD_tests_visualization=ON \
- $PCL_DIR
- # Build and run tests
- make -j2 tests
-}
-
-function test_ext_2 ()
-{
- # Configure
- mkdir $BUILD_DIR && cd $BUILD_DIR
- cmake -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_CXX_FLAGS="$CMAKE_CXX_FLAGS" \
- -DPCL_ONLY_CORE_POINT_TYPES=ON \
- -DPCL_NO_PRECOMPILE=ON \
-DBUILD_tools=OFF \
- -DBUILD_examples=OFF \
-DBUILD_apps=OFF \
- -DBUILD_2d=ON \
- -DBUILD_features=ON \
- -DBUILD_filters=ON \
- -DBUILD_geometry=ON \
- -DBUILD_io=ON \
- -DBUILD_kdtree=ON \
- -DBUILD_keypoints=OFF \
- -DBUILD_ml=ON \
- -DBUILD_octree=ON \
- -DBUILD_outofcore=OFF \
- -DBUILD_people=ON \
- -DBUILD_recognition=ON \
- -DBUILD_registration=ON \
- -DBUILD_sample_consensus=ON \
- -DBUILD_search=ON \
- -DBUILD_segmentation=ON \
- -DBUILD_simulation=OFF \
- -DBUILD_stereo=OFF \
- -DBUILD_surface=OFF \
- -DBUILD_tracking=OFF \
- -DBUILD_visualization=ON \
- -DBUILD_global_tests=ON \
- -DBUILD_tests_2d=OFF \
- -DBUILD_tests_common=OFF \
- -DBUILD_tests_features=OFF \
- -DBUILD_tests_filters=ON \
- -DBUILD_tests_geometry=OFF \
- -DBUILD_tests_io=OFF \
- -DBUILD_tests_kdtree=OFF \
- -DBUILD_tests_keypoints=OFF \
- -DBUILD_tests_octree=OFF \
- -DBUILD_tests_outofcore=OFF \
- -DBUILD_tests_people=ON \
- -DBUILD_tests_recognition=ON \
- -DBUILD_tests_registration=OFF \
- -DBUILD_tests_sample_consensus=OFF \
- -DBUILD_tests_search=OFF \
- -DBUILD_tests_segmentation=ON \
- -DBUILD_tests_surface=OFF \
- -DBUILD_tests_visualization=OFF \
$PCL_DIR
- # Build and run tests
+ # Build
make -j2 tests
}
+
function doc ()
{
- # Do not generate documentation for pull requests
- if [[ $TRAVIS_PULL_REQUEST != 'false' ]]; then exit; fi
# Install sphinx
- pip install --user sphinx pyparsing==2.1.9 sphinxcontrib-doxylink
+ pip3 install --user setuptools
+ pip3 install --user Jinja2==2.8.1 sphinx sphinxcontrib-doxylink
+
# Configure
mkdir $BUILD_DIR && cd $BUILD_DIR
cmake -DDOXYGEN_USE_SHORT_NAMES=OFF \
git config --global user.email "documentation@pointclouds.org"
git config --global user.name "PointCloudLibrary (via TravisCI)"
+ cd $DOC_DIR
+ git clone https://github.com/PointCloudLibrary/documentation.git .
+
+ # Generate documentation and tutorials
+ cd $BUILD_DIR
+ make doc tutorials advanced
+
+ # Do not push documentation in pull requests
+ if [[ $TRAVIS_EVENT_TYPE == 'pull_request' ]] ; then exit; fi
+
+ # update the remote url to git-ssh protocol for commit
+ git remote set-url origin git@github.com:PointCloudLibrary/documentation.git
+
if [ -z "$id_rsa_{1..23}" ]; then echo 'No $id_rsa_{1..23} found !' ; exit 1; fi
echo -n $id_rsa_{1..23} >> ~/.ssh/travis_rsa_64
echo -e "Host github.com\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
- cd $DOC_DIR
- git clone git@github.com:PointCloudLibrary/documentation.git .
-
- # Generate documentation and tutorials
- cd $BUILD_DIR
- make doc tutorials advanced
-
# Upload to GitHub if generation succeeded
if [[ $? == 0 ]]; then
# Copy generated tutorials to the doc directory
case $1 in
before-install ) before_install;;
- build ) build;;
- build-examples ) build_examples;;
- build-tools ) build_tools;;
- build-apps ) build_apps;;
- test-core ) test_core;;
- test-ext-1 ) test_ext_1;;
- test-ext-2 ) test_ext_2;;
+ build ) build_all;;
+ test ) test_all;;
doc ) doc;;
esac
+
sudo: required
+dist: xenial
language: cpp
cache:
ccache: true
+git:
+ depth: 1
addons:
apt:
- sources:
- - kalakris-cmake
- - boost-latest
- - kubuntu-backports
- - sourceline: 'ppa:kedazo/doxygen-updates-precise'
- - sourceline: 'ppa:v-launchpad-jochen-sprickerhof-de/pcl'
packages:
- cmake
- - libboost1.55-all-dev
+ - libboost-filesystem-dev
+ - libboost-iostreams-dev
+ - libboost-thread-dev
+ - libboost-chrono-dev
- libeigen3-dev
- libgtest-dev
- doxygen-latex
- dvipng
- libusb-1.0-0-dev
- libqhull-dev
- - libvtk5-dev
+ - libvtk6-dev
+ - libvtk6-qt-dev
- libflann-dev
- doxygen
- - libqt4-dev
- - libqt4-opengl-dev
- - libvtk5-qt4-dev
+ - qtbase5-dev
+ - libqt5opengl5-dev
- libglew-dev
- libopenni-dev
+ - python3-pip
+ - libproj-dev #missing dependency from vtk?
before_install:
- bash .travis.sh before-install
- secure: LNsNoBvqY/jYDoBjWCE5cM+f1H8xOwSBc/tbWZo6E/jPRjUOLzXSicMMUMrlVto+bFzSUT8OVajV3XmoRx+Qntzv6bDSAGjdycvHd2YZQPn8BYrsFtR4So7SsJkF9FlxzbiOXaiSRpwGn7TP/DO7Neubrr4IS2ef4nWowGrnCE8=
- secure: PZivWbaCWFA2BFFY8n3UMxdEWjz7rBh568u9LF5LH3HgWADnfiwWzNriACqX9fhe7tSmDru5Bk978s+xPPAY9v24cfiDEX5a5MQ/XVr2rP48n3vlUDWERDhIodJ73F9F9GGZXToGdNz0MBUAHgiv7Lb0GYUfmOYzUJjWghngLBw=
+
jobs:
include:
- - stage: Core Build
- env: TASK="build"
+ - name: "Documentation"
compiler: gcc
- script: bash .travis.sh $TASK
- - env: TASK="build"
- compiler: clang
- script: bash .travis.sh $TASK
- - stage: Extended Build and Tests
- compiler: clang
- env: TASK="build-examples"
- script: bash .travis.sh $TASK
- - compiler: clang
- env: TASK="build-tools"
- script: bash .travis.sh $TASK
- # - compiler: clang
- # env: TASK="build-apps"
- # script: bash .travis.sh $TASK
- - compiler: gcc
env: TASK="doc"
script: bash .travis.sh $TASK
- - compiler: gcc
- env: TASK="test-core"
- script: bash .travis.sh $TASK
- - compiler: gcc
- env: TASK="test-ext-1"
+ - name: "Library, Examples, Tools, Apps"
+ compiler: clang
+ env: TASK="build"
script: bash .travis.sh $TASK
- - compiler: gcc
- env: TASK="test-ext-2"
+ - name: "Unit Tests"
+ compiler: gcc
+ env: TASK="test"
script: bash .travis.sh $TASK
+
+notifications:
+ email: false
set(SUBSYS_NAME 2d)
set(SUBSYS_DESC "Point cloud 2d")
-set(SUBSYS_DEPS common io filters)
+set(SUBSYS_DEPS common filters)
set(build TRUE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ON)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include" ${VTK_INCLUDE_DIRECTORIES})
+ set(LIB_NAME "pcl_${SUBSYS_NAME}")
+ PCL_MAKE_PKGCONFIG_HEADER_ONLY("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "${SUBSYS_DEPS}" "" "" "" "")
+
#Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl" ${impl_incs})
/** \brief This function suppresses the edges which don't form a local maximum
* in the edge direction.
* \param[in] edges point cloud containing all the edges
- * \param[out] maxima point cloud containing the non-max supressed edges
+ * \param[out] maxima point cloud containing the non-max suppressed edges
* \param[in] tLow
*/
void
{
input_ = input;
}
+
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
}
#include <pcl/2d/impl/edge.hpp>
convolution_.filter (*smoothed_cloud);
//PCL_ERROR ("Gaussian blur: %g\n", tt.toc ()); tt.tic ();
- // Edge detection usign Sobel
+ // Edge detection using Sobel
pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
setInputCloud (smoothed_cloud);
detectEdgeSobel (*edges);
convolution_.filter (smoothed_cloud_y);
- // Edge detection usign Sobel
+ // Edge detection using Sobel
pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
sobelMagnitudeDirection (smoothed_cloud_x, smoothed_cloud_y, *edges.get ());
{
continue;
}
- // If one of the elements of the kernel and image dont match,
+ // If one of the elements of the kernel and image don't match,
// the output image is 0. So, move to the next point.
if ((*input_)(j + l - kernel_width / 2, i + k - kernel_height / 2).intensity != 1)
{
{
continue;
}
- // If one of the elements of the kernel and image dont match,
+ // If one of the elements of the kernel and image don't match,
// the output image is 0. So, move to the next point.
if ((*input_)(j + l - kernel_width / 2, i + k - kernel_height / 2).intensity < min || min == -1)
{
{
continue;
}
- // If one of the elements of the kernel and image dont match,
+ // If one of the elements of the kernel and image don't match,
// the output image is 0. So, move to the next point.
if ((*input_)(j + l - kernel_width / 2, i + k - kernel_height / 2).intensity > max || max == -1)
{
const pcl::PointCloud<PointT> &input1,
const pcl::PointCloud<PointT> &input2)
{
- const int height = (input1.height < input2.hieght) ? input1.height : input2.height;
+ const int height = (input1.height < input2.height) ? input1.height : input2.height;
const int width = (input1.width < input2.width) ? input1.width : input2.width;
output.width = width;
output.height = height;
const pcl::PointCloud<PointT> &input1,
const pcl::PointCloud<PointT> &input2)
{
- const int height = (input1.height < input2.hieght) ? input1.height : input2.height;
+ const int height = (input1.height < input2.height) ? input1.height : input2.height;
const int width = (input1.width < input2.width) ? input1.width : input2.width;
output.width = width;
output.height = height;
#include <pcl/point_types.h>
-#include <pcl/2d/Convolution.h>
-#include <pcl/2d/Edge.h>
-#include <pcl/2d/Kernel.h>
-#include <pcl/2d/Morphology.h>
+#include <pcl/2d/convolution.h>
+#include <pcl/2d/edge.h>
+#include <pcl/2d/kernel.h>
+#include <pcl/2d/morphology.h>
#include <pcl/pcl_base.h>
using namespace pcl;
# ChangeList
+## *= 1.9.0 (06.11.2018) =*
+
+### `New Features:`
+
+*Newly added functionalities.*
+
+* **[common][visualization]** Add Viridis color LUT [[#2420]](https://github.com/PointCloudLibrary/pcl/pull/2420)
+* **[octree]** Implementation of the iterator 'OctreeLeafNodeBreadthIterator'. [[#2204]](https://github.com/PointCloudLibrary/pcl/pull/2204)
+* **[octree]** Implementation of the iterator 'OctreeFixedDepthIterator'. [[#1983]](https://github.com/PointCloudLibrary/pcl/pull/1983)
+* **[ci]** Enable Global Tests on Windows CI [[#2137]](https://github.com/PointCloudLibrary/pcl/pull/2137)
+* **[features]** Add GASD global point cloud descriptor [[#1652]](https://github.com/PointCloudLibrary/pcl/pull/1652)
+* **[visualization]** Add overload to `PCLVisualizer::addText3D()` that allows specifying text orientation [[#2038]](https://github.com/PointCloudLibrary/pcl/pull/2038)
+* **[features]** FLARELocalReferenceFrameEstimation class added [[#1571]](https://github.com/PointCloudLibrary/pcl/pull/1571)
+* **[surface][tools]** Add new mls projection method. Deprecated `MovingLeastSquares::setPolynomialFit()`. [[#1960]](https://github.com/PointCloudLibrary/pcl/pull/1960)
+
+### `Deprecated:`
+
+*Deprecated code scheduled to be removed after two minor releases.*
+
+* **[octree]** Implementation of the iterator 'OctreeLeafNodeBreadthIterator'. [[#2204]](https://github.com/PointCloudLibrary/pcl/pull/2204)
+* **[common][segmentation]** Provide proper EuclideanClusterComparator method depreciation. New Pragma macro. New Deprecated type. [[#2096]](https://github.com/PointCloudLibrary/pcl/pull/2096)
+* **[io]** Add support pcl::PointXYZRGBA to pcl::VLPGrabber. Deprecate rgb signatures. [[#2102]](https://github.com/PointCloudLibrary/pcl/pull/2102)
+* **[surface][tools]** Add new mls projection method. Deprecated `MovingLeastSquares::setPolynomialFit()`. [[#1960]](https://github.com/PointCloudLibrary/pcl/pull/1960)
+
+### `Removed:`
+
+*Removal of deprecated code.*
+
+* **[filters][io][surface][visualization]** Removal of deprecated code in filters, io, surface and visualization modules [[#2077]](https://github.com/PointCloudLibrary/pcl/pull/2077)
+* **[common]** Remove deprecated ros headers [[#2075]](https://github.com/PointCloudLibrary/pcl/pull/2075)
+* **[registration]** Remove registration module deprecated methods [[#2076]](https://github.com/PointCloudLibrary/pcl/pull/2076)
+* **[sample_consensus]** Remove deprecated functions and variables from SAC module [[#2071]](https://github.com/PointCloudLibrary/pcl/pull/2071)
+* **[common]** Removal of PCA deprecated constructor [[#2070]](https://github.com/PointCloudLibrary/pcl/pull/2070)
+
+### `Behavioral changes:`
+
+*Changes in the expected default behavior.*
+
+* **[common]** PointCloudDepthAndRGBtoXYZRGBA: initialize with the default alpha value (fix #2476) [[#2533]](https://github.com/PointCloudLibrary/pcl/pull/2533)
+* **[octree]** Reverse octree's depth first iterator order [[#2332]](https://github.com/PointCloudLibrary/pcl/pull/2332)
+* **[common]** `PointXYZRGBL` `label` field is now default constructed to 0 [[#2462]](https://github.com/PointCloudLibrary/pcl/pull/2462)
+* **[io]** Fix PLYReader is_dense behavior [[#2133]](https://github.com/PointCloudLibrary/pcl/pull/2133)
+
+### `API changes:`
+
+*Changes to the API which didn't went through the proper deprecation and removal cycle.*
+
+* **[octree]** Implementation of the iterator 'OctreeLeafNodeBreadthIterator'. [[#2204]](https://github.com/PointCloudLibrary/pcl/pull/2204)
+* **[sample_consensus]** Const-qualify most of the methods in SAC model classes [[#2270]](https://github.com/PointCloudLibrary/pcl/pull/2270)
+* **[simulation]** Use GLuint rather than size_t to represent OpenGL indices. [[#2238]](https://github.com/PointCloudLibrary/pcl/pull/2238)
+* **[visualization]** Fix access specifier in `PointCloudColorHandlerRGBAField` [[#2226]](https://github.com/PointCloudLibrary/pcl/pull/2226)
+* **[docs]** Misc. typos (cont.) [[#2215]](https://github.com/PointCloudLibrary/pcl/pull/2215)
+* **[octree]** OctreeIterators special member revision [[#2108]](https://github.com/PointCloudLibrary/pcl/pull/2108)
+* **[io]** Add support pcl::PointXYZRGBA to pcl::VLPGrabber. Deprecate rgb signatures. [[#2102]](https://github.com/PointCloudLibrary/pcl/pull/2102)
+* **[surface][tools]** Add new mls projection method. Deprecated `MovingLeastSquares::setPolynomialFit()`. [[#1960]](https://github.com/PointCloudLibrary/pcl/pull/1960)
+* **[surface]** Add ability to cache mls results [[#1952]](https://github.com/PointCloudLibrary/pcl/pull/1952)
+
+### `ABI changes:`
+
+*Changes that cause ABI incompatibility but are still API compatible.*
+
+* **[surface]** Missing pcl::MovingLeastSquaresOMP declaration without /openmp [[#2324]](https://github.com/PointCloudLibrary/pcl/pull/2324)
+* **[common][filters][surface]** Improved docstrings and error messages [[#2300]](https://github.com/PointCloudLibrary/pcl/pull/2300)
+* **[common]** Modified `GlasbeyLUT` indexing type to `size_t` [[#2297]](https://github.com/PointCloudLibrary/pcl/pull/2297)
+* **[octree]** Implementation of the iterator 'OctreeFixedDepthIterator'. [[#1983]](https://github.com/PointCloudLibrary/pcl/pull/1983)
+* **[common][segmentation]** Provide proper EuclideanClusterComparator method depreciation. New Pragma macro. New Deprecated type. [[#2096]](https://github.com/PointCloudLibrary/pcl/pull/2096)
+* **[gpu]** Allow specifying decimation step in convertToTsdfCloud [[#2099]](https://github.com/PointCloudLibrary/pcl/pull/2099)
+* **[apps]** More warning suppression in pcl apps [[#2080]](https://github.com/PointCloudLibrary/pcl/pull/2080)
+* **[io]** Removed unused member from ply_parser [[#2066]](https://github.com/PointCloudLibrary/pcl/pull/2066)
+* **[filters]** Fixes remove_indices in UniformSampling [[#1902]](https://github.com/PointCloudLibrary/pcl/pull/1902)
+* **[visualization]** Add accessor for current rendering framerate in PCLVisualizer [[#1974]](https://github.com/PointCloudLibrary/pcl/pull/1974)
+* **[simulation]** Redo: Simulation: enable returning of organized point clouds [[#1687]](https://github.com/PointCloudLibrary/pcl/pull/1687)
+* **[registration]** Added option to specify translation and rotation convergence deltas in ICP and NDT algorithms. [[#1724]](https://github.com/PointCloudLibrary/pcl/pull/1724)
+
+### `Modules:`
+
+#### `Uncategorized:`
+
+* Change Log generation tool. Automates change log generation. [[#2396]](https://github.com/PointCloudLibrary/pcl/pull/2396)
+* Compatibility reports generation script [[#2410]](https://github.com/PointCloudLibrary/pcl/pull/2410)
+* Update logo [[#2547]](https://github.com/PointCloudLibrary/pcl/pull/2547)
+* Never close stale issues/prs [[#2400]](https://github.com/PointCloudLibrary/pcl/pull/2400)
+* Fix typos in the whole codebase [[#2217]](https://github.com/PointCloudLibrary/pcl/pull/2217)
+* Fixed typo and rearragend items in the issue template [[#2197]](https://github.com/PointCloudLibrary/pcl/pull/2197)
+* Change Stale daysTillClose to 100 years [[#2166]](https://github.com/PointCloudLibrary/pcl/pull/2166)
+* set stale daysUntilClose to a really big number [[#2162]](https://github.com/PointCloudLibrary/pcl/pull/2162)
+* Stale set up [[#2101]](https://github.com/PointCloudLibrary/pcl/pull/2101)
+
+#### `CMake:`
+
+* Fix checks for user-provided CXX flags [[#2579]](https://github.com/PointCloudLibrary/pcl/pull/2579)
+* Fix FLANN path to lower case [[#2576]](https://github.com/PointCloudLibrary/pcl/pull/2576)
+* Use pkg-config to find Flann [[#2563]](https://github.com/PointCloudLibrary/pcl/pull/2563)
+* Update FindBoost versions [[#2558]](https://github.com/PointCloudLibrary/pcl/pull/2558)
+* Add PCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32 option [[#2552]](https://github.com/PointCloudLibrary/pcl/pull/2552)
+* Fix app/CMakeLists to enable Apps under Windows [[#2550]](https://github.com/PointCloudLibrary/pcl/pull/2550)
+* When configuring with WITH_DOCS, but Doxygen is not available, prevent generation. [[#2516]](https://github.com/PointCloudLibrary/pcl/pull/2516)
+* CMake: Do not include test targets in PCLConfig.cmake [[#2458]](https://github.com/PointCloudLibrary/pcl/pull/2458)
+* CMake Set temporarily the policy CMP0074 to OLD [[#2454]](https://github.com/PointCloudLibrary/pcl/pull/2454)
+* prevent GCC flags propagating to NVCC [[#2430]](https://github.com/PointCloudLibrary/pcl/pull/2430)
+* Mark visualization as an optional dependency of tools [[#2439]](https://github.com/PointCloudLibrary/pcl/pull/2439)
+* Do not mark imported libraries as GLOBAL in PCLConfig [[#2435]](https://github.com/PointCloudLibrary/pcl/pull/2435)
+* Intel fixes [[#2432]](https://github.com/PointCloudLibrary/pcl/pull/2432)
+* Export `-march=native` for Clang and prevent it from being included during cross compilation. [[#2416]](https://github.com/PointCloudLibrary/pcl/pull/2416)
+* Do not search for PCL components that have been found already [[#2428]](https://github.com/PointCloudLibrary/pcl/pull/2428)
+* Move SSE compiler options to `PCL_COMPILE_OPTIONS`. Expose PCL as a CMake imported target. [[#2100]](https://github.com/PointCloudLibrary/pcl/pull/2100)
+* Add Visual Studio compiler option /FS for Ninja build [[#2414]](https://github.com/PointCloudLibrary/pcl/pull/2414)
+* Use rpath in the target's install name [[#2241]](https://github.com/PointCloudLibrary/pcl/pull/2241)
+* Improve QHull finder script [[#2344]](https://github.com/PointCloudLibrary/pcl/pull/2344)
+* Fix link order issue with boost [[#2236]](https://github.com/PointCloudLibrary/pcl/pull/2236)
+* Mark found PCL component libraries and include dirs as advanced [[#2235]](https://github.com/PointCloudLibrary/pcl/pull/2235)
+* Prevent search for disabled optional dependencies in targets. [[#2229]](https://github.com/PointCloudLibrary/pcl/pull/2229)
+* Fix installation rules for ml module [[#2192]](https://github.com/PointCloudLibrary/pcl/pull/2192)
+* Fix conditional branch of Visual C++ 2017 [[#2121]](https://github.com/PointCloudLibrary/pcl/pull/2121)
+* Add *_USE_STATIC options to PCLConfig [[#2086]](https://github.com/PointCloudLibrary/pcl/pull/2086)
+* Add search path suffixes for Vcpkg [[#2085]](https://github.com/PointCloudLibrary/pcl/pull/2085)
+* Update finder scripts for Ensenso, OpenNI, and OpenNI2 [[#2061]](https://github.com/PointCloudLibrary/pcl/pull/2061)
+* Fix PACKAGE to include cmake/Modules directory [[#2053]](https://github.com/PointCloudLibrary/pcl/pull/2053)
+* Unifies Find scripts in PCLConfig [[#1421]](https://github.com/PointCloudLibrary/pcl/pull/1421)
+* CUDA 9 Arch Flags [[#2047]](https://github.com/PointCloudLibrary/pcl/pull/2047)
+* Suppress log when PCL_FIND_QUIETLY is turned on. [[#2032]](https://github.com/PointCloudLibrary/pcl/pull/2032)
+* fix /MP option not generated for Visual Studio. [[#2031]](https://github.com/PointCloudLibrary/pcl/pull/2031)
+* Generate pkgconfig for 2d module [[#1979]](https://github.com/PointCloudLibrary/pcl/pull/1979)
+* Update Find Boost [[#1972]](https://github.com/PointCloudLibrary/pcl/pull/1972)
+* Added CUDA compute capability 5.3 [[#1929]](https://github.com/PointCloudLibrary/pcl/pull/1929)
+* Fix issue with finding pcl deployed out of path [[#1923]](https://github.com/PointCloudLibrary/pcl/pull/1923)
+* Add new gtest path [[#1920]](https://github.com/PointCloudLibrary/pcl/pull/1920)
+
+#### `libpcl_2d:`
+
+* Avoid huge index jumps in `RandomSample`. Remove `io` dependency from `2d`. [[#2141]](https://github.com/PointCloudLibrary/pcl/pull/2141)
+* Fix header names [[#2079]](https://github.com/PointCloudLibrary/pcl/pull/2079)
+* Generate pkgconfig for 2d module [[#1979]](https://github.com/PointCloudLibrary/pcl/pull/1979)
+
+#### `libpcl_common:`
+
+* Fix docstrings [[#2591]](https://github.com/PointCloudLibrary/pcl/pull/2591)
+* Throw an early exception to prevent divide by zero error (#2481) [[#2530]](https://github.com/PointCloudLibrary/pcl/pull/2530)
+* Relax requirements in eigen22d test. Always provide a normalized result in `pcl::transformPlane`. [[#2503]](https://github.com/PointCloudLibrary/pcl/pull/2503)
+* **[behavior]** PointCloudDepthAndRGBtoXYZRGBA: initialize with the default alpha value (fix #2476) [[#2533]](https://github.com/PointCloudLibrary/pcl/pull/2533)
+* Throw `UnorganizedPointCloudException` in `PointCloud::at` [[#2521]](https://github.com/PointCloudLibrary/pcl/pull/2521)
+* Add missing const specifier for getters in `PCLBase`. [[#2502]](https://github.com/PointCloudLibrary/pcl/pull/2502)
+* swap the header in pcl::PointCloud::swap [[#2499]](https://github.com/PointCloudLibrary/pcl/pull/2499)
+* Add header guard and copyright info to polynomial_calculations.hpp [[#2500]](https://github.com/PointCloudLibrary/pcl/pull/2500)
+* Add `header` to the print output of `PointCloud` [[#2498]](https://github.com/PointCloudLibrary/pcl/pull/2498)
+* Fix force recalculation option in `BivariatePolynomialT::calculateGradient` [[#2479]](https://github.com/PointCloudLibrary/pcl/pull/2479)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Fix a bug in `PointRGBtoI` color conversion [[#2475]](https://github.com/PointCloudLibrary/pcl/pull/2475)
+* Provide `operator<<` for `Intensity32u` point type [[#2467]](https://github.com/PointCloudLibrary/pcl/pull/2467)
+* **[behavior]** `PointXYZRGBL` `label` field is now default constructed to 0 [[#2462]](https://github.com/PointCloudLibrary/pcl/pull/2462)
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Intel fixes [[#2432]](https://github.com/PointCloudLibrary/pcl/pull/2432)
+* **[new-feature]** Add Viridis color LUT [[#2420]](https://github.com/PointCloudLibrary/pcl/pull/2420)
+* Remove malloc header to restore builds on BSDs [[#2374]](https://github.com/PointCloudLibrary/pcl/pull/2374)
+* Add support for multiple extensions in `parse_file_extension_argument ()`. [[#2347]](https://github.com/PointCloudLibrary/pcl/pull/2347)
+* Improve speed of `transformPointCloud/WithNormals()` functions [[#2247]](https://github.com/PointCloudLibrary/pcl/pull/2247)
+* Add RGB constructor that takes R, G, and B components [[#2329]](https://github.com/PointCloudLibrary/pcl/pull/2329)
+* **[abi]** Improved docstrings and error messages [[#2300]](https://github.com/PointCloudLibrary/pcl/pull/2300)
+* **[abi]** Modified `GlasbeyLUT` indexing type to `size_t` [[#2297]](https://github.com/PointCloudLibrary/pcl/pull/2297)
+* Add GASDSignatures to `PCL_POINT_TYPES` and `PCL_FEATURE_POINTTYPES` macros. [[#2295]](https://github.com/PointCloudLibrary/pcl/pull/2295)
+* [PARSE] Constness of the API [[#2224]](https://github.com/PointCloudLibrary/pcl/pull/2224)
+* Fix two "unreachable code" warnings in `pca.hpp` [[#2219]](https://github.com/PointCloudLibrary/pcl/pull/2219)
+* Fix covariance calculation in PCA [[#2130]](https://github.com/PointCloudLibrary/pcl/pull/2130)
+* **[abi][deprecation]** Provide proper EuclideanClusterComparator method depreciation. New Pragma macro. New Deprecated type. [[#2096]](https://github.com/PointCloudLibrary/pcl/pull/2096)
+* **[removal]** Remove deprecated ros headers [[#2075]](https://github.com/PointCloudLibrary/pcl/pull/2075)
+* Suppress (maybe) uninitialized warning [[#2073]](https://github.com/PointCloudLibrary/pcl/pull/2073)
+* **[removal]** Removal of PCA deprecated constructor [[#2070]](https://github.com/PointCloudLibrary/pcl/pull/2070)
+* [gcc] fixes -Wimplicit-fallthrough: common/io.h [[#2041]](https://github.com/PointCloudLibrary/pcl/pull/2041)
+* Include pcl/point_cloud.h and pcl/point_types.h headers. [[#1962]](https://github.com/PointCloudLibrary/pcl/pull/1962)
+* Add test for macro _USE_MATH_DEFINES. [[#1956]](https://github.com/PointCloudLibrary/pcl/pull/1956)
+* instantiate: remove duplicate macro definition. Fixes #1924. [[#1925]](https://github.com/PointCloudLibrary/pcl/pull/1925)
+
+#### `libpcl_cuda:`
+
+* add support for latest Turing gpu and cuda 10 [[#2560]](https://github.com/PointCloudLibrary/pcl/pull/2560)
+* Fix compilation issues with CUDA 9.1 [[#2212]](https://github.com/PointCloudLibrary/pcl/pull/2212)
+* Fix some CUDA 9 related errors [[#2181]](https://github.com/PointCloudLibrary/pcl/pull/2181)
+* Added CUDA compute capability 5.3 [[#1929]](https://github.com/PointCloudLibrary/pcl/pull/1929)
+
+#### `libpcl_features:`
+
+* Solve issues with failing features tests [[#2544]](https://github.com/PointCloudLibrary/pcl/pull/2544)
+* Update the OpenMP implementations of normal and FPFH estimation [[#2278]](https://github.com/PointCloudLibrary/pcl/pull/2278)
+* Make `MomentOfInertia` instantiations consistent with the rest of the library [[#2266]](https://github.com/PointCloudLibrary/pcl/pull/2266)
+* Docstring corrections [[#2143]](https://github.com/PointCloudLibrary/pcl/pull/2143)
+* Improve Doxygen comments for HistogramInterpolationMethod [[#2111]](https://github.com/PointCloudLibrary/pcl/pull/2111)
+* **[new-feature]** Add GASD global point cloud descriptor [[#1652]](https://github.com/PointCloudLibrary/pcl/pull/1652)
+* Suppress (maybe) uninitialized warning [[#2073]](https://github.com/PointCloudLibrary/pcl/pull/2073)
+* **[new-feature]** FLARELocalReferenceFrameEstimation class added [[#1571]](https://github.com/PointCloudLibrary/pcl/pull/1571)
+* fix missing include file: from_meshes.h is using pcl::Vertices in it [[#2009]](https://github.com/PointCloudLibrary/pcl/pull/2009)
+* Typo [[#1968]](https://github.com/PointCloudLibrary/pcl/pull/1968)
+
+#### `libpcl_filters:`
+
+* Corrections to CovarianceSampling class and corresponding test [[#2512]](https://github.com/PointCloudLibrary/pcl/pull/2512)
+* Add the missing const modifier in `Filter::getRemovedIndices`. [[#2523]](https://github.com/PointCloudLibrary/pcl/pull/2523)
+* Add const modifiers to getters of pcl::PassThrough [[#2524]](https://github.com/PointCloudLibrary/pcl/pull/2524)
+* Add const specifiers for getters in VoxelGrid. [[#2526]](https://github.com/PointCloudLibrary/pcl/pull/2526)
+* Copy the pose info from the input cloud to the output cloud in NaN removal functions [[#2522]](https://github.com/PointCloudLibrary/pcl/pull/2522)
+* Fix misc. typos in tutorials and docstrings [[#2529]](https://github.com/PointCloudLibrary/pcl/pull/2529)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Add PointNormal to ExtractIndices Instantiate Types [[#2389]](https://github.com/PointCloudLibrary/pcl/pull/2389)
+* **[abi]** Improved docstrings and error messages [[#2300]](https://github.com/PointCloudLibrary/pcl/pull/2300)
+* Public access to `VoxelGrid` boost pointer. [[#2205]](https://github.com/PointCloudLibrary/pcl/pull/2205)
+* Add const qualifiers to getters in `filter_indices.h` [[#2193]](https://github.com/PointCloudLibrary/pcl/pull/2193)
+* Avoid huge index jumps in `RandomSample`. Remove `io` dependency from `2d`. [[#2141]](https://github.com/PointCloudLibrary/pcl/pull/2141)
+* **[removal]** Removal of deprecated code in filters, io, surface and visualization modules [[#2077]](https://github.com/PointCloudLibrary/pcl/pull/2077)
+* Suppress unused parameter warning [[#2074]](https://github.com/PointCloudLibrary/pcl/pull/2074)
+* Suppress sign compare warnings [[#2068]](https://github.com/PointCloudLibrary/pcl/pull/2068)
+* Transformation Fix for BoxClipper3D [[#1961]](https://github.com/PointCloudLibrary/pcl/pull/1961)
+* **[abi]** Fixes remove_indices in UniformSampling [[#1902]](https://github.com/PointCloudLibrary/pcl/pull/1902)
+* Inherit StatisticalOutlierRemoval<PCLPointCloud2> from FilterIndices [[#1663]](https://github.com/PointCloudLibrary/pcl/pull/1663)
+
+#### `libpcl_gpu:`
+
+* Remove sm_72 from CUDA 9.0 [[#2567]](https://github.com/PointCloudLibrary/pcl/pull/2567)
+* Fix compilation issues with CUDA 9.1 [[#2212]](https://github.com/PointCloudLibrary/pcl/pull/2212)
+* Fix compilation error in `gpu_people` code [[#2199]](https://github.com/PointCloudLibrary/pcl/pull/2199)
+* Fix some CUDA 9 related errors [[#2181]](https://github.com/PointCloudLibrary/pcl/pull/2181)
+* **[abi]** Allow specifying decimation step in convertToTsdfCloud [[#2099]](https://github.com/PointCloudLibrary/pcl/pull/2099)
+* Fix the incorrect include directory. [[#2024]](https://github.com/PointCloudLibrary/pcl/pull/2024)
+* need to include instantiate.hpp to use PCL_INSTANTIATE [[#1943]](https://github.com/PointCloudLibrary/pcl/pull/1943)
+* Added CUDA compute capability 5.3 [[#1929]](https://github.com/PointCloudLibrary/pcl/pull/1929)
+* Fix issue #1674 [[#1926]](https://github.com/PointCloudLibrary/pcl/pull/1926)
+
+#### `libpcl_io:`
+
+* Suppress miscelanious warnings [[#2556]](https://github.com/PointCloudLibrary/pcl/pull/2556)
+* vtk2mesh: Add parsing support to the new RGBA scalar field added in vtk8 [[#2492]](https://github.com/PointCloudLibrary/pcl/pull/2492)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Improved obj file parsing efficiency. Make parsing robust against situations where there are more normals than points. Added unit tests. [[#2450]](https://github.com/PointCloudLibrary/pcl/pull/2450)
+* `pcl::PCDReader::readHeader()` change `nr_points` type to `size_t` to avoid possible `int32` overflow [[#2408]](https://github.com/PointCloudLibrary/pcl/pull/2408)
+* Fix raw_fallocate for Android and deal with unsupported filesystems. [[#2363]](https://github.com/PointCloudLibrary/pcl/pull/2363)
+* Add low_level_io.h to the header list of the io module [[#2356]](https://github.com/PointCloudLibrary/pcl/pull/2356)
+* Created header for low level I/O helpers. Fix for `::posix_fallocate` on Mac OSX [[#2354]](https://github.com/PointCloudLibrary/pcl/pull/2354)
+* Added warnings when the input data is too large for compressed pcds [[#2323]](https://github.com/PointCloudLibrary/pcl/pull/2323)
+* Allocate disk space with posix_fallocate before mmapping. [[#2325]](https://github.com/PointCloudLibrary/pcl/pull/2325)
+* Fix cmake warning: Logical block closes with mis-matching arguments [[#2320]](https://github.com/PointCloudLibrary/pcl/pull/2320)
+* Added PCL_IO_ENABLE_MAND_LOCKING cmake flag. [[#2315]](https://github.com/PointCloudLibrary/pcl/pull/2315)
+* Added missing 8 bytes to compressed binary pcd length. [[#2281]](https://github.com/PointCloudLibrary/pcl/pull/2281)
+* Remove useless size check in PLYReader::endHeaderCallback() [[#2246]](https://github.com/PointCloudLibrary/pcl/pull/2246)
+* **[behavior]** Fix PLYReader is_dense behavior [[#2133]](https://github.com/PointCloudLibrary/pcl/pull/2133)
+* `EnsensoGrabber` `uint` is undefined in Visual studio. [[#2223]](https://github.com/PointCloudLibrary/pcl/pull/2223)
+* Add protection from invalid WIDTH values in PCD reader [[#2195]](https://github.com/PointCloudLibrary/pcl/pull/2195)
+* `PLYReader` Cast cloud point step as 64-bit integer [[#2161]](https://github.com/PointCloudLibrary/pcl/pull/2161)
+* `OpenNI2Device` Add device sensor check for IR and depth modesetting [[#2150]](https://github.com/PointCloudLibrary/pcl/pull/2150)
+* Adds a check for when CreateFileMappingA fails [[#2146]](https://github.com/PointCloudLibrary/pcl/pull/2146)
+* `PCDWriter`changed `toff` to `size_t` in `writeBinaryCompressed` [[#2144]](https://github.com/PointCloudLibrary/pcl/pull/2144)
+* Prevent POINTS field parsing before point_step is specified [[#2131]](https://github.com/PointCloudLibrary/pcl/pull/2131)
+* Check COUNT value specified in PCD files [[#2126]](https://github.com/PointCloudLibrary/pcl/pull/2126)
+* Prevent mmapping more than the original PCD file size [[#2125]](https://github.com/PointCloudLibrary/pcl/pull/2125)
+* **[api][deprecation]** Add support pcl::PointXYZRGBA to pcl::VLPGrabber. Deprecate rgb signatures. [[#2102]](https://github.com/PointCloudLibrary/pcl/pull/2102)
+* **[removal]** Removal of deprecated code in filters, io, surface and visualization modules [[#2077]](https://github.com/PointCloudLibrary/pcl/pull/2077)
+* Suppress strict alias warning [[#2072]](https://github.com/PointCloudLibrary/pcl/pull/2072)
+* Suppress unused parameter warnings [[#2067]](https://github.com/PointCloudLibrary/pcl/pull/2067)
+* **[abi]** Removed unused member from ply_parser [[#2066]](https://github.com/PointCloudLibrary/pcl/pull/2066)
+* Suppress control reaches end of non-void function in io.h [[#2057]](https://github.com/PointCloudLibrary/pcl/pull/2057)
+* Modify STRICT_ALIGN because macro expansion w/defined is undefined [[#2043]](https://github.com/PointCloudLibrary/pcl/pull/2043)
+* Add necessary boost headers to pcl/io to build in CUDA mode [[#2025]](https://github.com/PointCloudLibrary/pcl/pull/2025)
+* Fix MSVC compile issue related with ssize_t [[#2027]](https://github.com/PointCloudLibrary/pcl/pull/2027)
+* Adds in-memory PCD serialization/deserialization; de-duplicates PCDReader::readHeader(). (take #2) [[#1986]](https://github.com/PointCloudLibrary/pcl/pull/1986)
+
+#### `libpcl_kdtree:`
+
+* Consistent ptr typedefs for kd tree flann [[#1607]](https://github.com/PointCloudLibrary/pcl/pull/1607)
+
+#### `libpcl_keypoints:`
+
+* Add `TrajkovicKeypoint2D/3D` to CMake build [[#2179]](https://github.com/PointCloudLibrary/pcl/pull/2179)
+
+#### `libpcl_ml:`
+
+* Fix installation rules for ml module [[#2192]](https://github.com/PointCloudLibrary/pcl/pull/2192)
+
+#### `libpcl_octree:`
+
+* **[behavior]** Reverse octree's depth first iterator order [[#2332]](https://github.com/PointCloudLibrary/pcl/pull/2332)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Make test conditions consistent with `OctreePointCloudSearch::boxSearch()` implementation. [[#2457]](https://github.com/PointCloudLibrary/pcl/pull/2457)
+* `octree_key.h` suppress GCC 8 ignored-qualifier warning [[#2405]](https://github.com/PointCloudLibrary/pcl/pull/2405)
+* **[api][deprecation][new-feature]** Implementation of the iterator 'OctreeLeafNodeBreadthIterator'. [[#2204]](https://github.com/PointCloudLibrary/pcl/pull/2204)
+* **[abi][new-feature]** Implementation of the iterator 'OctreeFixedDepthIterator'. [[#1983]](https://github.com/PointCloudLibrary/pcl/pull/1983)
+* Octree Iterator begin/end method and added tests [[#2174]](https://github.com/PointCloudLibrary/pcl/pull/2174)
+* Remove parametrization of end iterators [[#2168]](https://github.com/PointCloudLibrary/pcl/pull/2168)
+* Fix docstrings in octree test [[#2173]](https://github.com/PointCloudLibrary/pcl/pull/2173)
+* **[api]** OctreeIterators special member revision [[#2108]](https://github.com/PointCloudLibrary/pcl/pull/2108)
+* Remove unused variable from octree_viewer [[#2069]](https://github.com/PointCloudLibrary/pcl/pull/2069)
+* Silence compile warning by removing superfluous call to std::max() [[#2014]](https://github.com/PointCloudLibrary/pcl/pull/2014)
+* [OCTREE] Add bounding box checks in isVoxelOccupiedAtPoint() and deleteVoxelAtPoint() [[#1976]](https://github.com/PointCloudLibrary/pcl/pull/1976)
+
+#### `libpcl_outofcore:`
+
+* Explictly use mt19937 random generator for boost 1.67. [[#2338]](https://github.com/PointCloudLibrary/pcl/pull/2338)
+* Fixed queryBBIncludes_subsample [[#1988]](https://github.com/PointCloudLibrary/pcl/pull/1988)
+
+#### `libpcl_people:`
+
+* Misleading indentation [[#2034]](https://github.com/PointCloudLibrary/pcl/pull/2034)
+
+#### `libpcl_recognition:`
+
+* Relax threshold in Hough3DGrouping test [[#2507]](https://github.com/PointCloudLibrary/pcl/pull/2507)
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Setting the resolution of the occupancy grid [[#2273]](https://github.com/PointCloudLibrary/pcl/pull/2273)
+* Inline helper function gcCorrespSorter() [[#2248]](https://github.com/PointCloudLibrary/pcl/pull/2248)
+* Misleading indentation [[#2034]](https://github.com/PointCloudLibrary/pcl/pull/2034)
+
+#### `libpcl_registration:`
+
+* Remove std::binary_function from Registration [[#2599]](https://github.com/PointCloudLibrary/pcl/pull/2599)
+* Suppress miscelanious warnings [[#2556]](https://github.com/PointCloudLibrary/pcl/pull/2556)
+* Relax precision requirements on TransformationEstimationLM test. [[#2497]](https://github.com/PointCloudLibrary/pcl/pull/2497)
+* Relax rejector threshold in JointIterativeClosestPoint test. [[#2496]](https://github.com/PointCloudLibrary/pcl/pull/2496)
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Remove explicit initialization of `update_visualizer_` in `Registration` [[#2423]](https://github.com/PointCloudLibrary/pcl/pull/2423)
+* **[removal]** Remove registration module deprecated methods [[#2076]](https://github.com/PointCloudLibrary/pcl/pull/2076)
+* Suppress (maybe) uninitialized warning [[#2073]](https://github.com/PointCloudLibrary/pcl/pull/2073)
+* Remove unreachable code in DefaultConvergenceCriteria [[#1967]](https://github.com/PointCloudLibrary/pcl/pull/1967)
+* **[abi]** Added option to specify translation and rotation convergence deltas in ICP and NDT algorithms. [[#1724]](https://github.com/PointCloudLibrary/pcl/pull/1724)
+
+#### `libpcl_sample_consensus:`
+
+* Revise direction test in SampleConsensusModelParallelLine/RANSAC to be consistent with set tolerance. [[#2491]](https://github.com/PointCloudLibrary/pcl/pull/2491)
+* Fix error in SampleConsensusModelLine::isSampleGood [[#2488]](https://github.com/PointCloudLibrary/pcl/pull/2488)
+* **[api]** Const-qualify most of the methods in SAC model classes [[#2270]](https://github.com/PointCloudLibrary/pcl/pull/2270)
+* **[removal]** Remove deprecated functions and variables from SAC module [[#2071]](https://github.com/PointCloudLibrary/pcl/pull/2071)
+
+#### `libpcl_search:`
+
+* Correct testPoint for organized nearest neighbor search [[#2198]](https://github.com/PointCloudLibrary/pcl/pull/2198)
+
+#### `libpcl_segmentation:`
+
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Add setter/getter for search method in ConditionalEuclideanClustering [[#2437]](https://github.com/PointCloudLibrary/pcl/pull/2437)
+* Increase threshold for expected value in test_non_linear [[#2424]](https://github.com/PointCloudLibrary/pcl/pull/2424)
+* Avoid infinite recursion in getPointCloudDifference [[#2402]](https://github.com/PointCloudLibrary/pcl/pull/2402)
+* Correct setting of is_dense flag in SegmentDifferences. Deprecate unused parameter in method. [[#2380]](https://github.com/PointCloudLibrary/pcl/pull/2380)
+* Dereference shared_ptr, fix for GCC8 [[#2299]](https://github.com/PointCloudLibrary/pcl/pull/2299)
+* **[abi][deprecation]** Provide proper EuclideanClusterComparator method depreciation. New Pragma macro. New Deprecated type. [[#2096]](https://github.com/PointCloudLibrary/pcl/pull/2096)
+* Removed normal related accessors and types from EuclideanClusterComparator [[#1542]](https://github.com/PointCloudLibrary/pcl/pull/1542)
+
+#### `libpcl_simulation:`
+
+* Add `const` qualifiers to `RangeLikelihood` getters. [[#2411]](https://github.com/PointCloudLibrary/pcl/pull/2411)
+* **[api]** Use GLuint rather than size_t to represent OpenGL indices. [[#2238]](https://github.com/PointCloudLibrary/pcl/pull/2238)
+* Support both RGB and RGBA colors in mesh loading [[#2036]](https://github.com/PointCloudLibrary/pcl/pull/2036)
+* **[abi]** Redo: Simulation: enable returning of organized point clouds [[#1687]](https://github.com/PointCloudLibrary/pcl/pull/1687)
+* Simulation: more access to camera parameters [[#1650]](https://github.com/PointCloudLibrary/pcl/pull/1650)
+
+#### `libpcl_surface:`
+
+* Fixed memory leak in Poisson's BSplineData [[#2572]](https://github.com/PointCloudLibrary/pcl/pull/2572)
+* Suppress miscelanious warnings [[#2556]](https://github.com/PointCloudLibrary/pcl/pull/2556)
+* Add some missing eigen alignment operators [[#2433]](https://github.com/PointCloudLibrary/pcl/pull/2433)
+* Make pcl::MovingLeastSquares thread-safe [[#2418]](https://github.com/PointCloudLibrary/pcl/pull/2418)
+* **[abi]** Missing pcl::MovingLeastSquaresOMP declaration without /openmp [[#2324]](https://github.com/PointCloudLibrary/pcl/pull/2324)
+* **[abi]** Improved docstrings and error messages [[#2300]](https://github.com/PointCloudLibrary/pcl/pull/2300)
+* opennurbs: fix `ON_Curve::EvaluatePoint` calculation [[#2185]](https://github.com/PointCloudLibrary/pcl/pull/2185)
+* **[removal]** Removal of deprecated code in filters, io, surface and visualization modules [[#2077]](https://github.com/PointCloudLibrary/pcl/pull/2077)
+* Suppress (maybe) uninitialized warning [[#2073]](https://github.com/PointCloudLibrary/pcl/pull/2073)
+* Suppress sign compare warnings [[#2068]](https://github.com/PointCloudLibrary/pcl/pull/2068)
+* Fix incorrect Ptr/ConstPtr typedefs in MovingLeastSquaresOMP [[#2055]](https://github.com/PointCloudLibrary/pcl/pull/2055)
+* Replace float indices with Eigen Index [[#2017]](https://github.com/PointCloudLibrary/pcl/pull/2017)
+* **[api][deprecation][new-feature]** Add new mls projection method. Deprecated `MovingLeastSquares::setPolynomialFit()`. [[#1960]](https://github.com/PointCloudLibrary/pcl/pull/1960)
+* Avoid phantom surfces in marching cubes hoppe [[#1874]](https://github.com/PointCloudLibrary/pcl/pull/1874)
+* **[api]** Add ability to cache mls results [[#1952]](https://github.com/PointCloudLibrary/pcl/pull/1952)
+
+#### `PCL Apps:`
+
+* Suppress miscelanious warnings [[#2556]](https://github.com/PointCloudLibrary/pcl/pull/2556)
+* Fix 3d_rec_framework compilation error [[#2495]](https://github.com/PointCloudLibrary/pcl/pull/2495)
+* Fix compilation issue in point cloud editor. [[#2490]](https://github.com/PointCloudLibrary/pcl/pull/2490)
+* `demean_cloud` correct usage message. [[#2443]](https://github.com/PointCloudLibrary/pcl/pull/2443)
+* Do not use deprecated method in openni_mls_smoothing app [[#2421]](https://github.com/PointCloudLibrary/pcl/pull/2421)
+* add windows.h for includes GL/gl.h; handle cancel for denoiseWidget. [[#2267]](https://github.com/PointCloudLibrary/pcl/pull/2267)
+* Add missing dependecy to apps [[#2251]](https://github.com/PointCloudLibrary/pcl/pull/2251)
+* Suppress the final set of warnings in pcl apps [[#2082]](https://github.com/PointCloudLibrary/pcl/pull/2082)
+* **[abi]** More warning suppression in pcl apps [[#2080]](https://github.com/PointCloudLibrary/pcl/pull/2080)
+
+#### `PCL Docs:`
+
+* Fix misc. typos in tutorials and docstrings [[#2529]](https://github.com/PointCloudLibrary/pcl/pull/2529)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Docstring typos' corrections. [[#2449]](https://github.com/PointCloudLibrary/pcl/pull/2449)
+* `demean_cloud` correct usage message. [[#2443]](https://github.com/PointCloudLibrary/pcl/pull/2443)
+* Set IMAGE_PATH explicitly in Doxygen config [[#2442]](https://github.com/PointCloudLibrary/pcl/pull/2442)
+* Switch to using client-based search in Doxygen [[#2391]](https://github.com/PointCloudLibrary/pcl/pull/2391)
+* **[api]** Misc. typos (cont.) [[#2215]](https://github.com/PointCloudLibrary/pcl/pull/2215)
+* doc: misc. typos [[#2213]](https://github.com/PointCloudLibrary/pcl/pull/2213)
+* Add url to API/ABI compatibity report [[#2116]](https://github.com/PointCloudLibrary/pcl/pull/2116)
+* Improve Doxygen comments for HistogramInterpolationMethod [[#2111]](https://github.com/PointCloudLibrary/pcl/pull/2111)
+* Update organized.h [[#1965]](https://github.com/PointCloudLibrary/pcl/pull/1965)
+* Typo [[#1968]](https://github.com/PointCloudLibrary/pcl/pull/1968)
+* Fixed spelling and grammar errors [[#1959]](https://github.com/PointCloudLibrary/pcl/pull/1959)
+* Fixed error in documentation. [[#1957]](https://github.com/PointCloudLibrary/pcl/pull/1957)
+
+#### `PCL Tutorials:`
+
+* Fix dataset link in conditional euclidean clustering tutorial [[#2546]](https://github.com/PointCloudLibrary/pcl/pull/2546)
+* Fix dead links in Walkthrough tutorial [[#2532]](https://github.com/PointCloudLibrary/pcl/pull/2532)
+* Simplify explanation of PointXYZ structure in "Writing PCD" tutorial [[#2534]](https://github.com/PointCloudLibrary/pcl/pull/2534)
+* Fix misc. typos in tutorials and docstrings [[#2529]](https://github.com/PointCloudLibrary/pcl/pull/2529)
+* Fix a dead link to Radu Rusu's dissertation in the tutorial. [[#2508]](https://github.com/PointCloudLibrary/pcl/pull/2508)
+* Fix various errors and typos in the docstrings and tutorials [[#2486]](https://github.com/PointCloudLibrary/pcl/pull/2486)
+* Fix link to Institut Maupertuis's ensenso_extrinsic_calibration repo [[#2447]](https://github.com/PointCloudLibrary/pcl/pull/2447)
+* Add settings for hypothesis verification [[#2274]](https://github.com/PointCloudLibrary/pcl/pull/2274)
+* Fix ICP tutorial [[#2244]](https://github.com/PointCloudLibrary/pcl/pull/2244)
+* Fix error in example code for estimate set of surface for a subset of points in the input dataset [[#2203]](https://github.com/PointCloudLibrary/pcl/pull/2203)
+* Update message about legacy point cloud types in tutorial [[#2175]](https://github.com/PointCloudLibrary/pcl/pull/2175)
+* Add descriptor unpacking to GASD tutorial [[#2167]](https://github.com/PointCloudLibrary/pcl/pull/2167)
+* Fix convert to `Eigen::Map<const Eigen::Vector3f>` from Normal of `pcl::PointXYZINormal` [[#2128]](https://github.com/PointCloudLibrary/pcl/pull/2128)
+* Fix the tutorial qt_visualizer compilation issue: qt4 -> qt5. [[#2051]](https://github.com/PointCloudLibrary/pcl/pull/2051)
+* Fix several documentation typos [[#2020]](https://github.com/PointCloudLibrary/pcl/pull/2020)
+* Replace literal include of wrong CMakeLists file with correct script [[#1971]](https://github.com/PointCloudLibrary/pcl/pull/1971)
+* Update Ensenso tutorial for Ensenso X devices [[#1933]](https://github.com/PointCloudLibrary/pcl/pull/1933)
+
+#### `PCL Examples:`
+
+* Suppress strict alias warning [[#2072]](https://github.com/PointCloudLibrary/pcl/pull/2072)
+* Suppress (maybe) uninitialized warning [[#2073]](https://github.com/PointCloudLibrary/pcl/pull/2073)
+* Fix CPC/LCCP segmentation examples for VTK 7.1 [[#2063]](https://github.com/PointCloudLibrary/pcl/pull/2063)
+
+#### `PCL Tests:`
+
+* Corrections to Windows unit tests. [[#2596]](https://github.com/PointCloudLibrary/pcl/pull/2596)
+* Relax eigen22f test criteria [[#2553]](https://github.com/PointCloudLibrary/pcl/pull/2553)
+* Solve issues with failing features tests [[#2544]](https://github.com/PointCloudLibrary/pcl/pull/2544)
+* Relax requirements in eigen22d test. Always provide a normalized result in `pcl::transformPlane`. [[#2503]](https://github.com/PointCloudLibrary/pcl/pull/2503)
+* Enable tests_2d and tests_io in AppVeyor. [[#2505]](https://github.com/PointCloudLibrary/pcl/pull/2505)
+* Relax threshold in Hough3DGrouping test [[#2507]](https://github.com/PointCloudLibrary/pcl/pull/2507)
+* Relax precision requirements on TransformationEstimationLM test. [[#2497]](https://github.com/PointCloudLibrary/pcl/pull/2497)
+* Relax rejector threshold in JointIterativeClosestPoint test. [[#2496]](https://github.com/PointCloudLibrary/pcl/pull/2496)
+* vtk2mesh: Add parsing support to the new RGBA scalar field added in vtk8 [[#2492]](https://github.com/PointCloudLibrary/pcl/pull/2492)
+* Revise direction test in SampleConsensusModelParallelLine/RANSAC to be consistent with set tolerance. [[#2491]](https://github.com/PointCloudLibrary/pcl/pull/2491)
+* Make test conditions consistent with `OctreePointCloudSearch::boxSearch()` implementation. [[#2457]](https://github.com/PointCloudLibrary/pcl/pull/2457)
+* Increase threshold for expected value in test_non_linear [[#2424]](https://github.com/PointCloudLibrary/pcl/pull/2424)
+* Replace floating point numerals when using `boost::posix_time`. Fix compatibility with Boost 1.67. [[#2422]](https://github.com/PointCloudLibrary/pcl/pull/2422)
+* Cleanup and improve unit test coverage for transformPointCloud functions [[#2245]](https://github.com/PointCloudLibrary/pcl/pull/2245)
+* Fixes and new assertion macro in "pcl_tests.h" [[#2237]](https://github.com/PointCloudLibrary/pcl/pull/2237)
+* Add new gtest path [[#1920]](https://github.com/PointCloudLibrary/pcl/pull/1920)
+
+#### `PCL Tools:`
+
+* Allow the `pcl_uniform_sampling` tool to deal with several formats (PCD, PLY and VTK) as input or output point cloud [[#2348]](https://github.com/PointCloudLibrary/pcl/pull/2348)
+* mesh_sampling tool: Add support for colors [[#2257]](https://github.com/PointCloudLibrary/pcl/pull/2257)
+* Error message on non-recognized feature names [[#2178]](https://github.com/PointCloudLibrary/pcl/pull/2178)
+* Suppress sign compare warnings [[#2068]](https://github.com/PointCloudLibrary/pcl/pull/2068)
+* [OCTREE] Compute accurately the centroids of octree in 'pcl_octree_viewer' [[#1981]](https://github.com/PointCloudLibrary/pcl/pull/1981)
+* **[api][deprecation][new-feature]** Add new mls projection method. Deprecated `MovingLeastSquares::setPolynomialFit()`. [[#1960]](https://github.com/PointCloudLibrary/pcl/pull/1960)
+* [OCTREE] Fix pcl_octree_viewer [[#1973]](https://github.com/PointCloudLibrary/pcl/pull/1973)
+* [OCTREE] Remove a useless field in octree_viewer. [[#1980]](https://github.com/PointCloudLibrary/pcl/pull/1980)
+
+#### `CI:`
+
+* Disable Travis email notifications. Update PCL logo endpoint. [[#2535]](https://github.com/PointCloudLibrary/pcl/pull/2535)
+* Migrate Travis to the new travis-ci.com platform [[#2538]](https://github.com/PointCloudLibrary/pcl/pull/2538)
+* Enable tests_2d and tests_io in AppVeyor. [[#2505]](https://github.com/PointCloudLibrary/pcl/pull/2505)
+* Fix docs on Travis CI. [[#2441]](https://github.com/PointCloudLibrary/pcl/pull/2441)
+* Disable SSE flags in AppVeyor. [[#2438]](https://github.com/PointCloudLibrary/pcl/pull/2438)
+* Split (yet again) Travis test job into two and tweak timings in building apps [[#2182]](https://github.com/PointCloudLibrary/pcl/pull/2182)
+* **[new-feature]** Enable Global Tests on Windows CI [[#2137]](https://github.com/PointCloudLibrary/pcl/pull/2137)
+* Travis merge test jobs. Expose VTK include directory for the visualization module. [[#2163]](https://github.com/PointCloudLibrary/pcl/pull/2163)
+* Remove unnecessary PPAs and packages from Travis [[#2153]](https://github.com/PointCloudLibrary/pcl/pull/2153)
+* AppVeyor - Change to simple style of specify triplet [[#2135]](https://github.com/PointCloudLibrary/pcl/pull/2135)
+* Initial Appveyor CI integration [[#2083]](https://github.com/PointCloudLibrary/pcl/pull/2083)
+* Change Travis to use pip3 for installing sphinx [[#2124]](https://github.com/PointCloudLibrary/pcl/pull/2124)
+* [TRAVIS] Enable the build of apps. [[#2012]](https://github.com/PointCloudLibrary/pcl/pull/2012)
+* [TRAVIS] Enable the build of tools. [[#2007]](https://github.com/PointCloudLibrary/pcl/pull/2007)
+* Disable tools build in CI. [[#2003]](https://github.com/PointCloudLibrary/pcl/pull/2003)
+
+
## *= 1.8.1 (08.08.2017) =*
* Replaced `make_shared` invocations on aligned allocated vars
* Removed unnecessary mutex locking in `TimeTrigger::registerCallback`
[[#1087]](https://github.com/PointCloudLibrary/pcl/pull/1087)
* Updated PCL exception types to have nothrow copy constructor and copy
- assigment operator
+ assignment operator
[[#1119]](https://github.com/PointCloudLibrary/pcl/pull/1119)
* Fixed a bug with `PCA` not triggering recomputation when input indices are
changed
* Added a new `MetaRegistration` class that allows to register a stream of
clouds where each cloud is aligned to the conglomerate of all previous clouds
[[#1426]](https://github.com/PointCloudLibrary/pcl/pull/1426)
-* Fixed segmentation fault occuring in `CorrespondenceRejectorSurfaceNormal`
+* Fixed segmentation fault occurring in `CorrespondenceRejectorSurfaceNormal`
[[#1536]](https://github.com/PointCloudLibrary/pcl/pull/1536)
* Use aligned allocator in vectors of MatchingCandidate
[[#1552]](https://github.com/PointCloudLibrary/pcl/pull/1552)
* Added a setDimension function to concave hull, so users can specify desired dimensionality of the resulting hull. If no dimension is specified, one will be automatically determined.
* Fixed bug #692 - MovingLeastSquares indices issues
* Added curve fitting and trimming of surfaces to examples/surface/example_nurbs_fitting_surface.cpp
-* Added iterative fitting routines for curve fitting surface::on_nurbs::Triangulation - added convertion functions for nurbs curve to line-polygon - added convertion functions for nurbs surface and curve to PolyMesh
+* Added iterative fitting routines for curve fitting surface::on_nurbs::Triangulation - added conversion functions for nurbs curve to line-polygon - added conversion functions for nurbs surface and curve to PolyMesh
* Added flag to enable/disable usage of UmfPack for fast solving of sparse systems of equations - added triangulation functions to convert ON_NurbsSurface to pcl::PolygonMesh
* Added bug fix in ConcaveHull, thanks to summer.icecream
* Added marching cubes using RBF and Hoppe SDF
* Pushed new functions that perform texture-mapping on meshes.
* Fix: issue #646 (vtk_smoothing not copied)
-* Added new functionalities to TextureMapping: Find occlusions based on raytracing in octrees, UV mapping based on Texture resolution and camera focal lenght.
+* Added new functionalities to TextureMapping: Find occlusions based on raytracing in octrees, UV mapping based on Texture resolution and camera focal length.
* Relaxing dimensionality check threshold on concave_hull, so that 3D hulls should no longer be calculated on 2D input.
* Added poisson filter
* added a normal space sampling filter
* fixed bug #433: `pcl::CropBox` doesn't update `width` and `height` member of output point cloud
* fixed bug #423 `CropBox` + `VoxelGrid` filters, order changes behaviour (using openni grabber)
-* add `CropHull` filter for filtering points based on a 2D or 3D convex or concave hull. The ray-polygon intersection test is used, which relys on closed polygons/surfaces
+* add `CropHull` filter for filtering points based on a 2D or 3D convex or concave hull. The ray-polygon intersection test is used, which relies on closed polygons/surfaces
* added clipper3D interface and a draft plane_clipper3D implementation
* removed spurious old `ColorFilter` class (obsolete, and it was never implemented - the skeleton was just lurking around)
* better Doxygen documentation to `PassThrough`, `VoxelGrid` and `Filter`
### `lipcl_keypoints`
* added refine method for `Harris3D` corner detector
-* rewrote big parts of the NARF keypoint extraction. Hopfully fixing some stability issues. Unfortunately still pretty slow for high resolution point clouds.
+* rewrote big parts of the NARF keypoint extraction. Hopefully fixing some stability issues. Unfortunately still pretty slow for high resolution point clouds.
* fixed bug #461 (SIFT Keypoint result cloud fields not complete); cleaned up the line-wrapping in the error/warning messages
### `libpcl_surface`
* fixed a bug in `getRejectedQueryIndices`, wrong output when order of correspondences have been changed
* moved `getRejectedQueryIndices` to pcl/common/correspondence.h
* added more doxygen documentation to the registration components
-* marked all `getRemainingCorrespondences`-functions as DEPRECATED, we sould replace them with purely stateless version outside the class body
+* marked all `getRemainingCorrespondences`-functions as DEPRECATED, we should replace them with purely stateless version outside the class body
* fixed a const missing in `PolynomialCalculationsT` (#388 - thanks Julian!)
* add `PCL_DEPRECATED` macro, closes #354.
* added `PointXYZHSV` type and the conversions for it
* fixed bug in getRejectedQueryIndices, wrong output when order of correspondences have been changed
* moved getRejectedQueryIndices to pcl/common/correspondence.h
* added more doxygen documentation to the registration components
- * marked all "getRemainingCorrespondences"-functions as DEPRECATED, we sould replace them with purely stateless version outside the class body
+ * marked all "getRemainingCorrespondences"-functions as DEPRECATED, we should replace them with purely stateless version outside the class body
* Update: remove ciminpack dependency and rely on eigen for LM
* Fixed a bug in ICP-NL by modifying `WarpPointRigid` to preserve the value of the 4th coordinate when warping; Re-enabled missing unit tests for ICP and ICP-NL
* Added point-to-plane ICP
-* added nr_iterations_ and max_iterations_ to the initializer list (were mising)
+* added nr_iterations_ and max_iterations_ to the initializer list (were missing)
* Fixed bugs in `WarpPointRigid3D` and `TransformationEstimationLM`
* fixed a problem where if no correspondences would be found via `nearestKSearch`, the RANSAC-based rejection scheme would fail (thanks to James for reporting this)
* changed the default LM implementation to use L2 instead of L2SQR
* added a new generalized field value filter (_pcl::ConditionalRemoval_)
* cleaned up normal estimation through integral images
* PCL rift.hpp and point_types.hpp fixed for Windows/VS2010
- * fixed all _is_dense_ occurances
+ * fixed all _is_dense_ occurrences
* *unmanged all _Eigen3::_ calls to _Eigen::_*
* changed all _!isnan_ checks to _isfinite_ in order to catch INF/-INF values
* added vtk_io tools for saving VTK data from _PolygonMesh_ structures
* Added Correspondence as a structure representing correspondences/matches (similar to OpenCV's DMatch) containing query and match indices as well as the distance between them respective points.
* Added _CorrespondenceEstimation_ for determining closest point correspondence, feature correspondences and reciprocal point correspondences.
* Added _CorrespondenceRejection_ and derivations for rejecting correspondences, e.g., based on distance, removing 1-to-n correspondences, RANSAC-based outlier removal (+transformation estimation).
- * Further splitted up registration.h and added transformation estimation classes, e.g., for estimating rigid transformation using SVD.
+ * Further split up registration.h and added transformation estimation classes, e.g., for estimating rigid transformation using SVD.
* Added ```sensor_msgs::Image image; pcl::toROSMsg (cloud, image);```See tools/convert_pcd_image.cpp for a sample.
* Added a new point type, _PointWithScale_, to store the output of _SIFTKeypoint_ detection.
* Fixed a minor bug in the error-checking in _SIFTKeypoint::setScales(...)_
* Added RSD (Radius Signature Descriptor) feature.
* Updated the entire PCL tree to use FLANN as a default _KdTree_
* Optimized the _KdTreeFLANN::radiusSearch_ so it doesn't do any memory allocation if the indices and distances vectors passed in are pre-allocated to the point cloud size.
- * Changed _KdTree::radiusSearch_ method signature to return int (number of neighbors found) intead of bool
+ * Changed _KdTree::radiusSearch_ method signature to return int (number of neighbors found) instead of bool
* added new _pcl::View<T>_ class that holds a _PointCloud_, an _Image_, and a _CameraInfo_ message (r34575)
* Moving the _Surface_ reconstruction framework to the new structure (r34547)
* Moving the _Segmentation_ framework to the new structure (r34546)
cmake_policy(SET CMP0020 NEW) # Automatically link Qt executables to qtmain target on Windows
endif()
+if(POLICY CMP0042)
+ # Uses Mac OS X @rpath in the target's install name
+ cmake_policy(SET CMP0042 NEW)
+endif()
+
if(POLICY CMP0048)
cmake_policy(SET CMP0048 OLD) # do not use VERSION option in project() command
endif()
cmake_policy(SET CMP0054 OLD) # Silent warnings about quoted variables
endif()
+if(POLICY CMP0074)
+ # TODO:
+ # 1. Find*.cmake modules need to be individually verified.
+ # 2. PCLConfig.cmake needs to be changed.
+ cmake_policy(SET CMP0074 OLD)
+endif()
+
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "possible configurations" FORCE)
# In case the user does not setup CMAKE_BUILD_TYPE, assume it's RelWithDebInfo
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "build type default to RelWithDebInfo, set to Release to improve performance" FORCE)
-endif("${CMAKE_BUILD_TYPE}" STREQUAL "")
+endif()
project(PCL)
string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH})
# ---[ Include pkgconfig
-include (FindPkgConfig)
+include(FindPkgConfig)
# ---[ Release/Debug specific flags
if(CMAKE_BUILD_TYPE STREQUAL "Release" OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
set(CMAKE_COMPILER_IS_MSVC 1)
endif()
+# Create a variable with expected default CXX flags
+# This will be used further down the road to check if the user explicitly provided CXX flags
+if(CMAKE_COMPILER_IS_MSVC)
+ set(CMAKE_CXX_FLAGS_DEFAULT "/DWIN32 /D_WINDOWS /W3 /GR /EHsc")
+else()
+ set(CMAKE_CXX_FLAGS_DEFAULT "")
+endif()
+
include("${PCL_SOURCE_DIR}/cmake/pcl_verbosity.cmake")
include("${PCL_SOURCE_DIR}/cmake/pcl_targets.cmake")
include("${PCL_SOURCE_DIR}/cmake/pcl_options.cmake")
if(CMAKE_TIMING_VERBOSE AND UNIX)
set_property(GLOBAL PROPERTY RULE_MESSAGES OFF)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_SOURCE_DIR}/cmake/custom_output.sh")
-endif(CMAKE_TIMING_VERBOSE AND UNIX)
+endif()
# check for SSE flags
include("${PCL_SOURCE_DIR}/cmake/pcl_find_sse.cmake")
-if (PCL_ENABLE_SSE)
+if(PCL_ENABLE_SSE AND "${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
PCL_CHECK_FOR_SSE()
-endif (PCL_ENABLE_SSE)
+endif()
# ---[ Unix/Darwin/Windows specific flags
if(CMAKE_COMPILER_IS_GNUCXX)
- if("${CMAKE_CXX_FLAGS}" STREQUAL "")
- SET(CMAKE_CXX_FLAGS "-Wall -Wextra -Wno-unknown-pragmas -fno-strict-aliasing -Wno-format-extra-args -Wno-sign-compare -Wno-invalid-offsetof -Wno-conversion ${SSE_FLAGS}")
+ if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
+ SET(CMAKE_CXX_FLAGS "-Wall -Wextra -Wno-unknown-pragmas -fno-strict-aliasing -Wno-format-extra-args -Wno-sign-compare -Wno-invalid-offsetof -Wno-conversion ${SSE_FLAGS_STR}")
# Enable -Wabi for GCC > 4.3, and -Wno-deprecated for GCC < 4.3
# to disable a lot of warnings which are not fixable
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
- if (GCC_VERSION VERSION_GREATER 4.3)
+ if(GCC_VERSION VERSION_GREATER 4.3)
message(STATUS "-- GCC > 4.3 found, enabling -Wabi")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wabi")
else()
message(STATUS "-- GCC < 4.3 found, enabling -Wno-deprecated")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
- endif ()
+ endif()
endif()
if(NOT ANDROID)
if(WIN32)
if(PCL_SHARED_LIBS)
SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--export-all-symbols -Wl,--enable-auto-import")
- if (MINGW)
+ if(MINGW)
add_definitions("-DBOOST_THREAD_USE_LIB")
SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--allow-multiple-definition")
endif()
- else(PCL_SHARED_LIBS)
+ else()
add_definitions("-DBOOST_LIB_DIAGNOSTIC -DBOOST_THREAD_USE_LIB")
- endif(PCL_SHARED_LIBS)
+ endif()
endif()
endif()
if(CMAKE_COMPILER_IS_MSVC)
add_definitions("-DBOOST_ALL_NO_LIB -D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS -DNOMINMAX -DPCL_ONLY_CORE_POINT_TYPES /bigobj ${SSE_DEFINITIONS}")
- if("${CMAKE_CXX_FLAGS}" STREQUAL " /DWIN32 /D_WINDOWS /W3 /GR /EHsc") # Check against default flags
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj /EHsc /fp:precise /wd4800 /wd4521 /wd4251 /wd4275 /wd4305 /wd4355 ${SSE_FLAGS}")
+
+ if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj /fp:precise /wd4800 /wd4521 /wd4251 /wd4275 /wd4305 /wd4355 ${SSE_FLAGS_STR}")
# Add extra code generation/link optimizations
if(CMAKE_MSVC_CODE_LINK_OPTIMIZATION)
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
SET(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG /OPT:REF")
SET(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
- endif(CMAKE_MSVC_CODE_LINK_OPTIMIZATION)
+ endif()
# /MANIFEST:NO") # please, don't disable manifest generation, otherwise crash at start for vs2008
- if( MSVC_VERSION GREATER 1500 AND ${CMAKE_VERSION} VERSION_GREATER "2.8.6")
+ if(MSVC_VERSION GREATER 1500 AND ${CMAKE_VERSION} VERSION_GREATER "2.8.6")
include(ProcessorCount)
ProcessorCount(N)
if(NOT N EQUAL 0)
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP${N} ")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP${N} ")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP${N}")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP${N}")
endif()
endif()
endif()
+
+ if(CMAKE_GENERATOR STREQUAL "Ninja")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /FS")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FS")
+ endif()
endif()
if(CMAKE_COMPILER_IS_PATHSCALE)
- if("${CMAKE_CXX_FLAGS}" STREQUAL "")
+ if("${CMAKE_CXX_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
SET(CMAKE_CXX_FLAGS "-Wno-uninitialized -zerouv -pthread -mp")
endif()
if("${CMAKE_SHARED_LINKER_FLAGS}" STREQUAL "")
endif()
if(CMAKE_COMPILER_IS_CLANG)
- if("${CMAKE_C_FLAGS}" STREQUAL "")
+ if("${CMAKE_C_FLAGS}" STREQUAL "${CMAKE_CXX_FLAGS_DEFAULT}")
SET(CMAKE_C_FLAGS "-Qunused-arguments")
endif()
if("${CMAKE_CXX_FLAGS}" STREQUAL "")
- SET(CMAKE_CXX_FLAGS "-ftemplate-depth=1024 -Qunused-arguments -Wno-invalid-offsetof ${SSE_FLAGS}") # Unfortunately older Clang versions do not have this: -Wno-unnamed-type-template-args
+ SET(CMAKE_CXX_FLAGS "-ftemplate-depth=1024 -Qunused-arguments -Wno-invalid-offsetof ${SSE_FLAGS_STR}") # Unfortunately older Clang versions do not have this: -Wno-unnamed-type-template-args
if(APPLE AND WITH_CUDA AND CUDA_FOUND)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
endif()
endif()
include("${PCL_SOURCE_DIR}/cmake/pcl_utils.cmake")
-set(PCL_VERSION "1.8.1" CACHE STRING "PCL version")
+set(PCL_VERSION "1.9.0" CACHE STRING "PCL version")
DISSECT_VERSION()
GET_OS_INFO()
SET_INSTALL_DIRS()
if(WIN32)
set(PCL_RESOURCES_DIR "${PCL_SOURCE_DIR}/resources")
set(PCL_POINTCLOUDS_DIR "${PCL_RESOURCES_DIR}/pointclouds")
-endif(WIN32)
+endif()
set(PCL_OUTPUT_LIB_DIR "${PCL_BINARY_DIR}/${LIB_INSTALL_DIR}")
set(PCL_OUTPUT_BIN_DIR "${PCL_BINARY_DIR}/${BIN_INSTALL_DIR}")
make_directory("${PCL_OUTPUT_LIB_DIR}")
make_directory("${PCL_OUTPUT_BIN_DIR}")
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PCL_OUTPUT_LIB_DIR}")
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PCL_OUTPUT_BIN_DIR}")
if(WIN32)
+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PCL_OUTPUT_BIN_DIR}")
foreach(config ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER ${config} CONFIG)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${CONFIG} "${PCL_OUTPUT_LIB_DIR}")
# ---[ Windows requires DLLs (shared libraries) to be installed in the same directory as executables
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${CONFIG} "${PCL_OUTPUT_BIN_DIR}")
endforeach(config)
-else(WIN32)
- set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PCL_OUTPUT_LIB_DIR}")
- set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PCL_OUTPUT_BIN_DIR}")
+else()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PCL_OUTPUT_LIB_DIR}")
-endif(WIN32)
+endif()
# Add an "uninstall" target
configure_file("${PCL_SOURCE_DIR}/cmake/uninstall_target.cmake.in"
if(OPENMP_FOUND)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
- message (STATUS "Found OpenMP")
+ message(STATUS "Found OpenMP")
if(MSVC)
if(MSVC_VERSION EQUAL 1500)
set(OPENMP_DLL VCOMP90)
set(OPENMP_DLL VCOMP120)
elseif(MSVC_VERSION EQUAL 1900)
set(OPENMP_DLL VCOMP140)
- elseif(MSVC_VERSION EQUAL 1910)
+ elseif(MSVC_VERSION MATCHES "^191[0-9]$")
set(OPENMP_DLL VCOMP140)
endif()
if(OPENMP_DLL)
set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /DELAYLOAD:${OPENMP_DLL}D.dll")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DELAYLOAD:${OPENMP_DLL}.dll")
- else(OPENMP_DLL)
+ else()
message(WARNING "Delay loading flag for OpenMP DLL is invalid.")
- endif(OPENMP_DLL)
+ endif()
endif(MSVC)
-else(OPENMP_FOUND)
- message (STATUS "Not found OpenMP")
+else()
+ message(STATUS "Not found OpenMP")
endif()
# Eigen (required)
# FLANN (required)
if(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32))
set(FLANN_USE_STATIC ON)
-endif(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32))
+endif()
find_package(FLANN 1.7.0 REQUIRED)
include_directories(${FLANN_INCLUDE_DIRS})
find_package(libusb-1.0)
if(LIBUSB_1_FOUND)
include_directories("${LIBUSB_1_INCLUDE_DIR}")
- endif(LIBUSB_1_FOUND)
-endif(WITH_LIBUSB)
+ endif()
+endif()
# Dependencies for different grabbers
PCL_ADD_GRABBER_DEPENDENCY("OpenNI" "OpenNI grabber support")
PCL_ADD_GRABBER_DEPENDENCY("RSSDK" "RealSense SDK support")
# metslib
-if (PKG_CONFIG_FOUND)
+if(PKG_CONFIG_FOUND)
pkg_check_modules(METSLIB metslib)
- if (METSLIB_FOUND)
- set (HAVE_METSLIB ON)
+ if(METSLIB_FOUND)
+ set(HAVE_METSLIB ON)
include_directories(${METSLIB_INCLUDE_DIRS})
else()
include_directories("${PCL_SOURCE_DIR}/recognition/include/pcl/recognition/3rdparty/")
option(WITH_PNG "PNG file support" TRUE)
if(WITH_PNG)
find_package(PNG)
- if (PNG_FOUND)
- set (HAVE_PNG ON)
+ if(PNG_FOUND)
+ set(HAVE_PNG ON)
include_directories("${PNG_INCLUDE_DIR}")
- endif(PNG_FOUND)
-endif(WITH_PNG)
+ endif()
+endif()
# Qhull
option(WITH_QHULL "Include convex-hull operations" TRUE)
if(WITH_QHULL)
- if(NOT PCL_SHARED_LIBS OR WIN32)
+ if(NOT PCL_SHARED_LIBS OR ((WIN32 AND NOT MINGW) AND NOT PCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32))
set(QHULL_USE_STATIC ON)
- endif(NOT PCL_SHARED_LIBS OR WIN32)
+ endif()
find_package(Qhull)
if(QHULL_FOUND)
include_directories(${QHULL_INCLUDE_DIRS})
- endif(QHULL_FOUND)
-endif(WITH_QHULL)
+ endif()
+endif()
# Cuda
option(WITH_CUDA "Build NVIDIA-CUDA support" TRUE)
if(WITH_CUDA)
include("${PCL_SOURCE_DIR}/cmake/pcl_find_cuda.cmake")
-endif(WITH_CUDA)
+endif()
option(WITH_QT "Build QT Front-End" TRUE)
if(WITH_QT)
set(PCL_QT_VERSION 5 CACHE STRING "Which QT version to use")
if("${PCL_QT_VERSION}" STREQUAL "4")
find_package(Qt4)
- if (QT4_FOUND)
+ if(QT4_FOUND)
include("${QT_USE_FILE}")
- endif (QT4_FOUND)
+ endif()
elseif("${PCL_QT_VERSION}" STREQUAL "5")
include(cmake/pcl_find_qt5.cmake)
else()
message(SEND_ERROR "PCL_QT_VERSION must be 4 or 5")
endif()
-endif(WITH_QT)
+endif()
# Find VTK
option(WITH_VTK "Build VTK-Visualizations" TRUE)
set(VTK_RENDERING_BACKEND "OpenGL")
endif()
message(STATUS "VTK_MAJOR_VERSION ${VTK_MAJOR_VERSION}, rendering backend: ${VTK_RENDERING_BACKEND}")
- if (PCL_SHARED_LIBS OR
- (NOT (PCL_SHARED_LIBS) AND NOT (VTK_BUILD_SHARED_LIBS)))
+ if(PCL_SHARED_LIBS OR (NOT (PCL_SHARED_LIBS) AND NOT (VTK_BUILD_SHARED_LIBS)))
set(VTK_FOUND TRUE)
- find_package (QVTK)
- if (${VTK_MAJOR_VERSION} VERSION_LESS "6.0")
+ find_package(QVTK)
+ if(${VTK_MAJOR_VERSION} VERSION_LESS "6.0")
message(STATUS "VTK found (include: ${VTK_INCLUDE_DIRS}, lib: ${VTK_LIBRARY_DIRS})")
link_directories(${VTK_LIBRARY_DIRS})
- else(${VTK_MAJOR_VERSION} VERSION_LESS "6.0")
- include (${VTK_USE_FILE})
+ else()
+ include(${VTK_USE_FILE})
message(STATUS "VTK found (include: ${VTK_INCLUDE_DIRS}, lib: ${VTK_LIBRARIES}")
- endif (${VTK_MAJOR_VERSION} VERSION_LESS "6.0")
- if (APPLE)
- option (VTK_USE_COCOA "Use Cocoa for VTK render windows" ON)
- MARK_AS_ADVANCED (VTK_USE_COCOA)
- endif (APPLE)
+ endif()
+ if(APPLE)
+ option(VTK_USE_COCOA "Use Cocoa for VTK render windows" ON)
+ MARK_AS_ADVANCED(VTK_USE_COCOA)
+ endif()
if(${VTK_RENDERING_BACKEND} STREQUAL "OpenGL")
set(VTK_RENDERING_BACKEND_OPENGL_VERSION "1")
elseif(${VTK_RENDERING_BACKEND} STREQUAL "OpenGL2")
set(VTK_RENDERING_BACKEND_OPENGL_VERSION "2")
endif()
set(HAVE_VTK ON)
- else ()
+ else()
set(VTK_FOUND OFF)
set(HAVE_VTK OFF)
- message ("Warning: You are to build PCL in STATIC but VTK is SHARED!")
- message ("Warning: VTK disabled!")
- endif ()
- endif(VTK_FOUND)
-else(WITH_VTK AND NOT ANDROID)
+ message("Warning: You are to build PCL in STATIC but VTK is SHARED!")
+ message("Warning: VTK disabled!")
+ endif()
+ endif()
+else()
set(VTK_FOUND OFF)
set(HAVE_VTK OFF)
-endif(WITH_VTK AND NOT ANDROID)
+endif()
#Find PCAP
option(WITH_PCAP "pcap file capabilities in Velodyne HDL driver" TRUE)
if(WITH_PCAP)
find_package(Pcap)
-endif(WITH_PCAP)
+endif()
# OpenGL and GLUT
option(WITH_OPENGL "Support for OpenGL" TRUE)
if(WITH_OPENGL)
include("${PCL_SOURCE_DIR}/cmake/pcl_find_gl.cmake")
-endif(WITH_OPENGL)
+endif()
# Boost (required)
include("${PCL_SOURCE_DIR}/cmake/pcl_find_boost.cmake")
PCL_MAKE_CPACK_INPUT()
set(CPACK_PROJECT_CONFIG_FILE "${PCL_CPACK_CFG_FILE}")
include(CPack)
-endif(CPACK_GENERATOR)
+endif()
### ---[ Make a pretty picture of the dependency graph
include("${PCL_SOURCE_DIR}/cmake/dep_graph.cmake")
MAKE_DEP_GRAPH()
# ------------------------------------------------------------------------------------
# Helper to use PCL from outside project
#
-# target_link_libraries(my_fabulous_target PCL_XXX_LIBRARIES) where XXX is the
-# upper cased xxx from :
+# target_link_libraries(my_fabulous_target PCL_XXX_LIBRARIES) where XXX is the
+# upper cased xxx from :
# @PCLCONFIG_AVAILABLE_COMPONENTS_LIST@
#
# PCL_INCLUDE_DIRS is filled with PCL and available 3rdparty headers
# PCL_LIBRARY_DIRS is filled with PCL components libraries install directory and
# 3rdparty libraries paths
-#
+#
# www.pointclouds.org
#------------------------------------------------------------------------------------
+if(POLICY CMP0074)
+ # TODO: update *_ROOT variables to be PCL_*_ROOT or equivalent.
+ # CMP0074 directly affects how Find* modules work and *_ROOT variables. Since
+ # this is a config file that will be consumed by parent projects with (likely)
+ # NEW behavior, we need to push a policy stack.
+ cmake_policy(PUSH)
+ cmake_policy(SET CMP0074 OLD)
+endif()
+
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/Modules")
+
### ---[ some useful macros
macro(pcl_report_not_found _reason)
unset(PCL_FOUND)
unset(PCL_LIBRARIES)
+ unset(PCL_COMPONENTS)
unset(PCL_INCLUDE_DIRS)
unset(PCL_LIBRARY_DIRS)
unset(PCL_DEFINITIONS)
endif(NOT PCL_FIND_QUIETLY)
endmacro(pcl_message)
-# Remove duplicate libraries
+# Remove duplicate libraries
macro(pcl_remove_duplicate_libraries _unfiltered_libraries _filtered_libraries)
set(${_filtered_libraries})
set(_debug_libraries)
else(${CMAKE_VERSION} VERSION_LESS 2.8.5)
set(Boost_ADDITIONAL_VERSIONS
"@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@.@Boost_SUBMINOR_VERSION@" "@Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@"
+ "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
"1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
"1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55"
"1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51"
elseif(NOT EIGEN_ROOT)
get_filename_component(EIGEN_ROOT "@EIGEN_INCLUDE_DIRS@" ABSOLUTE)
endif(PCL_ALL_IN_ONE_INSTALLER)
- if(PKG_CONFIG_FOUND)
- pkg_check_modules(PC_EIGEN eigen3)
- endif(PKG_CONFIG_FOUND)
- find_path(EIGEN_INCLUDE_DIRS Eigen/Core
- HINTS ${PC_EIGEN_INCLUDEDIR} ${PC_EIGEN_INCLUDE_DIRS}
- "${EIGEN_ROOT}" "$ENV{EIGEN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/Eigen3" "$ENV{PROGRAMW6432}/Eigen3"
- "$ENV{PROGRAMFILES}/Eigen" "$ENV{PROGRAMW6432}/Eigen"
- PATH_SUFFIXES eigen3 include/eigen3 include)
- find_package_handle_standard_args(eigen DEFAULT_MSG EIGEN_INCLUDE_DIRS)
+ find_package(Eigen)
set(EIGEN_DEFINITIONS ${EIGEN_DEFINITIONS})
endmacro(find_eigen)
elseif(NOT QHULL_ROOT)
get_filename_component(QHULL_ROOT "@QHULL_INCLUDE_DIRS@" PATH)
endif(PCL_ALL_IN_ONE_INSTALLER)
- set(QHULL_MAJOR_VERSION 6)
-
- find_path(QHULL_INCLUDE_DIRS
- NAMES libqhull/libqhull.h qhull.h
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/qhull" "$ENV{PROGRAMW6432}/qhull"
- PATH_SUFFIXES qhull src/libqhull libqhull include)
-
- # Most likely we are on windows so prefer static libraries over shared ones (Mourad's recommend)
- find_library(QHULL_LIBRARY
- NAMES "@QHULL_LIBRARY_NAME@"
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/qhull" "$ENV{PROGRAMW6432}/qhull"
- PATH_SUFFIXES project build bin lib)
-
- find_library(QHULL_LIBRARY_DEBUG
- NAMES "@QHULL_LIBRARY_DEBUG_NAME@"
- HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/qhull" "$ENV{PROGRAMW6432}/qhull"
- PATH_SUFFIXES project build bin lib)
-
- find_package_handle_standard_args(qhull DEFAULT_MSG QHULL_LIBRARY QHULL_INCLUDE_DIRS)
-
- if(QHULL_FOUND)
- get_filename_component(QHULL_LIBRARY_PATH ${QHULL_LIBRARY} PATH)
- if(QHULL_LIBRARY_DEBUG)
- set(QHULL_LIBRARIES optimized ${QHULL_LIBRARY} debug ${QHULL_LIBRARY_DEBUG})
- get_filename_component(QHULL_LIBRARY_DEBUG_PATH ${QHULL_LIBRARY_DEBUG} PATH)
- set(QHULL_LIBRARY_DIRS ${QHULL_LIBRARY_PATH} ${QHULL_LIBRARY_DEBUG_PATH})
- else(QHULL_LIBRARY_DEBUG)
- set(QHULL_LIBRARIES ${QHULL_LIBRARY})
- set(QHULL_LIBRARY_DIRS ${QHULL_LIBRARY_PATH})
- endif(QHULL_LIBRARY_DEBUG)
- set(QHULL_DEFINITIONS)
- get_filename_component(qhull_lib ${QHULL_LIBRARY} NAME_WE)
- if(NOT "${qhull_lib}" MATCHES "qhullstatic")
- set(QHULL_DEFINITIONS ${QHULL_DEFINITIONS} -Dqh_QHpointer)
- if(MSVC)
- set(QHULL_DEFINITIONS ${QHULL_DEFINITIONS} -Dqh_QHpointer_dllimport)
- endif(MSVC)
- endif(NOT "${qhull_lib}" MATCHES "qhullstatic")
- endif(QHULL_FOUND)
+
+ set(QHULL_USE_STATIC @QHULL_USE_STATIC@)
+ find_package(Qhull)
endmacro(find_qhull)
#remove this as soon as libopenni is shipped with FindOpenni.cmake
macro(find_openni)
+ if(PCL_FIND_QUIETLY)
+ set(OpenNI_FIND_QUIETLY TRUE)
+ endif()
+
if(NOT OPENNI_ROOT AND ("@HAVE_OPENNI@" STREQUAL "TRUE"))
set(OPENNI_INCLUDE_DIRS_HINT "@OPENNI_INCLUDE_DIRS@")
get_filename_component(OPENNI_LIBRARY_HINT "@OPENNI_LIBRARY@" PATH)
endif()
- if(PKG_CONFIG_FOUND)
- pkg_check_modules(PC_OPENNI libopenni)
- endif(PKG_CONFIG_FOUND)
- find_path(OPENNI_INCLUDE_DIRS XnStatus.h
- HINTS ${PC_OPENNI_INCLUDEDIR} ${PC_OPENNI_INCLUDE_DIRS}
- "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
- PATHS "$ENV{OPEN_NI_INCLUDE}" "${OPENNI_INCLUDE_DIRS_HINT}"
- PATH_SUFFIXES include/openni Include)
- #add a hint so that it can find it without the pkg-config
- set(OPENNI_SUFFIX)
- if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
- set(OPENNI_SUFFIX 64)
- endif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
- find_library(OPENNI_LIBRARY
- NAMES OpenNI64 OpenNI
- HINTS ${PC_OPENNI_LIBDIR} ${PC_OPENNI_LIBRARY_DIRS}
- "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
- PATHS "$ENV{OPEN_NI_LIB${OPENNI_SUFFIX}}" "${OPENNI_LIBRARY_HINT}"
- PATH_SUFFIXES lib Lib Lib64)
-
- find_package_handle_standard_args(openni DEFAULT_MSG OPENNI_LIBRARY OPENNI_INCLUDE_DIRS)
-
- if(OPENNI_FOUND)
- get_filename_component(OPENNI_LIBRARY_PATH ${OPENNI_LIBRARY} PATH)
- set(OPENNI_LIBRARY_DIRS ${OPENNI_LIBRARY_PATH})
- set(OPENNI_LIBRARIES "${OPENNI_LIBRARY}")
- endif(OPENNI_FOUND)
+ find_package(OpenNI)
endmacro(find_openni)
#remove this as soon as libopenni2 is shipped with FindOpenni2.cmake
get_filename_component(OPENNI2_LIBRARY_HINT "@OPENNI2_LIBRARY@" PATH)
endif()
- set(OPENNI2_SUFFIX)
- if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
- set(OPENNI2_SUFFIX 64)
- endif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
-
- if(PKG_CONFIG_FOUND)
- if(PCL_FIND_QUIETLY)
- pkg_check_modules(PC_OPENNI2 QUIET libopenni2)
- else()
- pkg_check_modules(PC_OPENNI2 libopenni2)
- endif()
- endif(PKG_CONFIG_FOUND)
-
- find_path(OPENNI2_INCLUDE_DIRS OpenNI.h
- HINTS /usr/include/openni2 /usr/include/ni2 /usr/local/include/openni2 /usr/local/include/ni2
- PATHS "$ENV{OPENNI2_INCLUDE${OPENNI2_SUFFIX}}" "${OPENNI2_INCLUDE_DIRS_HINT}"
- PATH_SUFFIXES openni openni2 include Include)
-
- find_library(OPENNI2_LIBRARY
- NAMES OpenNI2 # No suffix needed on Win64
- HINTS /usr/lib /usr/local/lib/ni2
- PATHS "$ENV{OPENNI2_LIB${OPENNI2_SUFFIX}}" "${OPENNI2_LIBRARY_HINT}"
- PATH_SUFFIXES lib Lib Lib64)
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(OpenNI2 DEFAULT_MSG OPENNI2_LIBRARY OPENNI2_INCLUDE_DIRS)
-
- if(OPENNI2_FOUND)
- get_filename_component(OPENNI2_LIBRARY_PATH ${OPENNI2_LIBRARY} PATH)
- set(OPENNI2_LIBRARY_DIRS ${OPENNI2_LIBRARY_PATH})
- set(OPENNI2_LIBRARIES "${OPENNI2_LIBRARY}")
- set(OPENNI2_REDIST_DIR $ENV{OPENNI2_REDIST${OPENNI2_SUFFIX}})
- endif(OPENNI2_FOUND)
+ find_package(OpenNI2)
endmacro(find_openni2)
#remove this as soon as the Ensenso SDK is shipped with FindEnsenso.cmake
get_filename_component(ENSENSO_ABI_HINT "@ENSENSO_INCLUDE_DIR@" PATH)
endif()
- find_path(ENSENSO_INCLUDE_DIR nxLib.h
- HINTS ${ENSENSO_ABI_HINT}
- /opt/ensenso/development/c
- "$ENV{PROGRAMFILES}/Ensenso/development/c" "$ENV{PROGRAMW6432}/Ensenso/development/c"
- PATH_SUFFIXES include/)
-
- find_library(ENSENSO_LIBRARY QUIET NAMES NxLib64 NxLib32 nxLib64 nxLib32
- HINTS ${ENSENSO_ABI_HINT}
- "$ENV{PROGRAMFILES}/Ensenso/development/c" "$ENV{PROGRAMW6432}/Ensenso/development/c"
- PATH_SUFFIXES lib/)
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(ensenso DEFAULT_MSG
- ENSENSO_LIBRARY ENSENSO_INCLUDE_DIR)
-
- if(ENSENSO_FOUND)
- get_filename_component(ENSENSO_LIBRARY_PATH ${ENSENSO_LIBRARY} PATH)
- set(ENSENSO_INCLUDE_DIRS ${ENSENSO_INCLUDE_DIR})
- set(ENSENSO_LIBRARY_DIRS ${ENSENSO_LIBRARY_PATH})
- set(ENSENSO_LIBRARIES "${ENSENSO_LIBRARY}")
- set(ENSENSO_REDIST_DIR $ENV{ENSENSO_REDIST${ENSENSO_SUFFIX}})
- endif(ENSENSO_FOUND)
+ find_package(Ensenso)
endmacro(find_ensenso)
#remove this as soon as the davidSDK is shipped with FinddavidSDK.cmake
get_filename_component(DAVIDSDK_ABI_HINT @DAVIDSDK_INCLUDE_DIR@ PATH)
endif()
- find_path(DAVIDSDK_INCLUDE_DIR david.h
- HINTS ${DAVIDSDK_ABI_HINT}
- /usr/local/include/davidSDK
- "$ENV{PROGRAMFILES}/davidSDK" "$ENV{PROGRAMW6432}/davidSDK"
- PATH_SUFFIXES include/)
-
- find_library(DAVIDSDK_LIBRARY QUIET NAMES davidSDK
- HINTS ${DAVIDSDK_ABI_HINT}
- "$ENV{PROGRAMFILES}/davidSDK" "$ENV{PROGRAMW6432}/davidSDK"
- PATH_SUFFIXES lib/)
-
- include(FindPackageHandleStandardArgs)
- find_package_handle_standard_args(DAVIDSDK DEFAULT_MSG
- DAVIDSDK_LIBRARY DAVIDSDK_INCLUDE_DIR)
-
- if(DAVIDSDK_FOUND)
- get_filename_component(DAVIDSDK_LIBRARY_PATH ${DAVIDSDK_LIBRARY} PATH)
- set(DAVIDSDK_INCLUDE_DIRS ${DAVIDSDK_INCLUDE_DIR})
- set(DAVIDSDK_LIBRARY_DIRS ${DAVIDSDK_LIBRARY_PATH})
- set(DAVIDSDK_LIBRARIES "${DAVIDSDK_LIBRARY}")
- set(DAVIDSDK_REDIST_DIR $ENV{DAVIDSDK_REDIST${DAVIDSDK_SUFFIX}})
- endif(DAVIDSDK_FOUND)
+ find_package(davidSDK)
endmacro(find_davidSDK)
macro(find_dssdk)
if(NOT DSSDK_DIR AND ("@HAVE_DSSDK@" STREQUAL "TRUE"))
get_filename_component(DSSDK_DIR_HINT "@DSSDK_INCLUDE_DIRS@" PATH)
endif()
- find_path(DSSDK_DIR include/DepthSenseVersion.hxx
- HINTS ${DSSDK_DIR_HINT}
- "$ENV{DEPTHSENSESDK32}"
- "$ENV{DEPTHSENSESDK64}"
- PATHS "$ENV{PROGRAMFILES}/SoftKinetic/DepthSenseSDK"
- "$ENV{PROGRAMW6432}/SoftKinetic/DepthSenseSDK"
- "C:/Program Files (x86)/SoftKinetic/DepthSenseSDK"
- "C:/Program Files/SoftKinetic/DepthSenseSDK"
- "/opt/softkinetic/DepthSenseSDK"
- DOC "DepthSense SDK directory")
- set(_DSSDK_INCLUDE_DIRS ${DSSDK_DIR}/include)
- if(MSVC)
- set(DSSDK_LIBRARIES_NAMES DepthSense)
- else()
- set(DSSDK_LIBRARIES_NAMES DepthSense DepthSensePlugins turbojpeg)
- endif()
- foreach(LIB ${DSSDK_LIBRARIES_NAMES})
- find_library(DSSDK_LIBRARY_${LIB}
- NAMES "${LIB}"
- PATHS "${DSSDK_DIR}/lib/" NO_DEFAULT_PATH)
- list(APPEND _DSSDK_LIBRARIES ${DSSDK_LIBRARY_${LIB}})
- mark_as_advanced(DSSDK_LIBRARY_${LIB})
- endforeach()
- find_package_handle_standard_args(DSSDK DEFAULT_MSG _DSSDK_LIBRARIES _DSSDK_INCLUDE_DIRS)
- if(DSSDK_FOUND)
- set(DSSDK_LIBRARIES ${_DSSDK_LIBRARIES})
- mark_as_advanced(DSSDK_LIBRARIES)
- set(DSSDK_INCLUDE_DIRS ${_DSSDK_INCLUDE_DIRS})
- mark_as_advanced(DSSDK_INCLUDE_DIRS)
- endif()
+
+ find_package(DSSDK)
endmacro(find_dssdk)
macro(find_rssdk)
if(NOT RSSDK_DIR AND ("@HAVE_RSSDK@" STREQUAL "TRUE"))
get_filename_component(RSSDK_DIR_HINT "@RSSDK_INCLUDE_DIRS@" PATH)
endif()
- find_path(RSSDK_DIR include/pxcversion.h
- HINTS ${RSSDK_DIR_HINT}
- PATHS "$ENV{RSSDK_DIR}"
- "$ENV{PROGRAMFILES}/Intel/RSSDK"
- "$ENV{PROGRAMW6432}/Intel/RSSDK"
- "C:/Program Files (x86)/Intel/RSSDK"
- "C:/Program Files/Intel/RSSDK"
- DOC "RealSense SDK directory")
- set(_RSSDK_INCLUDE_DIRS ${RSSDK_DIR}/include)
- set(RSSDK_RELEASE_NAME libpxc.lib)
- set(RSSDK_DEBUG_NAME libpxc_d.lib)
- set(RSSDK_SUFFIX Win32)
- if(CMAKE_SIZEOF_VOID_P EQUAL 8)
- set(RSSDK_SUFFIX x64)
- endif()
- find_library(RSSDK_LIBRARY
- NAMES ${RSSDK_RELEASE_NAME}
- PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES ${RSSDK_SUFFIX})
- find_library(RSSDK_LIBRARY_DEBUG
- NAMES ${RSSDK_DEBUG_NAME} ${RSSDK_RELEASE_NAME}
- PATHS "${RSSDK_DIR}/lib/" NO_DEFAULT_PATH
- PATH_SUFFIXES ${RSSDK_SUFFIX})
- if(NOT RSSDK_LIBRARY_DEBUG)
- set(RSSDK_LIBRARY_DEBUG ${RSSDK_LIBRARY})
- endif()
- set(_RSSDK_LIBRARIES optimized ${RSSDK_LIBRARY} debug ${RSSDK_LIBRARY_DEBUG})
- mark_as_advanced(RSSDK_LIBRARY RSSDK_LIBRARY_DEBUG)
- find_package_handle_standard_args(RSSDK DEFAULT_MSG _RSSDK_LIBRARIES _RSSDK_INCLUDE_DIRS)
- if(RSSDK_FOUND)
- set(RSSDK_LIBRARIES ${_RSSDK_LIBRARIES})
- mark_as_advanced(RSSDK_LIBRARIES)
- set(RSSDK_INCLUDE_DIRS ${_RSSDK_INCLUDE_DIRS})
- mark_as_advanced(RSSDK_INCLUDE_DIRS)
- endif()
+
+ find_package(RSSDK)
endmacro(find_rssdk)
#remove this as soon as flann is shipped with FindFlann.cmake
elseif(NOT FLANN_ROOT)
get_filename_component(FLANN_ROOT "@FLANN_INCLUDE_DIRS@" PATH)
endif(PCL_ALL_IN_ONE_INSTALLER)
- if(PKG_CONFIG_FOUND)
- pkg_check_modules(PC_FLANN flann)
- endif(PKG_CONFIG_FOUND)
-
- find_path(FLANN_INCLUDE_DIRS flann/flann.hpp
- HINTS ${PC_FLANN_INCLUDEDIR} ${PC_FLANN_INCLUDE_DIRS}
- "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/flann 1.6.9" "$ENV{PROGRAMW6432}/flann 1.6.9"
- "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
- PATH_SUFFIXES include)
-
- find_library(FLANN_LIBRARY
- NAMES flann_cpp_s flann_cpp
- HINTS ${PC_FLANN_LIBDIR} ${PC_FLANN_LIBRARY_DIRS} "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/flann 1.6.9" "$ENV{PROGRAMW6432}/flann 1.6.9"
- "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
- PATH_SUFFIXES lib)
-
- find_library(FLANN_LIBRARY_DEBUG
- NAMES flann_cpp_s-gd flann_cpp-gd flann_cpp_s flann_cpp
- HINTS ${PC_FLANN_LIBDIR} ${PC_FLANN_LIBRARY_DIRS} "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/flann 1.6.9" "$ENV{PROGRAMW6432}/flann 1.6.9"
- "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
- PATH_SUFFIXES lib)
-
- find_package_handle_standard_args(Flann DEFAULT_MSG FLANN_LIBRARY FLANN_INCLUDE_DIRS)
- if(FLANN_FOUND)
- get_filename_component(FLANN_LIBRARY_PATH ${FLANN_LIBRARY} PATH)
- if(FLANN_LIBRARY_DEBUG)
- get_filename_component(FLANN_LIBRARY_DEBUG_PATH ${FLANN_LIBRARY_DEBUG} PATH)
- set(FLANN_LIBRARY_DIRS ${FLANN_LIBRARY_PATH} ${FLANN_LIBRARY_DEBUG_PATH})
- set(FLANN_LIBRARIES optimized ${FLANN_LIBRARY} debug ${FLANN_LIBRARY_DEBUG})
- else(FLANN_LIBRARY_DEBUG)
- set(FLANN_LIBRARY_DIRS ${FLANN_LIBRARY_PATH})
- set(FLANN_LIBRARIES ${FLANN_LIBRARY})
- endif(FLANN_LIBRARY_DEBUG)
- if("${FLANN_LIBRARY}" MATCHES "flann_cpp_s")
- set(FLANN_DEFINITIONS ${FLANN_DEFINITIONS} -DFLANN_STATIC)
- endif("${FLANN_LIBRARY}" MATCHES "flann_cpp_s")
- endif(FLANN_FOUND)
+
+ set(FLANN_USE_STATIC @FLANN_USE_STATIC@)
+ find_package(FLANN)
endmacro(find_flann)
macro(find_VTK)
endmacro(find_libusb)
macro(find_glew)
-IF (WIN32)
-
- IF(CYGWIN)
-
- FIND_PATH( GLEW_INCLUDE_DIR GL/glew.h)
-
- FIND_LIBRARY( GLEW_GLEW_LIBRARY glew32
- ${OPENGL_LIBRARY_DIR}
- /usr/lib/w32api
- /usr/X11R6/lib
- )
-
-
- ELSE(CYGWIN)
-
- FIND_PATH( GLEW_INCLUDE_DIR GL/glew.h
- $ENV{GLEW_ROOT}/include
- )
-
- FIND_LIBRARY( GLEW_GLEW_LIBRARY
- NAMES glew glew32 glew32s
- PATHS
- $ENV{GLEW_ROOT}/lib
- ${OPENGL_LIBRARY_DIR}
- )
-
- ENDIF(CYGWIN)
-
-ELSE (WIN32)
-
- IF (APPLE)
-
- FIND_PATH( GLEW_INCLUDE_DIR GL/glew.h
- /usr/local/include
- /System/Library/Frameworks/GLEW.framework/Versions/A/Headers
- ${OPENGL_LIBRARY_DIR}
- )
-
- FIND_LIBRARY( GLEW_GLEW_LIBRARY GLEW
- /usr/local/lib
- /usr/openwin/lib
- /usr/X11R6/lib
- )
-
- if(NOT GLEW_GLEW_LIBRARY)
- SET(GLEW_GLEW_LIBRARY "-framework GLEW" CACHE STRING "GLEW library for OSX")
- SET(GLEW_cocoa_LIBRARY "-framework Cocoa" CACHE STRING "Cocoa framework for OSX")
- endif()
- ELSE (APPLE)
-
- FIND_PATH( GLEW_INCLUDE_DIR GL/glew.h
- /usr/include/GL
- /usr/openwin/share/include
- /usr/openwin/include
- /usr/X11R6/include
- /usr/include/X11
- /opt/graphics/OpenGL/include
- /opt/graphics/OpenGL/contrib/libglew
- )
-
- FIND_LIBRARY( GLEW_GLEW_LIBRARY GLEW
- /usr/openwin/lib
- /usr/X11R6/lib
- )
-
- ENDIF (APPLE)
-
-ENDIF (WIN32)
-
-SET( GLEW_FOUND FALSE )
-IF(GLEW_INCLUDE_DIR)
- IF(GLEW_GLEW_LIBRARY)
- # Is -lXi and -lXmu required on all platforms that have it?
- # If not, we need some way to figure out what platform we are on.
- SET( GLEW_LIBRARIES
- ${GLEW_GLEW_LIBRARY}
- ${GLEW_cocoa_LIBRARY}
- )
- SET( GLEW_FOUND TRUE )
-
-#The following deprecated settings are for backwards compatibility with CMake1.4
- SET (GLEW_LIBRARY ${GLEW_LIBRARIES})
- SET (GLEW_INCLUDE_PATH ${GLEW_INCLUDE_DIR})
-
- ENDIF(GLEW_GLEW_LIBRARY)
-ENDIF(GLEW_INCLUDE_DIR)
-
-IF(GLEW_FOUND)
- IF(NOT GLEW_FIND_QUIETLY)
- MESSAGE(STATUS "Found Glew: ${GLEW_LIBRARIES}")
- ENDIF(NOT GLEW_FIND_QUIETLY)
- IF(GLEW_GLEW_LIBRARY MATCHES glew32s)
- list(APPEND PCL_DEFINITIONS "-DGLEW_STATIC")
- ENDIF(GLEW_GLEW_LIBRARY MATCHES glew32s)
-ELSE(GLEW_FOUND)
- IF(GLEW_FIND_REQUIRED)
- MESSAGE(FATAL_ERROR "Could not find Glew")
- ENDIF(GLEW_FIND_REQUIRED)
-ENDIF(GLEW_FOUND)
+ find_package(GLEW)
endmacro(find_glew)
# Finds each component external libraries if any
# |--> _lib found ==> include the headers,
# | link to its library directories or include _lib_USE_FILE
# `--> _lib not found
-# |--> _lib is optional ==> disable it (thanks to the guardians)
+# |--> _lib is optional ==> disable it (thanks to the guardians)
# | and warn
# `--> _lib is required
-# |--> component is required explicitely ==> error
+# |--> component is required explicitly ==> error
# `--> component is induced ==> warn and remove it
# from the list
set(PCL_LIBRARY_DIRS "${PCL_DIR}/@LIB_INSTALL_DIR@")
set(PCL_SOURCES_TREE "@CMAKE_SOURCE_DIR@")
else(EXISTS "${PCL_ROOT}/include/pcl-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}/pcl/pcl_config.h")
- pcl_report_not_found("PCL can not be found on this machine")
+ pcl_report_not_found("PCL can not be found on this machine")
endif(EXISTS "${PCL_ROOT}/include/pcl-${PCL_VERSION_MAJOR}.${PCL_VERSION_MINOR}/pcl/pcl_config.h")
#set a suffix for debug libraries
set(PCL_RELEASE_SUFFIX "@CMAKE_RELEASE_POSTFIX@")
#set SSE flags used compiling PCL
-list(APPEND PCL_DEFINITIONS "@PCLCONFIG_SSE_DEFINITIONS@")
+list(APPEND PCL_DEFINITIONS @PCLCONFIG_SSE_DEFINITIONS@)
+list(APPEND PCL_COMPILE_OPTIONS @PCLCONFIG_SSE_COMPILE_OPTIONS@)
set(pcl_all_components @PCLCONFIG_AVAILABLE_COMPONENTS@ )
list(LENGTH pcl_all_components PCL_NB_COMPONENTS)
set(PCL_TO_FIND_COMPONENTS ${pcl_all_components})
set(PCL_FIND_ALL 1)
else(PCL_FIND_COMPONENTS_LENGTH EQUAL PCL_NB_COMPONENTS)
- set(PCL_TO_FIND_COMPONENTS ${PCL_FIND_COMPONENTS})
+ set(PCL_TO_FIND_COMPONENTS ${PCL_FIND_COMPONENTS})
endif(PCL_FIND_COMPONENTS_LENGTH EQUAL PCL_NB_COMPONENTS)
else(PCL_FIND_COMPONENTS)
set(PCL_TO_FIND_COMPONENTS ${pcl_all_components})
compute_dependencies(PCL_TO_FIND_COMPONENTS)
+# We do not need to find components that have been found already, e.g. during previous invocation
+# of find_package(PCL). Filter them out.
+foreach(component ${PCL_TO_FIND_COMPONENTS})
+ string(TOUPPER "${component}" COMPONENT)
+ if(NOT PCL_${COMPONENT}_FOUND)
+ list(APPEND _PCL_TO_FIND_COMPONENTS ${component})
+ endif()
+endforeach()
+set(PCL_TO_FIND_COMPONENTS ${_PCL_TO_FIND_COMPONENTS})
+unset(_PCL_TO_FIND_COMPONENTS)
+
+if(NOT PCL_TO_FIND_COMPONENTS)
+ return()
+endif()
+
# compute external dependencies per component
foreach(component ${PCL_TO_FIND_COMPONENTS})
foreach(opt ${pcl_${component}_opt_dep})
endforeach(opt)
foreach(ext ${pcl_${component}_ext_dep})
find_external_library(${component} ${ext} REQUIRED)
- endforeach(ext)
+ endforeach(ext)
endforeach(component)
foreach(component ${PCL_TO_FIND_COMPONENTS})
pcl_message(STATUS "looking for PCL_${COMPONENT}")
string(REGEX REPLACE "^cuda_(.*)$" "\\1" cuda_component "${component}")
- string(REGEX REPLACE "^gpu_(.*)$" "\\1" gpu_component "${component}")
-
+ string(REGEX REPLACE "^gpu_(.*)$" "\\1" gpu_component "${component}")
+
find_path(PCL_${COMPONENT}_INCLUDE_DIR
NAMES pcl/${component}
pcl/apps/${component}
PATH_SUFFIXES
${component}/include
apps/${component}/include
- cuda/${cuda_component}/include
+ cuda/${cuda_component}/include
gpu/${gpu_component}/include
DOC "path to ${component} headers"
NO_DEFAULT_PATH)
+ mark_as_advanced(PCL_${COMPONENT}_INCLUDE_DIR)
if(PCL_${COMPONENT}_INCLUDE_DIR)
list(APPEND PCL_${COMPONENT}_INCLUDE_DIRS "${PCL_${COMPONENT}_INCLUDE_DIR}")
else(PCL_${COMPONENT}_INCLUDE_DIR)
#pcl_message("No include directory found for pcl_${component}.")
endif(PCL_${COMPONENT}_INCLUDE_DIR)
-
+
# Skip find_library for header only modules
list(FIND pcl_header_only_components ${component} _is_header_only)
if(_is_header_only EQUAL -1)
HINTS ${PCL_LIBRARY_DIRS}
DOC "path to ${pcl_component} library"
NO_DEFAULT_PATH)
- get_filename_component(${component}_library_path
+ get_filename_component(${component}_library_path
${PCL_${COMPONENT}_LIBRARY}
PATH)
+ mark_as_advanced(PCL_${COMPONENT}_LIBRARY)
find_library(PCL_${COMPONENT}_LIBRARY_DEBUG ${pcl_component}${PCL_DEBUG_SUFFIX}
- HINTS ${PCL_LIBRARY_DIRS}
+ HINTS ${PCL_LIBRARY_DIRS}
DOC "path to ${pcl_component} library debug"
NO_DEFAULT_PATH)
+ mark_as_advanced(PCL_${COMPONENT}_LIBRARY_DEBUG)
+
if(PCL_${COMPONENT}_LIBRARY_DEBUG)
- get_filename_component(${component}_library_path_debug
+ get_filename_component(${component}_library_path_debug
${PCL_${COMPONENT}_LIBRARY_DEBUG}
PATH)
endif(PCL_${COMPONENT}_LIBRARY_DEBUG)
PCL_${COMPONENT}_LIBRARY PCL_${COMPONENT}_INCLUDE_DIR)
else(_is_header_only EQUAL -1)
find_package_handle_standard_args(PCL_${COMPONENT} DEFAULT_MSG
- PCL_${COMPONENT}_INCLUDE_DIR)
+ PCL_${COMPONENT}_INCLUDE_DIR)
endif(_is_header_only EQUAL -1)
-
+
if(PCL_${COMPONENT}_FOUND)
if(NOT "${PCL_${COMPONENT}_INCLUDE_DIRS}" STREQUAL "")
- list(REMOVE_DUPLICATES PCL_${COMPONENT}_INCLUDE_DIRS)
+ set(_filtered "")
+ foreach(_inc ${PCL_${COMPONENT}_INCLUDE_DIRS})
+ if(EXISTS ${_inc})
+ list(APPEND _filtered "${_inc}")
+ endif()
+ endforeach()
+ list(REMOVE_DUPLICATES _filtered)
+ set(PCL_${COMPONENT}_INCLUDE_DIRS ${_filtered})
+ list(APPEND PCL_INCLUDE_DIRS ${_filtered})
endif(NOT "${PCL_${COMPONENT}_INCLUDE_DIRS}" STREQUAL "")
- list(APPEND PCL_INCLUDE_DIRS ${PCL_${COMPONENT}_INCLUDE_DIRS})
mark_as_advanced(PCL_${COMPONENT}_INCLUDE_DIRS)
if(_is_header_only EQUAL -1)
list(APPEND PCL_DEFINITIONS ${PCL_${COMPONENT}_DEFINITIONS})
list(APPEND PCL_LIBRARY_DIRS ${component_library_path})
if(PCL_${COMPONENT}_LIBRARY_DEBUG)
- list(APPEND PCL_${COMPONENT}_LIBRARIES optimized ${PCL_${COMPONENT}_LIBRARY} debug ${PCL_${COMPONENT}_LIBRARY_DEBUG})
list(APPEND PCL_LIBRARY_DIRS ${component_library_path_debug})
- else(PCL_${COMPONENT}_LIBRARY_DEBUG)
- list(APPEND PCL_${COMPONENT}_LIBRARIES ${PCL_${COMPONENT}_LIBRARY})
- endif(PCL_${COMPONENT}_LIBRARY_DEBUG)
- list(APPEND PCL_LIBRARIES ${PCL_${COMPONENT}_LIBRARIES})
+ endif()
+ list(APPEND PCL_COMPONENTS ${pcl_component})
mark_as_advanced(PCL_${COMPONENT}_LIBRARY PCL_${COMPONENT}_LIBRARY_DEBUG)
- endif(_is_header_only EQUAL -1)
+ endif()
# Append internal dependencies
foreach(int_dep ${pcl_${component}_int_dep})
string(TOUPPER "${int_dep}" INT_DEP)
if(PCL_${INT_DEP}_FOUND)
list(APPEND PCL_${COMPONENT}_INCLUDE_DIRS ${PCL_${INT_DEP}_INCLUDE_DIRS})
if(PCL_${INT_DEP}_LIBRARIES)
- list(APPEND PCL_${COMPONENT}_LIBRARIES "${PCL_${INT_DEP}_LIBRARIES}")
- endif(PCL_${INT_DEP}_LIBRARIES)
+ list(APPEND PCL_${COMPONENT}_LINK_LIBRARIES "${PCL_${INT_DEP}_LIBRARIES}")
+ endif(PCL_${INT_DEP}_LIBRARIES)
endif(PCL_${INT_DEP}_FOUND)
endforeach(int_dep)
+ if(_is_header_only EQUAL -1)
+ add_library(${pcl_component} @PCL_LIB_TYPE@ IMPORTED)
+ if(PCL_${COMPONENT}_LIBRARY_DEBUG)
+ set_target_properties(${pcl_component}
+ PROPERTIES
+ IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
+ IMPORTED_LOCATION_RELEASE "${PCL_${COMPONENT}_LIBRARY}"
+ IMPORTED_LOCATION_DEBUG "${PCL_${COMPONENT}_LIBRARY_DEBUG}"
+ IMPORTED_IMPLIB_RELEASE "${PCL_${COMPONENT}_LIBRARY}"
+ IMPORTED_IMPLIB_DEBUG "${PCL_${COMPONENT}_LIBRARY_DEBUG}"
+ )
+ else()
+ set_target_properties(${pcl_component}
+ PROPERTIES
+ IMPORTED_LOCATION "${PCL_${COMPONENT}_LIBRARY}"
+ IMPORTED_IMPLIB "${PCL_${COMPONENT}_LIBRARY}"
+ )
+ endif()
+ foreach(def ${PCL_DEFINITIONS})
+ string(REPLACE " " ";" def2 ${def})
+ string(REGEX REPLACE "^-D" "" def3 "${def2}")
+ list(APPEND definitions ${def3})
+ endforeach()
+ if(CMAKE_VERSION VERSION_LESS 3.3)
+ set_target_properties(${pcl_component}
+ PROPERTIES
+ INTERFACE_COMPILE_DEFINITIONS "${definitions}"
+ INTERFACE_COMPILE_OPTIONS "${PCL_COMPILE_OPTIONS}"
+ INTERFACE_INCLUDE_DIRECTORIES "${PCL_${COMPONENT}_INCLUDE_DIRS}"
+ INTERFACE_LINK_LIBRARIES "${PCL_${COMPONENT}_LINK_LIBRARIES}"
+ )
+ else()
+ set_target_properties(${pcl_component}
+ PROPERTIES
+ INTERFACE_COMPILE_DEFINITIONS "${definitions}"
+ INTERFACE_COMPILE_OPTIONS "$<$<COMPILE_LANGUAGE:CXX>:${PCL_COMPILE_OPTIONS}>"
+ INTERFACE_INCLUDE_DIRECTORIES "${PCL_${COMPONENT}_INCLUDE_DIRS}"
+ INTERFACE_LINK_LIBRARIES "${PCL_${COMPONENT}_LINK_LIBRARIES}"
+ )
+ endif()
+ set(PCL_${COMPONENT}_LIBRARIES ${pcl_component})
+ endif()
endif(PCL_${COMPONENT}_FOUND)
endforeach(component)
list(REMOVE_DUPLICATES PCL_DEFINITIONS)
endif(NOT "${PCL_DEFINITIONS}" STREQUAL "")
-pcl_remove_duplicate_libraries(PCL_LIBRARIES PCL_DEDUP_LIBRARIES)
-set(PCL_LIBRARIES ${PCL_DEDUP_LIBRARIES})
+pcl_remove_duplicate_libraries(PCL_COMPONENTS PCL_LIBRARIES)
+
# Add 3rd party libraries, as user code might include our .HPP implementations
list(APPEND PCL_LIBRARIES ${BOOST_LIBRARIES} ${QHULL_LIBRARIES} ${OPENNI_LIBRARIES} ${OPENNI2_LIBRARIES} ${ENSENSO_LIBRARIES} ${davidSDK_LIBRARIES} ${DSSDK_LIBRARIES} ${RSSDK_LIBRARIES} ${FLANN_LIBRARIES} ${VTK_LIBRARIES})
find_package_handle_standard_args(PCL DEFAULT_MSG PCL_LIBRARIES PCL_INCLUDE_DIRS)
mark_as_advanced(PCL_LIBRARIES PCL_INCLUDE_DIRS PCL_LIBRARY_DIRS)
+if(POLICY CMP0074)
+ cmake_policy(POP)
+endif()
# Point Cloud Library
-<img src="http://ns50.pointclouds.org/assets/images/contents/logos/pcl/pcl_horz_large_pos.png" align="center" height="100">
+<img src="pcl.png" align="center" height="100">
Continuous integration
----------------------
[![Release][release-image]][releases]
[![License][license-image]][license]
-[release-image]: https://img.shields.io/badge/release-1.8.0-green.svg?style=flat
+[release-image]: https://img.shields.io/badge/release-1.9.0-green.svg?style=flat
[releases]: https://github.com/PointCloudLibrary/pcl/releases
[license-image]: https://img.shields.io/badge/license-BSD-green.svg?style=flat
[license]: https://github.com/PointCloudLibrary/pcl/blob/master/LICENSE.txt
-[](https://travis-ci.org/PointCloudLibrary/pcl)
+[](https://travis-ci.com/PointCloudLibrary/pcl)
+[](https://ci.appveyor.com/project/PointCloudLibrary/pcl/branch/master)
Description
-----------
Documentation
-------------
- [Tutorials](http://www.pointclouds.org/documentation/tutorials/)
-- [PCL trunk documentation](http://docs.pointclouds.org/trunk/) (generated 2 times a week)
+- [PCL trunk documentation](http://docs.pointclouds.org/trunk/) (updated daily)
Contributing
------------
------
For general questions on how to use the PCL, please use the [pcl-users](http://www.pcl-users.org/) mailing list (do not forget to subscribe before posting).
To report issues, please read [CONTRIBUTING.md#bug-reports](https://github.com/PointCloudLibrary/pcl/blob/master/CONTRIBUTING.md#bug-reports).
+
+API/ABI Compatibility Report
+------
+For details about API/ABI changes over the timeline please check PCL's page at [ABI Laboratory](https://abi-laboratory.pro/tracker/timeline/pcl/).
if (out->points.size () == 0)
{
- PCL_WARN("NORMAL estimator: Cloud has no points after voxel grid, wont be able to compute normals!\n");
+ PCL_WARN("NORMAL estimator: Cloud has no points after voxel grid, won't be able to compute normals!\n");
return;
}
bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f, std::less<std::pair<std::string, int> >, Eigen::aligned_allocator<std::pair<std::pair<
- std::string, int>, Eigen::Matrix4f> > > poses_cache_;
+ std::map<std::pair<std::string, int>, Eigen::Matrix4f,
+ std::less<std::pair<std::string, int> >,
+ Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
std::map<std::pair<std::string, int>, Eigen::Vector3f > centroids_cache_;
std::vector<int> indices_;
bool use_single_categories_;
bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f, std::less<std::pair<std::string, int> >, Eigen::aligned_allocator<std::pair<std::pair<
- std::string, int>, Eigen::Matrix4f> > > poses_cache_;
+ std::map<std::pair<std::string, int>, Eigen::Matrix4f,
+ std::less<std::pair<std::string, int> >,
+ Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
std::map<std::pair<std::string, int>, Eigen::Vector3f> centroids_cache_;
std::vector<int> indices_;
typedef std::pair<std::string, int> mv_pair;
mv_pair pair_model_view = std::make_pair (model.id_, view_id);
- std::map<mv_pair, Eigen::Matrix4f, std::less<mv_pair>, Eigen::aligned_allocator<std::pair<mv_pair, Eigen::Matrix4f> > >::iterator it =
- poses_cache_.find (pair_model_view);
+ std::map<mv_pair, Eigen::Matrix4f,
+ std::less<mv_pair>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
if (it != poses_cache_.end ())
{
typedef std::pair<std::string, int> mv_pair;
mv_pair pair_model_view = std::make_pair (model.id_, view_id);
- std::map<mv_pair, Eigen::Matrix4f, std::less<mv_pair>, Eigen::aligned_allocator<std::pair<mv_pair, Eigen::Matrix4f> > >::iterator it =
- poses_cache_.find (pair_model_view);
+ std::map<mv_pair, Eigen::Matrix4f,
+ std::less<mv_pair>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
if (it != poses_cache_.end ())
{
nearestKSearch (single_categories_index_[it->second], histogram, NN_, indices, distances);
//gather NN-search results
double score = 0;
- for (size_t i = 0; i < NN_; ++i)
+ for (size_t i = 0; i < (size_t) NN_; ++i)
{
score = distances[0][i];
index_score is;
typedef std::pair<std::string, int> mv_pair;
mv_pair pair_model_view = std::make_pair (model.id_, view_id);
- std::map<mv_pair, Eigen::Matrix4f, std::less<mv_pair>, Eigen::aligned_allocator<std::pair<mv_pair, Eigen::Matrix4f> > >::iterator it =
- poses_cache_.find (pair_model_view);
+ std::map<mv_pair, Eigen::Matrix4f,
+ std::less<mv_pair>,
+ Eigen::aligned_allocator<std::pair<const mv_pair, Eigen::Matrix4f> > >::iterator it = poses_cache_.find (pair_model_view);
if (it != poses_cache_.end ())
{
std::vector<int> indices_;
bool use_cache_;
- std::map<std::pair<std::string, int>, Eigen::Matrix4f, std::less<std::pair<std::string, int> >, Eigen::aligned_allocator<std::pair<std::pair<
- std::string, int>, Eigen::Matrix4f> > > poses_cache_;
+ std::map<std::pair<std::string, int>, Eigen::Matrix4f,
+ std::less<std::pair<std::string, int> >,
+ Eigen::aligned_allocator<std::pair<const std::pair<std::string, int>, Eigen::Matrix4f> > > poses_cache_;
std::map<std::pair<std::string, int>, typename pcl::PointCloud<PointInT>::Ptr> keypoints_cache_;
float threshold_accept_model_hypothesis_;
/**
* \brief Initializes the FLANN structure from the provided source
- * It does training for the models that havent been trained yet
+ * It does training for the models that haven't been trained yet
*/
void
*/
template<typename U, typename V>
inline ResultType
- accum_dist (const U& a, const V& b, int dim) const
+ accum_dist (const U& a, const V& b, int) const
{
//printf("New code being used, accum_dist\n");
ResultType min0;
{
std::cout << files[i] << std::endl;
if (scene != -1)
- if (scene != i)
+ if ((size_t) scene != i)
continue;
std::stringstream file;
set(SUBSYS_NAME apps)
set(SUBSYS_DESC "Application examples/samples that show how PCL works")
-set(SUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml stereo)
+set(SUBSYS_DEPS common geometry io filters sample_consensus segmentation visualization kdtree features surface octree registration keypoints tracking search recognition ml stereo 2d)
set(DEFAULT FALSE)
PCL_SUBSYS_OPTION(build "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
# OpenGL and GLUT
if(OPENGL_FOUND AND GLUT_FOUND)
- include_directories("${OPENGL_INCLUDE_DIR}")
+ if(NOT WIN32)
+ include_directories("${OPENGL_INCLUDE_DIR}")
+ endif()
include_directories("${GLUT_INCLUDE_DIR}")
PCL_ADD_EXECUTABLE_OPT_BUNDLE(pcl_grabcut_2d "${SUBSYS_NAME}" src/grabcut_2d.cpp)
target_link_libraries (pcl_grabcut_2d pcl_common pcl_io pcl_segmentation pcl_search ${GLUT_LIBRARIES} ${OPENGL_LIBRARIES})
/** \brief Stores the state of the current tree view in item_treestate_map_ */
void
storeTreeState ();
- /** \brief Retores the state of \param model 's view from item_treestate_map_ */
+ /** \brief Restores the state of \param model 's view from item_treestate_map_ */
void
restoreTreeState ();
/** \brief Removes the extra tabs the item might have */
std::vector<pcl::PointIndices> boundary_indices;
mps.segmentAndRefine (regions, model_coefficients, inlier_indices, labels, label_indices, boundary_indices);
- std::vector<bool> plane_labels;
- plane_labels.resize (label_indices.size (), false);
- for (size_t i = 0; i < label_indices.size (); i++)
- {
- if (label_indices[i].indices.size () > min_plane_size)
- {
- plane_labels[i] = true;
- }
- }
+ boost::shared_ptr<std::set<uint32_t> > plane_labels = boost::make_shared<std::set<uint32_t> > ();
+ for (size_t i = 0; i < label_indices.size (); ++i)
+ if (label_indices[i].indices.size () > (size_t) min_plane_size)
+ plane_labels->insert (i);
typename PointCloud<PointT>::CloudVectorType clusters;
- typename EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label>::Ptr euclidean_cluster_comparator = boost::make_shared <EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label> > ();
+ typename EuclideanClusterComparator<PointT, pcl::Label>::Ptr euclidean_cluster_comparator =
+ boost::make_shared <EuclideanClusterComparator<PointT, pcl::Label> > ();
euclidean_cluster_comparator->setInputCloud (input_cloud);
euclidean_cluster_comparator->setLabels (labels);
euclidean_cluster_comparator->setExcludeLabels (plane_labels);
pcl::IndicesPtr extracted_indices (new std::vector<int> ());
for (size_t i = 0; i < euclidean_label_indices.size (); i++)
{
- if (euclidean_label_indices[i].indices.size () >= min_cluster_size)
+ if (euclidean_label_indices[i].indices.size () >= (size_t) min_cluster_size)
{
typename PointCloud<PointT>::Ptr cluster = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
pcl::copyPointCloud (*input_cloud,euclidean_label_indices[i].indices,*cluster);
for (size_t i = 0; i < label_indices.size (); i++)
{
- if (label_indices[i].indices.size () >= min_plane_size)
+ if (label_indices[i].indices.size () >= (size_t) min_plane_size)
{
typename PointCloud<PointT>::Ptr plane = boost::shared_ptr<PointCloud<PointT> > (new PointCloud<PointT>);
pcl::copyPointCloud (*input_cloud,label_indices[i].indices,*plane);
{
qDebug () << "Redo in MergeCloudCommand ";
last_was_undo_ = false;
- //There is only one output_pair, but thats ok
+ //There is only one output_pair, but that's ok
foreach (OutputPair output_pair, output_data_)
{
//Replace the input with the output for this pair
QVariant
pcl::cloud_composer::CloudItem::data (int role) const
{
- // Check if we're trying to get something which is template dependant, if so, create the template if it hasn't been set
+ // Check if we're trying to get something which is template dependent, if so, create the template if it hasn't been set
if ( (role == ItemDataRole::CLOUD_TEMPLATED || role == ItemDataRole::KD_TREE_SEARCH) && !template_cloud_set_)
{
qCritical () << "Attempted to access templated types which are not set!!";
{
- //Create the plotter and QVTKWidget if it doesnt exist
+ //Create the plotter and QVTKWidget if it doesn't exist
if (!plot_)
{
plot_ = boost::shared_ptr<pcl::visualization::PCLPlotter> (new pcl::visualization::PCLPlotter);
{
if (type != PointTypeFlags::NONE)
{
- switch (type)
+ switch ((uint8_t) type)
{
case (PointTypeFlags::XYZ):
return this->performTemplatedAction<pcl::PointXYZ> (input_data);
void
pcl::cloud_composer::SelectionEvent::findIndicesInItem (CloudItem* cloud_item, pcl::PointIndices::Ptr indices)
{
- // WE DONT NEED TO DO THIS SEARCH BECAUSE WE HAVE A 1-1 CORRESPONDENCE VTK TO PCL
+ // WE DON'T NEED TO DO THIS SEARCH BECAUSE WE HAVE A 1-1 CORRESPONDENCE VTK TO PCL
// THIS IS ONLY THE CASE FOR CLOUDS WITH NO NANs
//Go through every point in the selected data set and find the matching index in this cloud
//pcl::search::KdTree<pcl::PointXYZ>::Ptr search = cloud_item->data (KD_TREE_SEARCH).value <pcl::search::Search<pcl::PointXYZ>::Ptr> ();
depth_pixel = static_cast<unsigned short*>(depth_image->GetScalarPointer (depth_dims[0]-1,depth_dims[1]-1,0));
color_pixel = static_cast<unsigned char*> (rgb_image->GetScalarPointer (depth_dims[0]-1,depth_dims[1]-1,0));
- for (int y=0; y<cloud->height; ++y)
+ for (uint32_t y=0; y<cloud->height; ++y)
{
- for (int x=0; x<cloud->width; ++x, --depth_pixel, color_pixel-=3)
+ for (uint32_t x=0; x<cloud->width; ++x, --depth_pixel, color_pixel-=3)
{
PointXYZRGB new_point;
// uint8_t* p_i = &(cloud_blob->data[y * cloud_blob->row_step + x * cloud_blob->point_step]);
for (int i=0; i < to_copy->rowCount (); ++i){
QList <QStandardItem*> new_row;
QStandardItem* parent = to_copy->item(i,0);
- QModelIndex parent_index = to_copy->index(i,0);
qDebug () << "Copying "<<parent->text()<< " cols ="<<to_copy->columnCount ();
new_row.append (parent->clone ());
for (int j=1; j < to_copy->columnCount (); ++j)
{
if (type != PointTypeFlags::NONE)
{
- switch (type)
+ switch ((uint8_t) type)
{
case (PointTypeFlags::XYZ):
return this->performTemplatedAction<pcl::PointXYZ> (input_data);
{
if (type != PointTypeFlags::NONE)
{
- switch (type)
+ switch ((uint8_t) type)
{
case (PointTypeFlags::XYZ):
return this->performTemplatedAction<pcl::PointXYZ> (input_data);
{
if (type != PointTypeFlags::NONE)
{
- switch (type)
+ switch ((uint8_t) type)
{
case (PointTypeFlags::XYZ | PointTypeFlags::RGB):
return this->performTemplatedAction<pcl::PointXYZRGB> (input_data);
// Check the distance threshold
if (squared_distance [0] < squared_distance_threshold)
{
- if (index [0] >= cloud_model_selected->size ())
+ if ((size_t) index [0] >= cloud_model_selected->size ())
{
std::cerr << "ERROR in icp.cpp: Segfault!\n";
std::cerr << " Trying to access index " << index [0] << " >= " << cloud_model_selected->size () << std::endl;
// \ / //
// 5 //
- for (int i=0; i<boundary.size (); ++i)
+ for (size_t i=0; i<boundary.size (); ++i)
{
// The vertices on the boundary
vi_a = mesh.getOriginatingVertexIndex (boundary [i]);
return 1;
else
{
- //compute distance and check aginst max_dist
+ //compute distance and check against max_dist
if ((p1.getVector3fMap () - p2.getVector3fMap ()).norm () <= max_dist)
{
p2.intensity = p1.intensity;
pcl::RGBPlaneCoefficientComparator<PointT, pcl::Normal>::Ptr rgb_comparator_;
pcl::RGBPlaneCoefficientComparator<PointT, pcl::Normal> rgb_comp_;
pcl::EdgeAwarePlaneComparator<PointT, pcl::Normal>::Ptr edge_aware_comparator_;
- pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label>::Ptr euclidean_cluster_comparator_;
+ pcl::EuclideanClusterComparator<PointT, pcl::Label>::Ptr euclidean_cluster_comparator_;
public Q_SLOTS:
void toggleCapturePressed()
/** \brief Indicate that the timeoutSlot needs to reload the pointcloud */
bool cloud_modified_;
- /** \brief Indicate that files should play continiously */
+ /** \brief Indicate that files should play continuously */
bool play_mode_;
/** \brief In play mode only update if speed_counter_ == speed_value */
unsigned int speed_counter_;
{
namespace apps
{
- /** \brief @b Class to render synthetic views of a 3D mesh using a tesselated sphere
+ /** \brief @b Class to render synthetic views of a 3D mesh using a tessellated sphere
* NOTE: This class should replace renderViewTesselatedSphere from pcl::visualization.
* Some extensions are planned in the near future to this class like removal of duplicated views for
* symmetrical objects, generation of RGB synthetic clouds when RGB available on mesh, etc.
campos_constraints_func_ = bb;
}
- /* \brief Indicates wether to generate organized or unorganized data
+ /* \brief Indicates whether to generate organized or unorganized data
* \param b organized/unorganized
*/
void
resolution_ = res;
}
- /* \brief Wether to use the vertices or triangle centers of the tesselated sphere
+ /* \brief Whether to use the vertices or triangle centers of the tessellated sphere
* \param use true indicates to use vertices, false triangle centers
*/
radius_sphere_ = radius;
}
- /* \brief Wether to compute the entropies (level of occlusions for each view)
+ /* \brief Whether to compute the entropies (level of occlusions for each view)
* \param compute true to compute entropies, false otherwise
*/
void
compute_entropy_ = compute;
}
- /* \brief How many times the icosahedron should be tesselated. Results in more or less camera positions and generated views.
- * \param level amount of tesselation
+ /* \brief How many times the icosahedron should be tessellated. Results in more or less camera positions and generated views.
+ * \param level amount of tessellation
*/
void
setTesselationLevel (int level)
std::map<std::string, Parameter*>::iterator currentParameter = parameter_map_.begin();
size_t currentRow = 0;
- while(currentRow < index.row() && currentParameter != parameter_map_.end()) {
+ while(currentRow < (size_t) index.row() && currentParameter != parameter_map_.end()) {
++ currentParameter;
++ currentRow;
}
virtual void next ();
Q_SIGNALS:
- /** \brief Ommitted when a filter is created. */
+ /** \brief Omitted when a filter is created. */
void filterCreated ();
protected:
# include <OpenGL/gl.h>
# include <OpenGL/glu.h>
#else
+#if _WIN32
+// Need this to pull in APIENTRY, etc.
+#include "windows.h"
+#endif
# include <GL/gl.h>
# include <GL/glu.h>
#endif
/// The internal representation of the cloud
Cloud3D cloud_;
- /// @breif A weak pointer pointing to the selection object.
+ /// @brief A weak pointer pointing to the selection object.
/// @details This implementation uses the weak pointer to allow for a lazy
/// update of the cloud if the selection object is destroyed.
boost::weak_ptr<Selection> selection_wk_ptr_;
/// (false) when displaying the cloud
bool use_color_ramp_;
- /// Flag that indicates whether the cloud should be colored with its own
- /// color
- bool use_native_color_;
-
/// The axis which the color ramp is to be applied when drawing the cloud
Axis color_ramp_axis_;
void
cut ();
- /// @brief Enters the mode where users are able to translate the selecte
+ /// @brief Enters the mode where users are able to translate the selected
/// points.
void
transform ();
class CommandQueue
{
public:
- /// @brief Defaut Constructor
+ /// @brief Default Constructor
/// @details Creates a command queue object and makes its depth limit
/// be the default value.
CommandQueue ();
void
execute (CommandPtr);
- /// @brief Undoes the last command by poping the tail of the queue, invoke
+ /// @brief Undoes the last command by popping the tail of the queue, invoke
/// the undo function of the command.
void
undo ();
- /// @breif Changes the command history limit.
+ /// @brief Changes the command history limit.
/// @details If the passed size is smaller than the current size then the
/// oldest commands are removed (their undo functions are not called).
/// @param size The new maximum number of commands that may exist in this
///
/// @file transformCommand.h
-/// @details a TransfromCommand object provides transformation and undo
+/// @details a TransformCommand object provides transformation and undo
/// functionalities. // XXX - transformation of what?
/// @author Yue Li and Matthew Hielsberg
use_color_ramp_(copy.use_color_ramp_),
color_ramp_axis_(copy.color_ramp_axis_),
display_scale_(copy.display_scale_),
+ partitioned_indices_(copy.partitioned_indices_),
point_size_(copy.point_size_),
selected_point_size_(copy.selected_point_size_),
select_translate_x_(copy.select_translate_x_),
select_translate_y_(copy.select_translate_y_),
- select_translate_z_(copy.select_translate_z_),
- partitioned_indices_(copy.partitioned_indices_)
+ select_translate_z_(copy.select_translate_z_)
{
std::copy(copy.center_xyz_, copy.center_xyz_+XYZ_SIZE, center_xyz_);
std::copy(copy.cloud_matrix_, copy.cloud_matrix_+MATRIX_SIZE, cloud_matrix_);
return;
DenoiseParameterForm form;
form.exec();
+ // check for cancel.
+ if (!form.ok())
+ {
+ return;
+ }
boost::shared_ptr<DenoiseCommand> c(new DenoiseCommand(selection_ptr_,
cloud_ptr_, form.getMeanK(), form.getStdDevThresh()));
command_queue_ptr_->execute(c);
#include <pcl/apps/point_cloud_editor/cloudTransformTool.h>
#include <pcl/apps/point_cloud_editor/cloud.h>
-const float DEG_2_RADS = M_PI / 180.0f;
-
const float CloudTransformTool::DEFAULT_SCALE_FACTOR_ = 1.14;
const float CloudTransformTool::DEFAULT_TRANSLATE_FACTOR_ = 0.001f;
// This code was found on:
// http://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix
-// and is listed as being part of an open soure project (the MESA project)
+// and is listed as being part of an open source project (the MESA project)
//
// The original code in MESA comes from __gluInvertMatrixd() in project.c
//
float min_xyz[XYZ_SIZE] = {0.0f};
float max_xyz[XYZ_SIZE] = {0.0f};
Selection::const_iterator it = selection_ptr_->begin();
- float *pt = &((cloud_ptr_->getObjectSpacePoint(*it)).data[X]);
+ Point3D point_3d = cloud_ptr_->getObjectSpacePoint (*it);
+ float *pt = &(point_3d.data[X]);
std::copy(pt, pt+XYZ_SIZE, max_xyz);
std::copy(max_xyz, max_xyz+XYZ_SIZE, min_xyz);
for (++it; it != selection_ptr_->end(); ++it)
{
- pt = &((cloud_ptr_->getObjectSpacePoint(*it)).data[X]);
+ Point3D point_3d = cloud_ptr_->getObjectSpacePoint (*it);
+ pt = &(point_3d.data[X]);
for (unsigned int j = 0; j < XYZ_SIZE; ++j)
{
min_xyz[j] = std::min(min_xyz[j], pt[j]);
exppd.segment (*points_above_plane);
// Use an organized clustering segmentation to extract the individual clusters
- EuclideanClusterComparator<PointT, Normal, Label>::Ptr euclidean_cluster_comparator (new EuclideanClusterComparator<PointT, Normal, Label>);
+ EuclideanClusterComparator<PointT, Label>::Ptr euclidean_cluster_comparator (new EuclideanClusterComparator<PointT, Label>);
euclidean_cluster_comparator->setInputCloud (cloud);
euclidean_cluster_comparator->setDistanceThreshold (0.03f, false);
// Set the entire scene to false, and the inliers of the objects located on top of the plane to true
scene->points[points_above_plane->indices[i]].label = 1;
euclidean_cluster_comparator->setLabels (scene);
- vector<bool> exclude_labels (2); exclude_labels[0] = true; exclude_labels[1] = false;
- euclidean_cluster_comparator->setExcludeLabels (exclude_labels);
+ boost::shared_ptr<std::set<uint32_t> > exclude_labels = boost::make_shared<std::set<uint32_t> > ();
+ exclude_labels->insert (0);
OrganizedConnectedComponentSegmentation<PointT, Label> euclidean_segmentation (euclidean_cluster_comparator);
euclidean_segmentation.setInputCloud (cloud);
}while(false)
-int default_polynomial_order = 2;
-bool default_use_polynomial_fit = false;
+int default_polynomial_order = 0;
double default_search_radius = 0.0,
default_sqr_gauss_param = 0.0;
typedef typename Cloud::ConstPtr CloudConstPtr;
OpenNISmoothing (double search_radius, bool sqr_gauss_param_set, double sqr_gauss_param,
- bool use_polynomial_fit, int polynomial_order,
- const std::string& device_id = "")
+ int polynomial_order, const std::string& device_id = "")
: viewer ("PCL OpenNI MLS Smoothing")
, device_id_(device_id)
{
// Start 4 threads
smoother_.setSearchRadius (search_radius);
if (sqr_gauss_param_set) smoother_.setSqrGaussParam (sqr_gauss_param);
- smoother_.setPolynomialFit (use_polynomial_fit);
smoother_.setPolynomialOrder (polynomial_order);
typename pcl::search::KdTree<PointType>::Ptr tree (new typename pcl::search::KdTree<PointType> ());
<< "where options are:\n"
<< " -search_radius X = sphere radius to be used for finding the k-nearest neighbors used for fitting (default: " << default_search_radius << ")\n"
<< " -sqr_gauss_param X = parameter used for the distance based weighting of neighbors (recommended = search_radius^2) (default: " << default_sqr_gauss_param << ")\n"
- << " -use_polynomial_fit X = decides whether the surface and normal are approximated using a polynomial or only via tangent estimation (default: " << default_use_polynomial_fit << ")\n"
- << " -polynomial_order X = order of the polynomial to be fit (implicitly, use_polynomial_fit = 1) (default: " << default_polynomial_order << ")\n";
+ << " -polynomial_order X = order of the polynomial to be fit (0 means tangent estimation) (default: " << default_polynomial_order << ")\n";
openni_wrapper::OpenNIDriver& driver = openni_wrapper::OpenNIDriver::getInstance ();
if (driver.getNumberDevices () > 0)
double sqr_gauss_param = default_sqr_gauss_param;
bool sqr_gauss_param_set = true;
int polynomial_order = default_polynomial_order;
- bool use_polynomial_fit = default_use_polynomial_fit;
pcl::console::parse_argument (argc, argv, "-search_radius", search_radius);
if (pcl::console::parse_argument (argc, argv, "-sqr_gauss_param", sqr_gauss_param) == -1)
sqr_gauss_param_set = false;
- if (pcl::console::parse_argument (argc, argv, "-polynomial_order", polynomial_order) == -1 )
- use_polynomial_fit = true;
- pcl::console::parse_argument (argc, argv, "-use_polynomial_fit", use_polynomial_fit);
+ pcl::console::parse_argument (argc, argv, "-polynomial_order", polynomial_order);
pcl::OpenNIGrabber grabber (arg);
if (grabber.providesCallback<pcl::OpenNIGrabber::sig_cb_openni_point_cloud_rgba> ())
{
OpenNISmoothing<pcl::PointXYZRGBA> v (search_radius, sqr_gauss_param_set, sqr_gauss_param,
- use_polynomial_fit, polynomial_order, arg);
+ polynomial_order, arg);
v.run ();
}
else
{
OpenNISmoothing<pcl::PointXYZ> v (search_radius, sqr_gauss_param_set, sqr_gauss_param,
- use_polynomial_fit, polynomial_order, arg);
+ polynomial_order, arg);
v.run ();
}
euclidean_comparator_.reset (new pcl::EuclideanPlaneCoefficientComparator<PointT, pcl::Normal> ());
rgb_comparator_.reset (new pcl::RGBPlaneCoefficientComparator<PointT, pcl::Normal> ());
edge_aware_comparator_.reset (new pcl::EdgeAwarePlaneComparator<PointT, pcl::Normal> ());
- euclidean_cluster_comparator_ = pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label>::Ptr (new pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label> ());
+ euclidean_cluster_comparator_ = pcl::EuclideanClusterComparator<PointT, pcl::Label>::Ptr (new pcl::EuclideanClusterComparator<PointT, pcl::Label> ());
// Set up Organized Multi Plane Segmentation
mps.setMinInliers (10000);
if (use_clustering_ && regions.size () > 0)
{
- std::vector<bool> plane_labels;
- plane_labels.resize (label_indices.size (), false);
- for (size_t i = 0; i < label_indices.size (); i++)
- {
+ boost::shared_ptr<std::set<uint32_t> > plane_labels = boost::make_shared<std::set<uint32_t> > ();
+ for (size_t i = 0; i < label_indices.size (); ++i)
if (label_indices[i].indices.size () > 10000)
- {
- plane_labels[i] = true;
- }
- }
+ plane_labels->insert (i);
euclidean_cluster_comparator_->setInputCloud (cloud);
euclidean_cluster_comparator_->setLabels (labels);
if (cloud_->isOrganized ())
{
// Use an organized clustering segmentation to extract the individual clusters
- typename EuclideanClusterComparator<PointT, Normal, Label>::Ptr euclidean_cluster_comparator (new EuclideanClusterComparator<PointT, Normal, Label>);
+ typename EuclideanClusterComparator<PointT, Label>::Ptr euclidean_cluster_comparator (new EuclideanClusterComparator<PointT, Label>);
euclidean_cluster_comparator->setInputCloud (cloud);
euclidean_cluster_comparator->setDistanceThreshold (0.03f, false);
// Set the entire scene to false, and the inliers of the objects located on top of the plane to true
scene->points[points_above_plane->indices[i]].label = 1;
euclidean_cluster_comparator->setLabels (scene);
- vector<bool> exclude_labels (2); exclude_labels[0] = true; exclude_labels[1] = false;
+ boost::shared_ptr<std::set<uint32_t> > exclude_labels = boost::make_shared<std::set<uint32_t> > ();
+ exclude_labels->insert (0);
euclidean_cluster_comparator->setExcludeLabels (exclude_labels);
OrganizedConnectedComponentSegmentation<PointT, Label> euclidean_segmentation (euclidean_cluster_comparator);
void
PCDVideoPlayer::backButtonPressed ()
{
- if(current_frame_ == 0) // Allready in the beginning
+ if(current_frame_ == 0) // Already in the beginning
{
PCL_DEBUG ("[PCDVideoPlayer::nextButtonPressed] : reached the end\n");
current_frame_ = nr_of_frames_ - 1; // reset to end
PCL_INFO ("\t Up/Down move a vertical slider by one single step.\n");
PCL_INFO ("\t PageUp moves up one page.\n");
PCL_INFO ("\t PageDown moves down one page.\n");
- PCL_INFO ("\t Home moves to the start (mininum).\n");
+ PCL_INFO ("\t Home moves to the start (minimum).\n");
PCL_INFO ("\t End moves to the end (maximum).\n");
}
ico->SetSolidTypeToIcosahedron ();
ico->Update ();
- //tesselate cells from icosahedron
+ //tessellate cells from icosahedron
vtkSmartPointer<vtkLoopSubdivisionFilter> subdivide = vtkSmartPointer<vtkLoopSubdivisionFilter>::New ();
subdivide->SetNumberOfSubdivisions (tesselation_level_);
subdivide->SetInputConnection (ico->GetOutputPort ());
pcl::PointCloud<PointT>::CloudVectorType clusters;
if (ground_cloud->points.size () > 0)
{
- std::vector<bool> plane_labels;
- plane_labels.resize (region_indices.size (), false);
- for (size_t i = 0; i < region_indices.size (); i++)
- {
- if (region_indices[i].indices.size () > mps.getMinInliers ())
- {
- plane_labels[i] = true;
- }
- }
+ boost::shared_ptr<std::set<uint32_t> > plane_labels = boost::make_shared<std::set<uint32_t> > ();
+ for (size_t i = 0; i < region_indices.size (); ++i)
+ if ((region_indices[i].indices.size () > mps.getMinInliers ()))
+ plane_labels->insert (i);
- pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label>::Ptr euclidean_cluster_comparator_ (new pcl::EuclideanClusterComparator<PointT, pcl::Normal, pcl::Label> ());
-
+ pcl::EuclideanClusterComparator<PointT, pcl::Label>::Ptr euclidean_cluster_comparator_ (new pcl::EuclideanClusterComparator<PointT, pcl::Label> ());
euclidean_cluster_comparator_->setInputCloud (cloud);
euclidean_cluster_comparator_->setLabels (labels_ptr);
euclidean_cluster_comparator_->setExcludeLabels (plane_labels);
# the new option.
# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in
# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would
-# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor.
+# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefore.
#=============================================================================
# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
###############################################################################
-# - Try to find Ensenso SDK (IDS-Imaging)
-# Once done this will define
-# ENSENSO_FOUND - System has Ensenso SDK
-# ENSENSO_INCLUDE_DIRS - The Ensenso SDK include directories
-# ENSENSO_LIBRARIES - The libraries needed to use Ensenso SDK
-# ENSENSO_DEFINITIONS - Compiler switches required for using Ensenso SDK
-# -----------------------
+# Find Ensenso SDK (IDS-Imaging)
+#
+# find_package(Ensenso)
+#
+# Variables defined by this module:
+#
+# ENSENSO_FOUND True if Ensenso SDK was found
+# ENSENSO_INCLUDE_DIRS The location(s) of Ensenso SDK headers
+# ENSENSO_LIBRARIES Libraries needed to use Ensenso SDK
find_path(ENSENSO_INCLUDE_DIR nxLib.h
- HINTS ${ENSENSO_ABI_HINT}
- /opt/ensenso/development/c
- "$ENV{PROGRAMFILES}/Ensenso/development/c" "$ENV{PROGRAMW6432}/Ensenso/development/c"
+ HINTS "${ENSENSO_ABI_HINT}"
+ "/opt/ensenso/development/c"
+ "$ENV{PROGRAMFILES}/Ensenso/development/c"
+ "$ENV{PROGRAMW6432}/Ensenso/development/c"
PATH_SUFFIXES include/)
-find_library(ENSENSO_LIBRARY QUIET NAMES NxLib64 NxLib32 nxLib64 nxLib32
- HINTS ${ENSENSO_ABI_HINT}
- "$ENV{PROGRAMFILES}/Ensenso/development/c" "$ENV{PROGRAMW6432}/Ensenso/development/c"
+find_library(ENSENSO_LIBRARY QUIET
+ NAMES NxLib64 NxLib32 nxLib64 nxLib32
+ HINTS "${ENSENSO_ABI_HINT}"
+ "$ENV{PROGRAMFILES}/Ensenso/development/c"
+ "$ENV{PROGRAMW6432}/Ensenso/development/c"
PATH_SUFFIXES lib/)
-set(ENSENSO_LIBRARIES ${ENSENSO_LIBRARY})
-set(ENSENSO_INCLUDE_DIRS ${ENSENSO_INCLUDE_DIR})
+if(ENSENSO_INCLUDE_DIR AND ENSENSO_LIBRARY)
-include(FindPackageHandleStandardArgs)
-# handle the QUIETLY and REQUIRED arguments and set ENSENSO_FOUND to TRUE
-# if all listed variables are TRUE
-find_package_handle_standard_args(ensenso DEFAULT_MSG
- ENSENSO_LIBRARY ENSENSO_INCLUDE_DIR)
+ # Include directories
+ set(ENSENSO_INCLUDE_DIRS ${ENSENSO_INCLUDE_DIR})
+ unset(ENSENSO_INCLUDE_DIR)
+ mark_as_advanced(ENSENSO_INCLUDE_DIRS)
-mark_as_advanced(ENSENSO_INCLUDE_DIR ENSENSO_LIBRARY)
+ # Libraries
+ set(ENSENSO_LIBRARIES ${ENSENSO_LIBRARY})
+ unset(ENSENSO_LIBRARY)
+ mark_as_advanced(ENSENSO_LIBRARIES)
-if(ENSENSO_FOUND)
- message(STATUS "Ensenso SDK found")
-endif(ENSENSO_FOUND)
+endif()
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(ENSENSO
+ FOUND_VAR ENSENSO_FOUND
+ REQUIRED_VARS ENSENSO_LIBRARIES ENSENSO_INCLUDE_DIRS
+)
+
+if(ENSENSO_FOUND)
+ message(STATUS "Ensenso found (include: ${ENSENSO_INCLUDE_DIRS}, lib: ${ENSENSO_LIBRARIES})")
+endif()
find_package(PkgConfig QUIET)
if (FLANN_FIND_VERSION)
- pkg_check_modules(PC_FLANN flann>=${FLANN_FIND_VERSION})
+ pkg_check_modules(FLANN flann>=${FLANN_FIND_VERSION})
else(FLANN_FIND_VERSION)
- pkg_check_modules(PC_FLANN flann)
+ pkg_check_modules(FLANN flann)
endif(FLANN_FIND_VERSION)
-set(FLANN_DEFINITIONS ${PC_FLANN_CFLAGS_OTHER})
+if(NOT FLANN_FOUND)
+ find_path(FLANN_INCLUDE_DIR flann/flann.hpp
+ HINTS "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
+ PATHS "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
+ PATH_SUFFIXES include)
-find_path(FLANN_INCLUDE_DIR flann/flann.hpp
- HINTS ${PC_FLANN_INCLUDEDIR} ${PC_FLANN_INCLUDE_DIRS} "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/Flann" "$ENV{PROGRAMW6432}/Flann"
- PATH_SUFFIXES include)
+ find_library(FLANN_LIBRARY
+ NAMES ${FLANN_RELEASE_NAME}
+ HINTS "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
+ PATHS "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
+ PATH_SUFFIXES lib)
-find_library(FLANN_LIBRARY
- NAMES ${FLANN_RELEASE_NAME}
- HINTS ${PC_FLANN_LIBDIR} ${PC_FLANN_LIBRARY_DIRS} "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/Flann" "$ENV{PROGRAMW6432}/Flann"
- PATH_SUFFIXES lib)
+ find_library(FLANN_LIBRARY_DEBUG
+ NAMES ${FLANN_DEBUG_NAME} ${FLANN_RELEASE_NAME}
+ HINTS "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
+ PATHS "$ENV{PROGRAMFILES}/flann" "$ENV{PROGRAMW6432}/flann"
+ PATH_SUFFIXES lib debug/lib)
-find_library(FLANN_LIBRARY_DEBUG
- NAMES ${FLANN_DEBUG_NAME} ${FLANN_RELEASE_NAME}
- HINTS ${PC_FLANN_LIBDIR} ${PC_FLANN_LIBRARY_DIRS} "${FLANN_ROOT}" "$ENV{FLANN_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/Flann" "$ENV{PROGRAMW6432}/Flann"
- PATH_SUFFIXES lib)
+ if(NOT FLANN_LIBRARY_DEBUG)
+ set(FLANN_LIBRARY_DEBUG ${FLANN_LIBRARY})
+ endif(NOT FLANN_LIBRARY_DEBUG)
-if(NOT FLANN_LIBRARY_DEBUG)
- set(FLANN_LIBRARY_DEBUG ${FLANN_LIBRARY})
-endif(NOT FLANN_LIBRARY_DEBUG)
+ set(FLANN_INCLUDE_DIRS ${FLANN_INCLUDE_DIR})
+ set(FLANN_LIBRARIES optimized ${FLANN_LIBRARY} debug ${FLANN_LIBRARY_DEBUG})
-set(FLANN_INCLUDE_DIRS ${FLANN_INCLUDE_DIR})
-set(FLANN_LIBRARIES optimized ${FLANN_LIBRARY} debug ${FLANN_LIBRARY_DEBUG})
+ include(FindPackageHandleStandardArgs)
+ find_package_handle_standard_args(FLANN DEFAULT_MSG FLANN_LIBRARY FLANN_INCLUDE_DIR)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(FLANN DEFAULT_MSG FLANN_LIBRARY FLANN_INCLUDE_DIR)
-
-mark_as_advanced(FLANN_LIBRARY FLANN_LIBRARY_DEBUG FLANN_INCLUDE_DIR)
+ mark_as_advanced(FLANN_LIBRARY FLANN_LIBRARY_DEBUG FLANN_INCLUDE_DIR)
+endif()
if(FLANN_FOUND)
message(STATUS "FLANN found (include: ${FLANN_INCLUDE_DIRS}, lib: ${FLANN_LIBRARIES})")
###############################################################################
# Find OpenNI
#
-# This sets the following variables:
-# OPENNI_FOUND - True if OPENNI was found.
-# OPENNI_INCLUDE_DIRS - Directories containing the OPENNI include files.
-# OPENNI_LIBRARIES - Libraries needed to use OPENNI.
-# OPENNI_DEFINITIONS - Compiler flags for OPENNI.
+# find_package(OpenNI)
#
-# For libusb-1.0, add USB_10_ROOT if not found
+# Variables defined by this module:
+#
+# OPENNI_FOUND True if OpenNI was found
+# OPENNI_INCLUDE_DIRS The location(s) of OpenNI headers
+# OPENNI_LIBRARIES Libraries needed to use OpenNI
+# OPENNI_DEFINITIONS Compiler flags for OpenNI
find_package(PkgConfig QUIET)
set(OPENNI_SUFFIX 64)
endif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
-#add a hint so that it can find it without the pkg-config
+# Add a hint so that it can find it without the pkg-config
find_path(OPENNI_INCLUDE_DIR XnStatus.h
- HINTS ${PC_OPENNI_INCLUDEDIR} ${PC_OPENNI_INCLUDE_DIRS} /usr/include/openni /usr/include/ni /opt/local/include/ni "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
+ HINTS ${PC_OPENNI_INCLUDEDIR}
+ ${PC_OPENNI_INCLUDE_DIRS}
+ /usr/include/openni
+ /usr/include/ni
+ /opt/local/include/ni
+ "${OPENNI_ROOT}"
+ "$ENV{OPENNI_ROOT}"
PATHS "$ENV{OPEN_NI_INSTALL_PATH${OPENNI_SUFFIX}}/Include"
PATH_SUFFIXES openni include Include)
-#add a hint so that it can find it without the pkg-config
+
+# Add a hint so that it can find it without the pkg-config
find_library(OPENNI_LIBRARY
NAMES OpenNI${OPENNI_SUFFIX}
- HINTS ${PC_OPENNI_LIBDIR} ${PC_OPENNI_LIBRARY_DIRS} /usr/lib "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
+ HINTS ${PC_OPENNI_LIBDIR}
+ ${PC_OPENNI_LIBRARY_DIRS}
+ /usr/lib
+ "${OPENNI_ROOT}"
+ "$ENV{OPENNI_ROOT}"
PATHS "$ENV{OPEN_NI_LIB${OPENNI_SUFFIX}}"
PATH_SUFFIXES lib Lib Lib64)
-if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- set(OPENNI_LIBRARIES ${OPENNI_LIBRARY} ${LIBUSB_1_LIBRARIES})
-else()
- set(OPENNI_LIBRARIES ${OPENNI_LIBRARY})
+if(OPENNI_INCLUDE_DIR AND OPENNI_LIBRARY)
+
+ # Include directories
+ set(OPENNI_INCLUDE_DIRS ${OPENNI_INCLUDE_DIR})
+ unset(OPENNI_INCLUDE_DIR)
+ mark_as_advanced(OPENNI_INCLUDE_DIRS)
+
+ # Libraries
+ if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+ set(OPENNI_LIBRARIES ${OPENNI_LIBRARY} ${LIBUSB_1_LIBRARIES})
+ else()
+ set(OPENNI_LIBRARIES ${OPENNI_LIBRARY})
+ endif()
+ unset(OPENNI_LIBRARY)
+ mark_as_advanced(OPENNI_LIBRARIES)
+
endif()
include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(OpenNI DEFAULT_MSG OPENNI_LIBRARY OPENNI_INCLUDE_DIR)
-
-mark_as_advanced(OPENNI_LIBRARY OPENNI_INCLUDE_DIR)
+find_package_handle_standard_args(OpenNI
+ FOUND_VAR OPENNI_FOUND
+ REQUIRED_VARS OPENNI_LIBRARIES OPENNI_INCLUDE_DIRS
+)
if(OPENNI_FOUND)
- # Add the include directories
- set(OPENNI_INCLUDE_DIRS ${OPENNI_INCLUDE_DIR})
- message(STATUS "OpenNI found (include: ${OPENNI_INCLUDE_DIRS}, lib: ${OPENNI_LIBRARY})")
-endif(OPENNI_FOUND)
+ message(STATUS "OpenNI found (include: ${OPENNI_INCLUDE_DIRS}, lib: ${OPENNI_LIBRARIES})")
+endif()
###############################################################################
-# Find OpenNI 2
+# Find OpenNI2
#
-# This sets the following variables:
-# OPENNI2_FOUND - True if OPENNI 2 was found.
-# OPENNI2_INCLUDE_DIRS - Directories containing the OPENNI 2 include files.
-# OPENNI2_LIBRARIES - Libraries needed to use OPENNI 2.
-# OPENNI2_DEFINITIONS - Compiler flags for OPENNI 2.
+# find_package(OpenNI2)
#
-# For libusb-1.0, add USB_10_ROOT if not found
+# Variables defined by this module:
+#
+# OPENNI2_FOUND True if OpenNI2 was found
+# OPENNI2_INCLUDE_DIRS The location(s) of OpenNI2 headers
+# OPENNI2_LIBRARIES Libraries needed to use OpenNI2
+# OPENNI2_DEFINITIONS Compiler flags for OpenNI2
find_package(PkgConfig QUIET)
set(OPENNI2_SUFFIX 64)
endif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
-find_path(OPENNI2_INCLUDE_DIRS OpenNI.h
- PATHS
- "$ENV{OPENNI2_INCLUDE${OPENNI2_SUFFIX}}" # Win64 needs '64' suffix
- /usr/include/openni2 # common path for deb packages
+find_path(OPENNI2_INCLUDE_DIR OpenNI.h
+ PATHS "$ENV{OPENNI2_INCLUDE${OPENNI2_SUFFIX}}" # Win64 needs '64' suffix
+ "/usr/include/openni2" # common path for deb packages
+ PATH_SUFFIXES include/openni2
)
find_library(OPENNI2_LIBRARY
- NAMES OpenNI2 # No suffix needed on Win64
- libOpenNI2 # Linux
- PATHS "$ENV{OPENNI2_LIB${OPENNI2_SUFFIX}}" # Windows default path, Win64 needs '64' suffix
- "$ENV{OPENNI2_REDIST}" # Linux install does not use a separate 'lib' directory
- )
-
-if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY} ${LIBUSB_1_LIBRARIES})
-else()
- set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY})
-endif()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(OpenNI2 DEFAULT_MSG OPENNI2_LIBRARY OPENNI2_INCLUDE_DIRS)
+ NAMES OpenNI2 # No suffix needed on Win64
+ libOpenNI2 # Linux
+ PATHS "$ENV{OPENNI2_LIB${OPENNI2_SUFFIX}}" # Windows default path, Win64 needs '64' suffix
+ "$ENV{OPENNI2_REDIST}" # Linux install does not use a separate 'lib' directory
+)
-mark_as_advanced(OPENNI2_LIBRARY OPENNI2_INCLUDE_DIRS)
+if(OPENNI2_INCLUDE_DIR AND OPENNI2_LIBRARY)
-if(OPENNI2_FOUND)
- # Add the include directories
+ # Include directories
set(OPENNI2_INCLUDE_DIRS ${OPENNI2_INCLUDE_DIR})
+ unset(OPENNI2_INCLUDE_DIR)
+ mark_as_advanced(OPENNI2_INCLUDE_DIRS)
+
+ # Libraries
+ if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+ set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY} ${LIBUSB_1_LIBRARIES})
+ else()
+ set(OPENNI2_LIBRARIES ${OPENNI2_LIBRARY})
+ endif()
+ unset(OPENNI2_LIBRARY)
+ mark_as_advanced(OPENNI2_LIBRARIES)
+
set(OPENNI2_REDIST_DIR $ENV{OPENNI2_REDIST${OPENNI2_SUFFIX}})
- message(STATUS "OpenNI 2 found (include: ${OPENNI2_INCLUDE_DIRS}, lib: ${OPENNI2_LIBRARY}, redist: ${OPENNI2_REDIST_DIR})")
-endif(OPENNI2_FOUND)
+ mark_as_advanced(OPENNI2_REDIST_DIR)
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(OpenNI2
+ FOUND_VAR OPENNI2_FOUND
+ REQUIRED_VARS OPENNI2_LIBRARIES OPENNI2_INCLUDE_DIRS
+)
+
+if(OPENNI2_FOUND)
+ message(STATUS "OpenNI2 found (include: ${OPENNI2_INCLUDE_DIRS}, lib: ${OPENNI2_LIBRARIES})")
+endif()
# QHULL_INCLUDE_DIRS - Directories containing the QHULL include files.
# QHULL_LIBRARIES - Libraries needed to use QHULL.
# QHULL_DEFINITIONS - Compiler flags for QHULL.
-# If QHULL_USE_STATIC is specified then look for static libraries ONLY else
+# If QHULL_USE_STATIC is specified then look for static libraries ONLY else
# look for shared ones
set(QHULL_MAJOR_VERSION 6)
set(QHULL_DEBUG_NAME qhullstatic_d)
else(QHULL_USE_STATIC)
set(QHULL_RELEASE_NAME qhull_p qhull${QHULL_MAJOR_VERSION} qhull)
- set(QHULL_DEBUG_NAME qhull_pd qhull${QHULL_MAJOR_VERSION}_d qhull_d${QHULL_MAJOR_VERSION} qhull_d)
+ set(QHULL_DEBUG_NAME qhull_p_d qhull${QHULL_MAJOR_VERSION}_d qhull_d${QHULL_MAJOR_VERSION} qhull_d)
endif(QHULL_USE_STATIC)
find_file(QHULL_HEADER
NAMES libqhull/libqhull.h qhull.h
HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}" "${QHULL_INCLUDE_DIR}"
- PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
+ PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES qhull src/libqhull libqhull include)
set(QHULL_HEADER "${QHULL_HEADER}" CACHE INTERNAL "QHull header" FORCE )
set(QHULL_INCLUDE_DIR "QHULL_INCLUDE_DIR-NOTFOUND")
endif(QHULL_HEADER)
-set(QHULL_INCLUDE_DIR "${QHULL_INCLUDE_DIR}" CACHE PATH "QHull include dir." FORCE)
-
-find_library(QHULL_LIBRARY
+find_library(QHULL_LIBRARY
NAMES ${QHULL_RELEASE_NAME}
HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
+ PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
PATH_SUFFIXES project build bin lib)
get_filename_component(QHULL_LIBRARY_NAME "${QHULL_LIBRARY}" NAME)
-find_library(QHULL_LIBRARY_DEBUG
+find_library(QHULL_LIBRARY_DEBUG
NAMES ${QHULL_DEBUG_NAME} ${QHULL_RELEASE_NAME}
HINTS "${QHULL_ROOT}" "$ENV{QHULL_ROOT}"
- PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
- PATH_SUFFIXES project build bin lib)
+ PATHS "$ENV{PROGRAMFILES}/QHull" "$ENV{PROGRAMW6432}/QHull"
+ PATH_SUFFIXES project build bin lib debug/lib)
if(NOT QHULL_LIBRARY_DEBUG)
set(QHULL_LIBRARY_DEBUG ${QHULL_LIBRARY})
get_filename_component(QHULL_LIBRARY_DEBUG_NAME "${QHULL_LIBRARY_DEBUG}" NAME)
-set(QHULL_INCLUDE_DIRS ${QHULL_INCLUDE_DIR})
-set(QHULL_LIBRARIES optimized ${QHULL_LIBRARY} debug ${QHULL_LIBRARY_DEBUG})
+if(QHULL_INCLUDE_DIR AND QHULL_LIBRARY)
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Qhull DEFAULT_MSG QHULL_LIBRARY QHULL_INCLUDE_DIR)
+ # Include directories
+ set(QHULL_INCLUDE_DIRS ${QHULL_INCLUDE_DIR})
+ unset(QHULL_INCLUDE_DIR)
+ mark_as_advanced(QHULL_INCLUDE_DIRS)
-mark_as_advanced(QHULL_LIBRARY QHULL_LIBRARY_DEBUG QHULL_INCLUDE_DIR)
+ # Libraries
+ set(QHULL_LIBRARIES optimized ${QHULL_LIBRARY} debug ${QHULL_LIBRARY_DEBUG})
+ unset(QHULL_LIBRARY)
+ unset(QHULL_LIBRARY_DEBUG)
+ mark_as_advanced(QHULL_LIBRARIES)
+
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Qhull
+ FOUND_VAR QHULL_FOUND
+ REQUIRED_VARS QHULL_LIBRARIES QHULL_INCLUDE_DIRS
+)
if(QHULL_FOUND)
set(HAVE_QHULL ON)
@CPACK_NSIS_DELETE_ICONS@
@CPACK_NSIS_DELETE_ICONS_EXTRA@
- ;Delete empty start menu parent diretories
+ ;Delete empty start menu parent directories
StrCpy $MUI_TEMP "$SMPROGRAMS\$MUI_TEMP"
startMenuDeleteLoop:
Delete "$SMPROGRAMS\$MUI_TEMP\Uninstall.lnk"
@CPACK_NSIS_DELETE_ICONS_EXTRA@
- ;Delete empty start menu parent diretories
+ ;Delete empty start menu parent directories
StrCpy $MUI_TEMP "$SMPROGRAMS\$MUI_TEMP"
secondStartMenuDeleteLoop:
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2013-${win_system_name}")
elseif(MSVC_VERSION EQUAL 1900)
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2015-${win_system_name}")
- elseif(MSVC_VERSION EQUAL 1910)
+ elseif(MSVC_VERSION MATCHES "^191[0-9]$")
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-msvc2017-${win_system_name}")
else()
set(CPACK_NSIS_PACKAGE_NAME "${CPACK_NSIS_PACKAGE_NAME}-${win_system_name}")
# Make the CPack input file.
macro(PCL_MAKE_CPACK_INPUT)
set(_cpack_cfg_in "${PCL_SOURCE_DIR}/cmake/cpack_options.cmake.in")
- set(${_var} "${${_var}}\nset(CPACK_COMPONENT_GROUP_PCL_DESCRIPTION \"PCL headers and librairies\")\n")
+ set(${_var} "${${_var}}\nset(CPACK_COMPONENT_GROUP_PCL_DESCRIPTION \"PCL headers and libraries\")\n")
# Prepare the components list
set(PCL_CPACK_COMPONENTS)
"1.46.0" "1.46" "1.45.0" "1.45" "1.44.0" "1.44" "1.43.0" "1.43")
else(${CMAKE_VERSION} VERSION_LESS 2.8.5)
set(Boost_ADDITIONAL_VERSIONS
+ "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65"
"1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60"
"1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55"
"1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51"
# Required boost modules
if(WITH_OPENNI2)
-set(BOOST_REQUIRED_MODULES system filesystem thread date_time iostreams chrono)
+set(BOOST_REQUIRED_MODULES filesystem thread date_time iostreams chrono system)
find_package(Boost 1.47.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES})
else()
-set(BOOST_REQUIRED_MODULES system filesystem thread date_time iostreams)
+set(BOOST_REQUIRED_MODULES filesystem thread date_time iostreams system)
find_package(Boost 1.40.0 REQUIRED COMPONENTS ${BOOST_REQUIRED_MODULES})
endif()
# Find CUDA
-
-
if(MSVC)
- # Setting this to true brakes Visual Studio builds.
- set(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE OFF CACHE BOOL "CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE")
+ # Setting this to true brakes Visual Studio builds.
+ set(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE OFF CACHE BOOL "CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE")
endif()
set(CUDA_FIND_QUIETLY TRUE)
-find_package(CUDA 4)
+find_package(CUDA)
if(CUDA_FOUND)
- message(STATUS "Found CUDA Toolkit v${CUDA_VERSION_STRING}")
-
- if(${CUDA_VERSION_STRING} VERSION_LESS "7.5")
- # Recent versions of cmake set CUDA_HOST_COMPILER to CMAKE_C_COMPILER which
- # on OSX defaults to clang (/usr/bin/cc), but this is not a supported cuda
- # compiler. So, here we will preemptively set CUDA_HOST_COMPILER to gcc if
- # that compiler exists in /usr/bin. This will not override an existing cache
- # value if the user has passed CUDA_HOST_COMPILER on the command line.
- if (NOT DEFINED CUDA_HOST_COMPILER AND CMAKE_C_COMPILER_ID STREQUAL "Clang" AND EXISTS /usr/bin/gcc)
- set(CUDA_HOST_COMPILER /usr/bin/gcc CACHE FILEPATH "Host side compiler used by NVCC")
- message(STATUS "Setting CMAKE_HOST_COMPILER to /usr/bin/gcc instead of ${CMAKE_C_COMPILER}. See http://dev.pointclouds.org/issues/979")
- endif()
+ message(STATUS "Found CUDA Toolkit v${CUDA_VERSION_STRING}")
+ set(HAVE_CUDA TRUE)
+
+ if(${CUDA_VERSION_STRING} VERSION_LESS "7.5")
+ # Recent versions of cmake set CUDA_HOST_COMPILER to CMAKE_C_COMPILER which
+ # on OSX defaults to clang (/usr/bin/cc), but this is not a supported cuda
+ # compiler. So, here we will preemptively set CUDA_HOST_COMPILER to gcc if
+ # that compiler exists in /usr/bin. This will not override an existing cache
+ # value if the user has passed CUDA_HOST_COMPILER on the command line.
+ if (NOT DEFINED CUDA_HOST_COMPILER AND CMAKE_C_COMPILER_ID STREQUAL "Clang" AND EXISTS /usr/bin/gcc)
+ set(CUDA_HOST_COMPILER /usr/bin/gcc CACHE FILEPATH "Host side compiler used by NVCC")
+ message(STATUS "Setting CMAKE_HOST_COMPILER to /usr/bin/gcc instead of ${CMAKE_C_COMPILER}. See http://dev.pointclouds.org/issues/979")
+ endif()
+
+ # Send a warning if CUDA_HOST_COMPILER is set to a compiler that is known
+ # to be unsupported.
+ if (CUDA_HOST_COMPILER STREQUAL CMAKE_C_COMPILER AND CMAKE_C_COMPILER_ID STREQUAL "Clang")
+ message(WARNING "CUDA_HOST_COMPILER is set to an unsupported compiler: ${CMAKE_C_COMPILER}. See http://dev.pointclouds.org/issues/979")
+ endif()
+ endif()
- # Send a warning if CUDA_HOST_COMPILER is set to a compiler that is known
- # to be unsupported.
- if (CUDA_HOST_COMPILER STREQUAL CMAKE_C_COMPILER AND CMAKE_C_COMPILER_ID STREQUAL "Clang")
- message(WARNING "CUDA_HOST_COMPILER is set to an unsupported compiler: ${CMAKE_C_COMPILER}. See http://dev.pointclouds.org/issues/979")
- endif()
- endif()
+ # CUDA_ARCH_BIN is a space separated list of versions to include in output so-file. So you can set CUDA_ARCH_BIN = 10 11 12 13 20
+ # Also user can specify virtual arch in parenthesis to limit instructions set,
+ # for example CUDA_ARCH_BIN = 11(11) 12(11) 13(11) 20(11) 21(11) -> forces using only sm_11 instructions.
+ # The CMake scripts interpret XX as XX (XX). This allows user to omit parenthesis.
+ # Arch 21 is an exceptional case since it doesn't have own sm_21 instructions set.
+ # So 21 = 21(21) is an invalid configuration and user has to explicitly force previous sm_20 instruction set via 21(20).
+ # CUDA_ARCH_BIN adds support of only listed GPUs. As alternative CMake scripts also parse 'CUDA_ARCH_PTX' variable,
+ # which is a list of intermediate PTX codes to include in final so-file. The PTX code can/will be JIT compiled for any current or future GPU.
+ # To add support of older GPU for kinfu, I would embed PTX 11 and 12 into so-file. GPU with sm_13 will run PTX 12 code (no difference for kinfu)
- # CUDA_ARCH_BIN is a space separated list of versions to include in output so-file. So you can set CUDA_ARCH_BIN = 10 11 12 13 20
- # Also user can specify virtual arch in parenthesis to limit instructions set,
- # for example CUDA_ARCH_BIN = 11(11) 12(11) 13(11) 20(11) 21(11) -> forces using only sm_11 instructions.
- # The CMake scripts interpret XX as XX (XX). This allows user to omit parenthesis.
- # Arch 21 is an exceptional case since it doesn't have own sm_21 instructions set.
- # So 21 = 21(21) is an invalid configuration and user has to explicitly force previous sm_20 instruction set via 21(20).
- # CUDA_ARCH_BIN adds support of only listed GPUs. As alternative CMake scripts also parse 'CUDA_ARCH_PTX' variable,
- # which is a list of intermediate PTX codes to include in final so-file. The PTX code can/will be JIT compiled for any current or future GPU.
- # To add support of older GPU for kinfu, I would embed PTX 11 and 12 into so-file. GPU with sm_13 will run PTX 12 code (no difference for kinfu)
-
- # Find a complete list for CUDA compute capabilities at http://developer.nvidia.com/cuda-gpus
+ # Find a complete list for CUDA compute capabilities at http://developer.nvidia.com/cuda-gpus
- if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "8.0")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2 5.3 6.0 6.1")
- elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.5")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2")
- elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.0")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0")
- elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "5.0")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5")
- elseif(${CUDA_VERSION_STRING} VERSION_GREATER "4.1")
- set(__cuda_arch_bin "2.0 2.1(2.0) 3.0")
- else()
- set(__cuda_arch_bin "2.0 2.1(2.0)")
- endif()
+ if(NOT ${CUDA_VERSION_STRING} VERSION_LESS "10.0")
+ set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2 7.5")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "9.1")
+ set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0 7.2")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "9.0")
+ set(__cuda_arch_bin "3.0 3.5 5.0 5.2 5.3 6.0 6.1 7.0")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "8.0")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2 5.3 6.0 6.1")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.5")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0 5.2")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "6.0")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5 5.0")
+ elseif(NOT ${CUDA_VERSION_STRING} VERSION_LESS "5.0")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0 3.5")
+ elseif(${CUDA_VERSION_STRING} VERSION_GREATER "4.1")
+ set(__cuda_arch_bin "2.0 2.1(2.0) 3.0")
+ else()
+ set(__cuda_arch_bin "2.0 2.1(2.0)")
+ endif()
- set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
+ set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
- set(CUDA_ARCH_PTX "" CACHE STRING "Specify 'virtual' PTX arch to build PTX intermediate code for. Example: 1.0 1.2 or 10 12")
- #set(CUDA_ARCH_PTX "1.1 1.2" CACHE STRING "Specify 'virtual' PTX arch to build PTX intermediate code for. Example: 1.0 1.2 or 10 12")
+ set(CUDA_ARCH_PTX "" CACHE STRING "Specify 'virtual' PTX arch to build PTX intermediate code for. Example: 1.0 1.2 or 10 12")
+ #set(CUDA_ARCH_PTX "1.1 1.2" CACHE STRING "Specify 'virtual' PTX arch to build PTX intermediate code for. Example: 1.0 1.2 or 10 12")
- # Guess this macros will be included in cmake distributive
- include(${PCL_SOURCE_DIR}/cmake/CudaComputeTargetFlags.cmake)
- APPEND_TARGET_ARCH_FLAGS()
+ # Guess this macros will be included in cmake distributive
+ include(${PCL_SOURCE_DIR}/cmake/CudaComputeTargetFlags.cmake)
+ APPEND_TARGET_ARCH_FLAGS()
# Prevent compilation issues between recent gcc versions and old CUDA versions
list(APPEND CUDA_NVCC_FLAGS "-D_FORCE_INLINES")
set(SSE_FLAGS)
set(SSE_DEFINITIONS)
- # Test CLANG
- #if(CMAKE_COMPILER_IS_CLANG)
- # if(APPLE)
- # SET(SSE_FLAGS "${SSE_FLAGS} -march=native")
- # endif(APPLE)
- #endif(CMAKE_COMPILER_IS_CLANG)
-
- # Test GCC/G++
- if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
- execute_process(COMMAND ${CMAKE_CXX_COMPILER} "-dumpversion"
- OUTPUT_VARIABLE GCC_VERSION_STRING)
- if(GCC_VERSION_STRING VERSION_GREATER 4.2 AND NOT APPLE AND NOT CMAKE_CROSSCOMPILING)
- SET(SSE_FLAGS "${SSE_FLAGS} -march=native")
+ if(NOT CMAKE_CROSSCOMPILING)
+ # Test GCC/G++ and CLANG
+ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
+ list(APPEND SSE_FLAGS "-march=native")
message(STATUS "Using CPU native flags for SSE optimization: ${SSE_FLAGS}")
endif()
endif()
- # Unfortunately we need to check for SSE to enable "-mfpmath=sse" alongside
+ # Unfortunately we need to check for SSE to enable "-mfpmath=sse" alongside
# "-march=native". The reason for this is that by default, 32bit architectures
# tend to use the x87 FPU (which has 80 bit internal precision), thus leading
# to different results than 64bit architectures which are using SSE2 (64 bit internal
- # precision). One solution would be to use "-ffloat-store" on 32bit (see
+ # precision). One solution would be to use "-ffloat-store" on 32bit (see
# http://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html), but that slows code down,
# so the preferred solution is to try "-mpfmath=sse" first.
include(CheckCXXSourceRuns)
set(CMAKE_REQUIRED_FLAGS)
check_cxx_source_runs("
- #include <mm_malloc.h>
+ // Intel compiler defines an incompatible _mm_malloc signature
+ #if defined(__INTEL_COMPILER)
+ #include <malloc.h>
+ #else
+ #include <mm_malloc.h>
+ #endif
int main()
{
void* mem = _mm_malloc (100, 16);
elseif(MSVC AND NOT CMAKE_CL_64)
set(CMAKE_REQUIRED_FLAGS "/arch:SSE2")
endif(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
-
+
check_cxx_source_runs("
#include <emmintrin.h>
int main ()
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
if(HAVE_SSE4_2_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -msse4.2 -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-msse4.2" "-mfpmath=sse")
elseif(HAVE_SSE4_1_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -msse4.1 -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-msse4.1" "-mfpmath=sse")
elseif(HAVE_SSSE3_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -mssse3 -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-mssse3" "-mfpmath=sse")
elseif(HAVE_SSE3_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -msse3 -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-msse3" "-mfpmath=sse")
elseif(HAVE_SSE2_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -msse2 -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-msse2" "-mfpmath=sse")
elseif(HAVE_SSE_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} -msse -mfpmath=sse")
+ list(APPEND SSE_FLAGS "-msse" "-mfpmath=sse")
else()
# Setting -ffloat-store to alleviate 32bit vs 64bit discrepancies on non-SSE
# platforms.
- set(SSE_FLAGS "-ffloat-store")
+ list(APPEND SSE_FLAGS "-ffloat-store")
endif()
elseif(MSVC AND NOT CMAKE_CL_64)
if(HAVE_SSE2_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} /arch:SSE2")
+ list(APPEND SSE_FLAGS "/arch:SSE2")
elseif(HAVE_SSE_EXTENSIONS)
- SET(SSE_FLAGS "${SSE_FLAGS} /arch:SSE")
+ list(APPEND SSE_FLAGS "/arch:SSE")
endif(HAVE_SSE2_EXTENSIONS)
endif()
SET(SSE_DEFINITIONS "${SSE_DEFINITIONS} -D__SSE__")
endif()
endif()
+ string(REPLACE ";" " " SSE_FLAGS_STR "${SSE_FLAGS}")
endmacro(PCL_CHECK_FOR_SSE)
option(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked FLANN on Win32 platforms." OFF)
mark_as_advanced(PCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32)
+# Build with dynamic linking for QHull (advanced users)
+option(PCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32 "Build against a dynamically linked QHull on Win32 platforms." OFF)
+mark_as_advanced(PCL_BUILD_WITH_QHULL_DYNAMIC_LINKING_WIN32)
+
# Precompile for a minimal set of point types instead of all.
option(PCL_ONLY_CORE_POINT_TYPES "Compile explicitly only for a small subset of point types (e.g., pcl::PointXYZ instead of PCL_XYZ_POINT_TYPES)." OFF)
mark_as_advanced(PCL_ONLY_CORE_POINT_TYPES)
set(PCL_SUBSYSTEMS_MODULES ${PCL_SUBSYSTEMS})
list(REMOVE_ITEM PCL_SUBSYSTEMS_MODULES tools cuda_apps global_tests proctor examples)
+
+file(GLOB PCLCONFIG_FIND_MODULES "${PCL_SOURCE_DIR}/cmake/Modules/*.cmake")
+install(FILES ${PCLCONFIG_FIND_MODULES} COMPONENT pclconfig DESTINATION ${PCLCONFIG_INSTALL_DIR}/Modules)
+
set(PCLCONFIG_AVAILABLE_COMPONENTS)
set(PCLCONFIG_AVAILABLE_COMPONENTS_LIST)
set(PCLCONFIG_INTERNAL_DEPENDENCIES)
set(PCLCONFIG_EXTERNAL_DEPENDENCIES)
set(PCLCONFIG_OPTIONAL_DEPENDENCIES)
-set(PCLCONFIG_SSE_DEFINITIONS "${SSE_FLAGS} ${SSE_DEFINITIONS}")
+set(PCLCONFIG_SSE_DEFINITIONS "${SSE_DEFINITIONS}")
+set(PCLCONFIG_SSE_COMPILE_OPTIONS ${SSE_FLAGS})
+
foreach(_ss ${PCL_SUBSYSTEMS_MODULES})
PCL_GET_SUBSYS_STATUS(_status ${_ss})
- if(_status)
+
+ # do not include test targets
+ string(REGEX MATCH "^tests_" _is_test ${_ss})
+
+ if(_status AND NOT _is_test)
set(PCLCONFIG_AVAILABLE_COMPONENTS "${PCLCONFIG_AVAILABLE_COMPONENTS} ${_ss}")
set(PCLCONFIG_AVAILABLE_COMPONENTS_LIST "${PCLCONFIG_AVAILABLE_COMPONENTS_LIST}\n# - ${_ss}")
GET_IN_MAP(_deps PCL_SUBSYS_DEPS ${_ss})
if(_opt_deps)
set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES}set(pcl_${_ss}_opt_dep ")
foreach(_opt_dep ${_opt_deps})
- set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES}${_opt_dep} ")
+ string(TOUPPER "WITH_${_opt_dep}" _tmp)
+ string(REGEX REPLACE "-(.*)" "" _condition ${_tmp}) #libusb-1.0 case
+ if(${_condition})
+ set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES}${_opt_dep} ")
+ endif()
endforeach(_opt_dep)
set(PCLCONFIG_OPTIONAL_DEPENDENCIES "${PCLCONFIG_OPTIONAL_DEPENDENCIES})\n")
endif(_opt_deps)
endif (_sub_status)
endforeach(_sub)
endif (${PCL_SUBSYS_SUBSYS})
- endif(_status)
+ endif()
endforeach(_ss)
#Boost modules
if(SUBSYS_EXT_DEPS)
foreach(_dep ${SUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT ${EXT_DEP_FOUND} OR (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
+ if(NOT ${EXT_DEP_FOUND} AND (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_name} FALSE "Requires external library ${_dep}.")
- endif(NOT ${EXT_DEP_FOUND} OR (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
+ endif(NOT ${EXT_DEP_FOUND} AND (NOT (${EXT_DEP_FOUND} STREQUAL "TRUE")))
endforeach(_dep)
endif(SUBSYS_EXT_DEPS)
if(SUBSYS_OPT_DEPS)
if(SUBSUBSYS_EXT_DEPS)
foreach(_dep ${SUBSUBSYS_EXT_DEPS})
string(TOUPPER "${_dep}_found" EXT_DEP_FOUND)
- if(NOT ${EXT_DEP_FOUND} OR (NOT ("${EXT_DEP_FOUND}" STREQUAL "TRUE")))
+ if(NOT ${EXT_DEP_FOUND} AND (NOT ("${EXT_DEP_FOUND}" STREQUAL "TRUE")))
set(${_var} FALSE)
PCL_SET_SUBSYS_STATUS(${_parent}_${_name} FALSE "Requires external library ${_dep}.")
- endif(NOT ${EXT_DEP_FOUND} OR (NOT ("${EXT_DEP_FOUND}" STREQUAL "TRUE")))
+ endif(NOT ${EXT_DEP_FOUND} AND (NOT ("${EXT_DEP_FOUND}" STREQUAL "TRUE")))
endforeach(_dep)
endif(SUBSUBSYS_EXT_DEPS)
endif(${_var} AND (NOT ("${subsys_status}" STREQUAL "AUTO_OFF")))
###############################################################################
# Make a pkg-config file for a library. Do not include general PCL stuff in the
-# arguments; they will be added automaticaly.
+# arguments; they will be added automatically.
# _name The library name. "pcl_" will be preprended to this.
# _component The part of PCL that this pkg-config file belongs to.
# _desc Description of the library.
# Essentially a duplicate of PCL_MAKE_PKGCONFIG, but
# ensures that no -L or l flags will be created
# Do not include general PCL stuff in the
-# arguments; they will be added automaticaly.
+# arguments; they will be added automatically.
# _name The library name. "pcl_" will be preprended to this.
# _component The part of PCL that this pkg-config file belongs to.
# _desc Description of the library.
###############################################################################
# Set the hyperstatus of a subsystem and its dependee
# _name Subsystem name.
-# _dependee Dependant subsystem.
+# _dependee Dependent subsystem.
# _status AUTO_OFF to disable AUTO_ON to enable
# ARGN[0] Reason for not building.
macro(PCL_SET_SUBSYS_HYPERSTATUS _name _dependee _status)
###############################################################################
# Get the hyperstatus of a subsystem and its dependee
# _name IN subsystem name.
-# _dependee IN dependant subsystem.
+# _dependee IN dependent subsystem.
# _var OUT hyperstatus
# ARGN[0] Reason for not building.
macro(PCL_GET_SUBSYS_HYPERSTATUS _var _name)
# This macro adds on option named "WITH_NAME", where NAME is the capitalized
# dependency name. The user may use this option to control whether the
# corresponding grabber should be built or not. Also an attempt to find a
-# package with the given name is made. If it is not successfull, then the
+# package with the given name is made. If it is not successful, then the
# "WITH_NAME" option is coerced to FALSE.
macro(PCL_ADD_GRABBER_DEPENDENCY _name _description)
string(TOUPPER ${_name} _name_capitalized)
if(NOT (list_length EQUAL to_sort_list_length))
message(FATAL_ERROR "size mismatch between ${_to_sort_relative} ${to_sort_list_length} and ${_list} ${list_length}")
endif(NOT (list_length EQUAL to_sort_list_length))
- # unset the temporary list to avoid suprises (I had some them and were hard to find)
+ # unset the temporary list to avoid surprises (I had some them and were hard to find)
unset(tmp_list)
# fill it with a dummy value
fill_list(tmp_list list_length "#")
include/pcl/impl/cloud_iterator.hpp
)
- set(ros_incs
- include/pcl/ros/conversions.h
- include/pcl/ros/register_point_struct.h
- )
-
set(tools_incs
include/pcl/console/parse.h
include/pcl/console/print.h
set(LIB_NAME "pcl_${SUBSYS_NAME}")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include")
- PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${kissfft_srcs} ${incs} ${common_incs} ${impl_incs} ${ros_incs} ${tools_incs} ${kissfft_incs} ${common_incs_impl} ${range_image_incs} ${range_image_incs_impl})
- #PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${common_incs} ${impl_incs} ${ros_incs} ${tools_incs} ${common_incs_impl} ${range_image_incs} ${range_image_incs_impl})
+ PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${kissfft_srcs} ${incs} ${common_incs} ${impl_incs} ${tools_incs} ${kissfft_incs} ${common_incs_impl} ${range_image_incs} ${range_image_incs_impl})
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "" ""
"" "" "")
PCL_ADD_INCLUDES("${SUBSYS_NAME}" common/fft ${kissfft_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" common/impl ${common_incs_impl})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" impl ${impl_incs})
- PCL_ADD_INCLUDES("${SUBSYS_NAME}" ros ${ros_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" console ${tools_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" range_image ${range_image_incs})
PCL_ADD_INCLUDES("${SUBSYS_NAME}" range_image/impl ${range_image_incs_impl})
void
setDegree (int new_degree);
- /** How many parametes has a bivariate polynomial with this degree */
+ /** How many parameters has a bivariate polynomial with this degree */
unsigned int
getNoOfParameters () const { return getNoOfParametersFromDegree (degree);}
void
readBinary (const char* filename);
- /** How many parametes has a bivariate polynomial of the given degree */
+ /** How many parameters has a bivariate polynomial of the given degree */
static unsigned int
getNoOfParametersFromDegree (int n) { return ((n+2)* (n+1))/2;}
/** \brief Compute the 3D (X-Y-Z) centroid of a set of points and return it as a 3D vector.
* \param[in] cloud_iterator an iterator over the input point cloud
* \param[out] centroid the output centroid
- * \return number of valid point used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
* \note if return value is 0, the centroid is not changed, thus not valid.
- * The last compononent of the vector is set to 1, this allow to transform the centroid vector with 4x4 matrices.
+ * The last component of the vector is set to 1, this allows to transform the centroid vector with 4x4 matrices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
/** \brief Compute the 3D (X-Y-Z) centroid of a set of points and return it as a 3D vector.
* \param[in] cloud the input point cloud
* \param[out] centroid the output centroid
- * \return number of valid point used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
* \note if return value is 0, the centroid is not changed, thus not valid.
- * The last compononent of the vector is set to 1, this allow to transform the centroid vector with 4x4 matrices.
+ * The last component of the vector is set to 1, this allows to transform the centroid vector with 4x4 matrices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[out] centroid the output centroid
- * \return number of valid point used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the centroid. In case of dense point clouds, this is the same as the size of input indices.
* \note if return value is 0, the centroid is not changed, thus not valid.
- * The last compononent of the vector is set to 1, this allow to transform the centroid vector with 4x4 matrices.
+ * The last component of the vector is set to 1, this allows to transform the centroid vector with 4x4 matrices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[out] centroid the output centroid
- * \return number of valid point used to determine the centroid. In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the centroid. In case of dense point clouds, this is the same as the size of input indices.
* \note if return value is 0, the centroid is not changed, thus not valid.
- * The last compononent of the vector is set to 1, this allow to transform the centroid vector with 4x4 matrices.
+ * The last component of the vector is set to 1, this allows to transform the centroid vector with 4x4 matrices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
* The result is returned as a Eigen::Matrix3f.
* Note: the covariance matrix is not normalized with the number of
* points. For a normalized covariance, please use
- * computeNormalizedCovarianceMatrix.
+ * computeCovarianceMatrixNormalized.
* \param[in] cloud the input point cloud
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
+ * \return number of valid points used to determine the covariance matrix.
* In case of dense point clouds, this is the same as the size of input cloud.
* \note if return value is 0, the covariance matrix is not changed, thus not valid.
* \ingroup common
/** \brief Compute normalized the 3x3 covariance matrix of a given set of points.
* The result is returned as a Eigen::Matrix3f.
* Normalized means that every entry has been divided by the number of points in the point cloud.
- * For small number of points, or if you want explicitely the sample-variance, use computeCovarianceMatrix
+ * For small number of points, or if you want explicitly the sample-variance, use computeCovarianceMatrix
* and scale the covariance matrix with 1 / (n-1), where n is the number of points used to calculate
* the covariance matrix and is returned by the computeCovarianceMatrix function.
* \param[in] cloud the input point cloud
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
+ * \return number of valid points used to determine the covariance matrix.
* In case of dense point clouds, this is the same as the size of input cloud.
* \ingroup common
*/
* The result is returned as a Eigen::Matrix3f.
* Note: the covariance matrix is not normalized with the number of
* points. For a normalized covariance, please use
- * computeNormalizedCovarianceMatrix.
+ * computeCovarianceMatrixNormalized.
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
* The result is returned as a Eigen::Matrix3f.
* Note: the covariance matrix is not normalized with the number of
* points. For a normalized covariance, please use
- * computeNormalizedCovarianceMatrix.
+ * computeCovarianceMatrixNormalized.
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
* their indices.
* The result is returned as a Eigen::Matrix3f.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, use computeCovarianceMatrix
+ * For small number of points, or if you want explicitly the sample-variance, use computeCovarianceMatrix
* and scale the covariance matrix with 1 / (n-1), where n is the number of points used to calculate
* the covariance matrix and is returned by the computeCovarianceMatrix function.
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
/** \brief Compute the normalized 3x3 covariance matrix of a given set of points using
* their indices. The result is returned as a Eigen::Matrix3f.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, use computeCovarianceMatrix
+ * For small number of points, or if you want explicitly the sample-variance, use computeCovarianceMatrix
* and scale the covariance matrix with 1 / (n-1), where n is the number of points used to calculate
* the covariance matrix and is returned by the computeCovarianceMatrix function.
* \param[in] cloud the input point cloud
* \param[in] indices the point cloud indices that need to be used
* \param[in] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
}
/** \brief Compute the normalized 3x3 covariance matrix and the centroid of a given set of points in a single loop.
- * Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * Normalized means that every entry has been divided by the number of valid entries in the point cloud.
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
* \param[out] centroid the centroid of the set of points in the cloud
- * \return number of valid point used to determine the covariance matrix.
+ * \return number of valid points used to determine the covariance matrix.
* In case of dense point clouds, this is the same as the size of input cloud.
* \ingroup common
*/
/** \brief Compute the normalized 3x3 covariance matrix and the centroid of a given set of points in a single loop.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[in] indices subset of points given by their indices
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
* \param[out] centroid the centroid of the set of points in the cloud
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
/** \brief Compute the normalized 3x3 covariance matrix and the centroid of a given set of points in a single loop.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[in] indices subset of points given by their indices
* \param[out] centroid the centroid of the set of points in the cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
}
/** \brief Compute the normalized 3x3 covariance matrix for a already demeaned point cloud.
- * Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * Normalized means that every entry has been divided by the number of entries in the input point cloud.
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
+ * \return number of valid points used to determine the covariance matrix.
* In case of dense point clouds, this is the same as the size of input cloud.
* \ingroup common
*/
/** \brief Compute the normalized 3x3 covariance matrix for a already demeaned point cloud.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[in] indices subset of points given by their indices
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
/** \brief Compute the normalized 3x3 covariance matrix for a already demeaned point cloud.
* Normalized means that every entry has been divided by the number of entries in indices.
- * For small number of points, or if you want explicitely the sample-variance, scale the covariance matrix
+ * For small number of points, or if you want explicitly the sample-variance, scale the covariance matrix
* with n / (n-1), where n is the number of points used to calculate the covariance matrix and is returned by this function.
* \note This method is theoretically exact. However using float for internal calculations reduces the accuracy but increases the efficiency.
* \param[in] cloud the input point cloud
* \param[in] indices subset of points given by their indices
* \param[out] covariance_matrix the resultant 3x3 covariance matrix
- * \return number of valid point used to determine the covariance matrix.
- * In case of dense point clouds, this is the same as the size of input cloud.
+ * \return number of valid points used to determine the covariance matrix.
+ * In case of dense point clouds, this is the same as the size of input indices.
* \ingroup common
*/
template <typename PointT, typename Scalar> inline unsigned int
#define PCL_COMMON_COLORS_H
#include <pcl/pcl_macros.h>
+#include <pcl/point_types.h>
namespace pcl
{
- struct RGB;
-
PCL_EXPORTS RGB
getRandomColor (double min = 0.2, double max = 2.8);
- /** Color lookup table consisting of 256 colors structured in a maximally
- * discontinuous manner. Generated using the method of Glasbey et al.
- * (see https://github.com/taketwo/glasbey) */
- class PCL_EXPORTS GlasbeyLUT
+ enum ColorLUTName
+ {
+ /** Color lookup table consisting of 256 colors structured in a maximally
+ * discontinuous manner. Generated using the method of Glasbey et al.
+ * (see https://github.com/taketwo/glasbey) */
+ LUT_GLASBEY,
+ /** A perceptually uniform colormap created by Stéfan van der Walt and
+ * Nathaniel Smith for the Python matplotlib library.
+ * (see https://youtu.be/xAoljeRJ3lU for background and overview) */
+ LUT_VIRIDIS,
+ };
+
+ template <ColorLUTName T>
+ class ColorLUT
{
public:
/** Get a color from the lookup table with a given id.
*
* The id should be less than the size of the LUT (see size()). */
- static RGB at (unsigned int color_id);
+ static RGB at (size_t color_id);
/** Get the number of colors in the lookup table.
*
};
+ typedef ColorLUT<pcl::LUT_GLASBEY> GlasbeyLUT;
+ typedef ColorLUT<pcl::LUT_VIRIDIS> ViridisLUT;
+
}
#endif /* PCL_COMMON_COLORS_H */
/** \brief Get the point at maximum distance from a given point and a given pointcloud
* \param cloud the point cloud data message
- * \param pivot_pt the point from where to compute the distance
* \param indices the vector of point indices to use from \a cloud
+ * \param pivot_pt the point from where to compute the distance
* \param max_pt the point in cloud that is the farthest point away from pivot_pt
* \ingroup common
*/
* \ingroup common
*/
inline Eigen::Affine3f
- getTransformationFromTwoUnitVectors (const Eigen::Vector3f& y_direction,
+ getTransformationFromTwoUnitVectors (const Eigen::Vector3f& y_direction,
const Eigen::Vector3f& z_axis);
- /** \brief Get the transformation that will translate \a orign to (0,0,0) and rotate \a z_axis into (0,0,1)
+ /** \brief Get the transformation that will translate \a origin to (0,0,0) and rotate \a z_axis into (0,0,1)
* and \a y_direction into a vector with x=0 (or into (0,1,0) should \a y_direction be orthogonal to \a z_axis)
* \param[in] y_direction the y direction
* \param[in] z_axis the z-axis
#include <math.h>
#include <string.h>
-#if !defined(__APPLE__)
-#include <malloc.h>
-#endif
-
#include <pcl/pcl_exports.h>
#ifdef __cplusplus
/// Default constructor
CloudGenerator ();
- /** Consttructor with single generator to ensure all X, Y and Z values are within same range
- * \param params paramteres for X, Y and Z values generation. Uniqueness is ensured through
+ /** Constructor with single generator to ensure all X, Y and Z values are within same range
+ * \param params parameters for X, Y and Z values generation. Uniqueness is ensured through
* seed incrementation
*/
CloudGenerator (const GeneratorParameters& params);
- /** Constructor with independant generators per axis
+ /** Constructor with independent generators per axis
* \param x_params parameters for x values generation
* \param y_params parameters for y values generation
* \param z_params parameters for z values generation
setParameters (const GeneratorParameters& params);
/** Set parameters for x values generation
- * \param x_params paramters for x values generation
+ * \param x_params parameters for x values generation
*/
void
setParametersForX (const GeneratorParameters& x_params);
/** Set parameters for y values generation
- * \param y_params paramters for y values generation
+ * \param y_params parameters for y values generation
*/
void
setParametersForY (const GeneratorParameters& y_params);
/** Set parameters for z values generation
- * \param z_params paramters for z values generation
+ * \param z_params parameters for z values generation
*/
void
setParametersForZ (const GeneratorParameters& z_params);
#endif
#include <Eigen/Core>
+#include <pcl/console/print.h>
/**
* \file common/geometry.h
float lambda = plane_normal.dot(po);
projected = point - (lambda * plane_normal);
}
+
+
+ /** \brief Given a plane defined by plane_origin and plane_normal, find the unit vector pointing from plane_origin to the projection of point on the plane.
+ *
+ * \param[in] point Point projected on the plane
+ * \param[in] plane_origin The plane origin
+ * \param[in] plane_normal The plane normal
+ * \return unit vector pointing from plane_origin to the projection of point on the plane.
+ * \ingroup geometry
+ */
+ inline Eigen::Vector3f
+ projectedAsUnitVector (Eigen::Vector3f const &point,
+ Eigen::Vector3f const &plane_origin,
+ Eigen::Vector3f const &plane_normal)
+ {
+ Eigen::Vector3f projection;
+ project (point, plane_origin, plane_normal, projection);
+ Eigen::Vector3f projected_as_unit_vector = projection - plane_origin;
+ projected_as_unit_vector.normalize ();
+ return projected_as_unit_vector;
+ }
+
+
+ /** \brief Define a random unit vector orthogonal to axis.
+ *
+ * \param[in] axis Axis
+ * \return random unit vector orthogonal to axis
+ * \ingroup geometry
+ */
+ inline Eigen::Vector3f
+ randomOrthogonalAxis (Eigen::Vector3f const &axis)
+ {
+ Eigen::Vector3f rand_ortho_axis;
+ rand_ortho_axis.setRandom();
+ if (std::abs (axis.z ()) > 1E-8f)
+ {
+ rand_ortho_axis.z () = -(axis.x () * rand_ortho_axis.x () + axis.y () * rand_ortho_axis.y ()) / axis.z ();
+ }
+ else if (std::abs (axis.y ()) > 1E-8f)
+ {
+ rand_ortho_axis.y () = -(axis.x () * rand_ortho_axis.x () + axis.z () * rand_ortho_axis.z ()) / axis.y ();
+ }
+ else if (std::abs (axis.x ()) > 1E-8f)
+ {
+ rand_ortho_axis.x () = -(axis.y () * rand_ortho_axis.y () + axis.z () * rand_ortho_axis.z ()) / axis.x ();
+ }
+ else
+ {
+ PCL_WARN ("[pcl::randomOrthogonalAxis] provided axis has norm < 1E-8f");
+ }
+
+ rand_ortho_axis.normalize ();
+ return rand_ortho_axis;
+ }
+
+
}
}
template<typename real> void
pcl::BivariatePolynomialT<real>::calculateGradient (bool forceRecalc)
{
- if (gradient_x!=NULL && !forceRecalc)
+ if (gradient_x!=NULL && !forceRecalc) return;
if (gradient_x == NULL)
gradient_x = new pcl::BivariatePolynomialT<real> (degree-1);
inline void
pcl::getMeanStd (const std::vector<float> &values, double &mean, double &stddev)
{
+ // throw an exception when the input array is empty
+ if (values.empty ())
+ {
+ PCL_THROW_EXCEPTION (BadArgumentException, "Input array must have at least 1 element.");
+ }
+
+ // when the array has only one element, mean is the number itself and standard dev is 0
+ if (values.size () == 1)
+ {
+ mean = values.at (0);
+ stddev = 0;
+ return;
+ }
+
double sum = 0, sq_sum = 0;
for (size_t i = 0; i < values.size (); ++i)
std::swap (roots (0), roots (1));
}
- if (roots (0) <= 0) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0
+ if (roots (0) <= 0) // eigenval for symmetric positive semi-definite matrix can not be negative! Set it to 0
computeRoots2 (c2, c1, roots);
}
}
plane.coeffs () << plane_in;
plane.transform (transformation);
plane_out << plane.coeffs ();
+
+ // Versions prior to 3.3.2 don't normalize the result
+ #if !EIGEN_VERSION_AT_LEAST (3, 3, 2)
+ plane_out /= plane_out.template head<3> ().norm ();
+ #endif
}
//////////////////////////////////////////////////////////////////////////////////////////
pcl::ModelCoefficients::Ptr plane_out,
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transformation)
{
- Eigen::Matrix < Scalar, 4, 1 > v_plane_in (std::vector < Scalar > (plane_in->values.begin (), plane_in->values.end ()).data ());
+ std::vector<Scalar> values (plane_in->values.begin (), plane_in->values.end ());
+ Eigen::Matrix < Scalar, 4, 1 > v_plane_in (values.data ());
pcl::transformPlane (v_plane_in, v_plane_in, transformation);
plane_out->values.resize (4);
for (int i = 0; i < 4; i++)
if (fabs (determinant) < determinant_tolerance)
{
// det ~= 0
- PCL_DEBUG ("At least two planes are parralel.\n");
+ PCL_DEBUG ("At least two planes are parallel.\n");
return (false);
}
#include <pcl/common/transforms.h>
#include <pcl/exceptions.h>
-/////////////////////////////////////////////////////////////////////////////////////////
-/** \brief Constructor with direct computation
- * \param[in] cloud input m*n matrix (ie n vectors of R(m))
- * \param[in] basis_only flag to compute only the PCA basis
- */
-template<typename PointT>
-pcl::PCA<PointT>::PCA (const pcl::PointCloud<PointT> &cloud, bool basis_only)
-{
- Base ();
- basis_only_ = basis_only;
- setInputCloud (cloud.makeShared ());
- compute_done_ = initCompute ();
-}
-
/////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> bool
pcl::PCA<PointT>::initCompute ()
if(!Base::initCompute ())
{
PCL_THROW_EXCEPTION (InitFailedException, "[pcl::PCA::initCompute] failed");
- return (false);
}
if(indices_->size () < 3)
{
PCL_THROW_EXCEPTION (InitFailedException, "[pcl::PCA::initCompute] number of points < 3");
- return (false);
}
// Compute mean
demeanPointCloud (*input_, *indices_, mean_, cloud_demean);
assert (cloud_demean.cols () == int (indices_->size ()));
// Compute the product cloud_demean * cloud_demean^T
- Eigen::Matrix3f alpha = static_cast<Eigen::Matrix3f> (cloud_demean.topRows<3> () * cloud_demean.topRows<3> ().transpose ());
+ const Eigen::Matrix3f alpha = (1.f / (float (indices_->size ()) - 1.f))
+ * cloud_demean.topRows<3> () * cloud_demean.topRows<3> ().transpose ();
// Compute eigen vectors and values
Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> evd (alpha);
eigenvectors_.col (i) = evd.eigenvectors ().col (2-i);
}
// If not basis only then compute the coefficients
-
if (!basis_only_)
coefficients_ = eigenvectors_.transpose() * cloud_demean.topRows<3> ();
compute_done_ = true;
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2010, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef PCL_POLYNOMIAL_CALCULATIONS_HPP_
+#define PCL_POLYNOMIAL_CALCULATIONS_HPP_
+
////////////////////////////////////
template <typename real>
////////////////////////////////////
template <typename real>
-inline void
+inline void
pcl::PolynomialCalculationsT<real>::Parameters::setZeroValue (real new_zero_value)
{
zero_value = new_zero_value;
{
roots.push_back (-b/a);
}
-
+
#if 0
cout << __PRETTY_FUNCTION__ << ": Found "<<roots.size ()<<" roots.\n";
for (unsigned int i=0; i<roots.size (); i++)
alpha2 = alpha*alpha,
alpha3 = alpha2*alpha,
beta2 = beta*beta;
-
+
// Value for resubstitution:
double resubValue = b/ (3*a);
roots.push_back (-tmp1*cos (tmp2 + M_PI/3.0) - resubValue);
roots.push_back (-tmp1*cos (tmp2 - M_PI/3.0) - resubValue);
}
-
+
#if 0
cout << __PRETTY_FUNCTION__ << ": Found "<<roots.size ()<<" roots.\n";
for (unsigned int i=0; i<roots.size (); i++)
//cout << "Highest order element is 0 => Calling solveCubicEquation.\n";
solveCubicEquation (b, c, d, e, roots);
return;
- }
+ }
if (isNearlyZero (e))
{
if (!isNearlyZero (tmpRoots[i]))
roots.push_back (tmpRoots[i]);
return;
- }
+ }
double root1, root2, root3, root4,
a2 = a*a,
beta = (b3/ (8.0*a3)) - ( (b*c)/ (2.0*a2)) + (d/a),
gamma = ( (-3.0*b4)/ (256.0*a4)) + ( (c*b2)/ (16.0*a3)) - ( (b*d)/ (4.0*a2)) + (e/a),
alpha2 = alpha*alpha;
-
+
// Value for resubstitution:
double resubValue = b/ (4*a);
//cout << "Trying to solve y^4 + "<<alpha<<"y^2 + "<<beta<<"y + "<<gamma<<"\n";
-
+
if (isNearlyZero (beta))
{ // y^4 + alpha*y^2 + gamma\n";
//cout << "Using beta=0 condition\n";
y += p/ (3.0*u);
double w = alpha + 2.0*y;
-
+
if (w > 0)
{
w = sqrt (w);
double tmp1 = - (3.0*alpha + 2.0*y + 2.0* (beta/w)),
tmp2 = - (3.0*alpha + 2.0*y - 2.0* (beta/w));
-
+
if (tmp1 > 0)
{
tmp1 = sqrt (tmp1);
root3 = - (b/ (4.0*a)) - 0.5*w;
roots.push_back (root3);
}
-
+
//cout << "Test: " << alpha<<", "<<beta<<", "<<gamma<<", "<<p<<", "<<q<<", "<<u <<", "<<y<<", "<<w<<"\n";
}
-
+
#if 0
cout << __PRETTY_FUNCTION__ << ": Found "<<roots.size ()<<" roots.\n";
for (unsigned int i=0; i<roots.size (); i++)
//cout << "Searching for the "<<parameters_size<<" parameters for the bivariate polynom of degree "
// << polynomial_degree<<" using "<<samplePoints.size ()<<" points.\n";
-
+
if (parameters_size > samplePoints.size ()) // Too many parameters for this number of equations (points)?
{
- return false;
+ return false;
// Reduce degree of polynomial
//polynomial_degree = (unsigned int) (0.5f* (std::sqrt (8*samplePoints.size ()+1) - 3));
//parameters_size = BivariatePolynomialT<real>::getNoOfParametersFromDegree (polynomial_degree);
//cout << "Not enough points, so degree of polynomial was decreased to "<<polynomial_degree
// << " ("<<samplePoints.size ()<<" points => "<<parameters_size<<" parameters)\n";
}
-
+
ret.setDegree (polynomial_degree);
-
+
//double coeffStuffStartTime=-get_time ();
Eigen::Matrix<real, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> A (parameters_size, parameters_size);
A.setZero();
}
tmpX *= currentX;
}
-
+
real* APtr = &A(0,0);
real* bPtr = &b[0];
real* tmpCPtr1=tmpC;
for (unsigned int i=0; i<parameters_size; ++i)
{
* (bPtr++) += currentZ * *tmpCPtr1;
-
+
real* tmpCPtr2=tmpC;
for (unsigned int j=0; j<parameters_size; ++j)
{
* (APtr++) += *tmpCPtr1 * * (tmpCPtr2++);
}
-
+
++tmpCPtr1;
}
//A += DMatrix<real>::outProd (tmpC);
//}
//cout << "Calculating matrix A and vector b (size "<<b.size ()<<") from "<<samplePoints.size ()<<" points took "
//<< (coeffStuffStartTime+get_time ())*1000<<"ms.\n";
-
+
Eigen::Matrix<real, Eigen::Dynamic, 1> parameters;
//double choleskyStartTime=-get_time ();
//parameters = A.choleskySolve (b);
//cout << "Inverse took "<< (invStartTime+get_time ())*1000<<"ms.\n";
//cout << PVARC (A)<<PVARC (b)<<PVARN (parameters);
-
+
real inversionCheckResult = (A*parameters - b).norm ();
if (inversionCheckResult > 1e-5)
{
//cout << "Inversion result: "<< inversionCheckResult<<" for matrix "<<A<<"\n";
return false;
}
-
+
for (unsigned int i=0; i<parameters_size; i++)
ret.parameters[i] = parameters[i];
-
+
//cout << "Resulting polynomial is "<<ret<<"\n";
//Test of gradient: ret.calculateGradient ();
delete [] tmpC;
return true;
}
+
+#endif // PCL_POLYNOMIAL_CALCULATIONS_HPP_
+
*
*/
+#if defined(__SSE2__)
+#include <xmmintrin.h>
+#endif
+
+#if defined(__AVX__)
+#include <immintrin.h>
+#endif
+
+namespace pcl
+{
+
+ namespace detail
+ {
+
+ /** A helper struct to apply an SO3 or SE3 transform to a 3D point.
+ * Supports single and double precision transform matrices. */
+ template<typename Scalar>
+ struct Transformer
+ {
+ const Eigen::Matrix<Scalar, 4, 4>& tf;
+
+ /** Construct a transformer object.
+ * The transform matrix is captured by const reference. Make sure that it does not go out of scope before this
+ * object does. */
+ Transformer (const Eigen::Matrix<Scalar, 4, 4>& transform) : tf (transform) { };
+
+ /** Apply SO3 transform (top-left corner of the transform matrix).
+ * \param[in] src input 3D point (pointer to 3 floats)
+ * \param[out] tgt output 3D point (pointer to 4 floats), can be the same as input. The fourth element is set to 0. */
+ void so3 (const float* src, float* tgt) const
+ {
+ const Scalar p[3] = { src[0], src[1], src[2] }; // need this when src == tgt
+ tgt[0] = static_cast<float> (tf (0, 0) * p[0] + tf (0, 1) * p[1] + tf (0, 2) * p[2]);
+ tgt[1] = static_cast<float> (tf (1, 0) * p[0] + tf (1, 1) * p[1] + tf (1, 2) * p[2]);
+ tgt[2] = static_cast<float> (tf (2, 0) * p[0] + tf (2, 1) * p[1] + tf (2, 2) * p[2]);
+ tgt[3] = 0;
+ }
+
+ /** Apply SE3 transform.
+ * \param[in] src input 3D point (pointer to 3 floats)
+ * \param[out] tgt output 3D point (pointer to 4 floats), can be the same as input. The fourth element is set to 1. */
+ void se3 (const float* src, float* tgt) const
+ {
+ const Scalar p[3] = { src[0], src[1], src[2] }; // need this when src == tgt
+ tgt[0] = static_cast<float> (tf (0, 0) * p[0] + tf (0, 1) * p[1] + tf (0, 2) * p[2] + tf (0, 3));
+ tgt[1] = static_cast<float> (tf (1, 0) * p[0] + tf (1, 1) * p[1] + tf (1, 2) * p[2] + tf (1, 3));
+ tgt[2] = static_cast<float> (tf (2, 0) * p[0] + tf (2, 1) * p[1] + tf (2, 2) * p[2] + tf (2, 3));
+ tgt[3] = 1;
+ }
+ };
+
+#if defined(__SSE2__)
+
+ /** Optimized version for single-precision transforms using SSE2 intrinsics. */
+ template<>
+ struct Transformer<float>
+ {
+ /// Columns of the transform matrix stored in XMM registers.
+ __m128 c[4];
+
+ Transformer(const Eigen::Matrix4f& tf)
+ {
+ for (size_t i = 0; i < 4; ++i)
+ c[i] = _mm_load_ps (tf.col (i).data ());
+ }
+
+ void so3 (const float* src, float* tgt) const
+ {
+ __m128 p0 = _mm_mul_ps (_mm_load_ps1 (&src[0]), c[0]);
+ __m128 p1 = _mm_mul_ps (_mm_load_ps1 (&src[1]), c[1]);
+ __m128 p2 = _mm_mul_ps (_mm_load_ps1 (&src[2]), c[2]);
+ _mm_store_ps (tgt, _mm_add_ps(p0, _mm_add_ps(p1, p2)));
+ }
+
+ void se3 (const float* src, float* tgt) const
+ {
+ __m128 p0 = _mm_mul_ps (_mm_load_ps1 (&src[0]), c[0]);
+ __m128 p1 = _mm_mul_ps (_mm_load_ps1 (&src[1]), c[1]);
+ __m128 p2 = _mm_mul_ps (_mm_load_ps1 (&src[2]), c[2]);
+ _mm_store_ps (tgt, _mm_add_ps(p0, _mm_add_ps(p1, _mm_add_ps(p2, c[3]))));
+ }
+ };
+
+#if !defined(__AVX__)
+
+ /** Optimized version for double-precision transform using SSE2 intrinsics. */
+ template<>
+ struct Transformer<double>
+ {
+ /// Columns of the transform matrix stored in XMM registers.
+ __m128d c[4][2];
+
+ Transformer(const Eigen::Matrix4d& tf)
+ {
+ for (size_t i = 0; i < 4; ++i)
+ {
+ c[i][0] = _mm_load_pd (tf.col (i).data () + 0);
+ c[i][1] = _mm_load_pd (tf.col (i).data () + 2);
+ }
+ }
+
+ void so3 (const float* src, float* tgt) const
+ {
+ __m128d xx = _mm_cvtps_pd (_mm_load_ps1 (&src[0]));
+ __m128d p0 = _mm_mul_pd (xx, c[0][0]);
+ __m128d p1 = _mm_mul_pd (xx, c[0][1]);
+
+ for (size_t i = 1; i < 3; ++i)
+ {
+ __m128d vv = _mm_cvtps_pd (_mm_load_ps1 (&src[i]));
+ p0 = _mm_add_pd (_mm_mul_pd (vv, c[i][0]), p0);
+ p1 = _mm_add_pd (_mm_mul_pd (vv, c[i][1]), p1);
+ }
+
+ _mm_store_ps (tgt, _mm_movelh_ps (_mm_cvtpd_ps (p0), _mm_cvtpd_ps (p1)));
+ }
+
+ void se3 (const float* src, float* tgt) const
+ {
+ __m128d p0 = c[3][0];
+ __m128d p1 = c[3][1];
+
+ for (size_t i = 0; i < 3; ++i)
+ {
+ __m128d vv = _mm_cvtps_pd (_mm_load_ps1 (&src[i]));
+ p0 = _mm_add_pd (_mm_mul_pd (vv, c[i][0]), p0);
+ p1 = _mm_add_pd (_mm_mul_pd (vv, c[i][1]), p1);
+ }
+
+ _mm_store_ps (tgt, _mm_movelh_ps (_mm_cvtpd_ps (p0), _mm_cvtpd_ps (p1)));
+ }
+
+ };
+
+#else
+
+ /** Optimized version for double-precision transform using AVX intrinsics. */
+ template<>
+ struct Transformer<double>
+ {
+ __m256d c[4];
+
+ Transformer(const Eigen::Matrix4d& tf)
+ {
+ for (size_t i = 0; i < 4; ++i)
+ c[i] = _mm256_load_pd (tf.col (i).data ());
+ }
+
+ void so3 (const float* src, float* tgt) const
+ {
+ __m256d p0 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[0])), c[0]);
+ __m256d p1 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[1])), c[1]);
+ __m256d p2 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[2])), c[2]);
+ _mm_store_ps (tgt, _mm256_cvtpd_ps (_mm256_add_pd(p0, _mm256_add_pd(p1, p2))));
+ }
+
+ void se3 (const float* src, float* tgt) const
+ {
+ __m256d p0 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[0])), c[0]);
+ __m256d p1 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[1])), c[1]);
+ __m256d p2 = _mm256_mul_pd (_mm256_cvtps_pd (_mm_load_ps1 (&src[2])), c[2]);
+ _mm_store_ps (tgt, _mm256_cvtpd_ps (_mm256_add_pd(p0, _mm256_add_pd(p1, _mm256_add_pd(p2, c[3])))));
+ }
+
+ };
+
+#endif
+#endif
+
+ }
+
+}
+
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename Scalar> void
pcl::transformPointCloud (const pcl::PointCloud<PointT> &cloud_in,
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
if (cloud_in.is_dense)
{
// If the dataset is dense, simply transform it!
for (size_t i = 0; i < cloud_out.points.size (); ++i)
- {
- //cloud_out.points[i].getVector3fMap () = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
- }
+ tf.se3 (cloud_in[i].data, cloud_out[i].data);
}
else
{
!pcl_isfinite (cloud_in.points[i].y) ||
!pcl_isfinite (cloud_in.points[i].z))
continue;
- //cloud_out.points[i].getVector3fMap () = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
+ tf.se3 (cloud_in[i].data, cloud_out[i].data);
}
}
}
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
if (cloud_in.is_dense)
{
// If the dataset is dense, simply transform it!
// Copy fields first, then transform xyz data
if (copy_all_fields)
cloud_out.points[i] = cloud_in.points[indices[i]];
- //cloud_out.points[i].getVector3fMap () = transform*cloud_out.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[indices[i]].x, cloud_in[indices[i]].y, cloud_in[indices[i]].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
+ tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
}
}
else
!pcl_isfinite (cloud_in.points[indices[i]].y) ||
!pcl_isfinite (cloud_in.points[indices[i]].z))
continue;
- //cloud_out.points[i].getVector3fMap () = transform*cloud_out.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[indices[i]].x, cloud_in[indices[i]].y, cloud_in[indices[i]].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
+ tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
}
}
}
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
}
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
for (size_t i = 0; i < cloud_out.points.size (); ++i)
{
- //cloud_out.points[i].getVector3fMap() = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
-
- // Rotate normals (WARNING: transform.rotation () uses SVD internally!)
- //cloud_out.points[i].getNormalVector3fMap() = transform.rotation () * cloud_in.points[i].getNormalVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> nt (cloud_in[i].normal_x, cloud_in[i].normal_y, cloud_in[i].normal_z);
- cloud_out[i].normal_x = static_cast<float> (transform (0, 0) * nt.coeffRef (0) + transform (0, 1) * nt.coeffRef (1) + transform (0, 2) * nt.coeffRef (2));
- cloud_out[i].normal_y = static_cast<float> (transform (1, 0) * nt.coeffRef (0) + transform (1, 1) * nt.coeffRef (1) + transform (1, 2) * nt.coeffRef (2));
- cloud_out[i].normal_z = static_cast<float> (transform (2, 0) * nt.coeffRef (0) + transform (2, 1) * nt.coeffRef (1) + transform (2, 2) * nt.coeffRef (2));
+ tf.se3 (cloud_in[i].data, cloud_out[i].data);
+ tf.so3 (cloud_in[i].data_n, cloud_out[i].data_n);
}
}
// Dataset might contain NaNs and Infs, so check for them first.
!pcl_isfinite (cloud_in.points[i].y) ||
!pcl_isfinite (cloud_in.points[i].z))
continue;
-
- //cloud_out.points[i].getVector3fMap() = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
-
- // Rotate normals
- //cloud_out.points[i].getNormalVector3fMap() = transform.rotation () * cloud_in.points[i].getNormalVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> nt (cloud_in[i].normal_x, cloud_in[i].normal_y, cloud_in[i].normal_z);
- cloud_out[i].normal_x = static_cast<float> (transform (0, 0) * nt.coeffRef (0) + transform (0, 1) * nt.coeffRef (1) + transform (0, 2) * nt.coeffRef (2));
- cloud_out[i].normal_y = static_cast<float> (transform (1, 0) * nt.coeffRef (0) + transform (1, 1) * nt.coeffRef (1) + transform (1, 2) * nt.coeffRef (2));
- cloud_out[i].normal_z = static_cast<float> (transform (2, 0) * nt.coeffRef (0) + transform (2, 1) * nt.coeffRef (1) + transform (2, 2) * nt.coeffRef (2));
+ tf.se3 (cloud_in[i].data, cloud_out[i].data);
+ tf.so3 (cloud_in[i].data_n, cloud_out[i].data_n);
}
}
}
cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
// If the data is dense, we don't need to check for NaN
if (cloud_in.is_dense)
{
// Copy fields first, then transform
if (copy_all_fields)
cloud_out.points[i] = cloud_in.points[indices[i]];
- //cloud_out.points[i].getVector3fMap() = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[indices[i]].x, cloud_in[indices[i]].y, cloud_in[indices[i]].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
-
- // Rotate normals
- //cloud_out.points[i].getNormalVector3fMap() = transform.rotation () * cloud_in.points[i].getNormalVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> nt (cloud_in[indices[i]].normal_x, cloud_in[indices[i]].normal_y, cloud_in[indices[i]].normal_z);
- cloud_out[i].normal_x = static_cast<float> (transform (0, 0) * nt.coeffRef (0) + transform (0, 1) * nt.coeffRef (1) + transform (0, 2) * nt.coeffRef (2));
- cloud_out[i].normal_y = static_cast<float> (transform (1, 0) * nt.coeffRef (0) + transform (1, 1) * nt.coeffRef (1) + transform (1, 2) * nt.coeffRef (2));
- cloud_out[i].normal_z = static_cast<float> (transform (2, 0) * nt.coeffRef (0) + transform (2, 1) * nt.coeffRef (1) + transform (2, 2) * nt.coeffRef (2));
+ tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
+ tf.so3 (cloud_in[indices[i]].data_n, cloud_out[i].data_n);
}
}
// Dataset might contain NaNs and Infs, so check for them first.
!pcl_isfinite (cloud_in.points[indices[i]].z))
continue;
- //cloud_out.points[i].getVector3fMap() = transform * cloud_in.points[i].getVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> pt (cloud_in[indices[i]].x, cloud_in[indices[i]].y, cloud_in[indices[i]].z);
- cloud_out[i].x = static_cast<float> (transform (0, 0) * pt.coeffRef (0) + transform (0, 1) * pt.coeffRef (1) + transform (0, 2) * pt.coeffRef (2) + transform (0, 3));
- cloud_out[i].y = static_cast<float> (transform (1, 0) * pt.coeffRef (0) + transform (1, 1) * pt.coeffRef (1) + transform (1, 2) * pt.coeffRef (2) + transform (1, 3));
- cloud_out[i].z = static_cast<float> (transform (2, 0) * pt.coeffRef (0) + transform (2, 1) * pt.coeffRef (1) + transform (2, 2) * pt.coeffRef (2) + transform (2, 3));
-
- // Rotate normals
- //cloud_out.points[i].getNormalVector3fMap() = transform.rotation () * cloud_in.points[i].getNormalVector3fMap ();
- Eigen::Matrix<Scalar, 3, 1> nt (cloud_in[indices[i]].normal_x, cloud_in[indices[i]].normal_y, cloud_in[indices[i]].normal_z);
- cloud_out[i].normal_x = static_cast<float> (transform (0, 0) * nt.coeffRef (0) + transform (0, 1) * nt.coeffRef (1) + transform (0, 2) * nt.coeffRef (2));
- cloud_out[i].normal_y = static_cast<float> (transform (1, 0) * nt.coeffRef (0) + transform (1, 1) * nt.coeffRef (1) + transform (1, 2) * nt.coeffRef (2));
- cloud_out[i].normal_z = static_cast<float> (transform (2, 0) * nt.coeffRef (0) + transform (2, 1) * nt.coeffRef (1) + transform (2, 2) * nt.coeffRef (2));
+ tf.se3 (cloud_in[indices[i]].data, cloud_out[i].data);
+ tf.so3 (cloud_in[indices[i]].data_n, cloud_out[i].data_n);
}
}
}
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform)
{
PointT ret = point;
- ret.getVector3fMap () = transform * point.getVector3fMap ();
-
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ tf.se3 (point.data, ret.data);
return (ret);
}
const Eigen::Transform<Scalar, 3, Eigen::Affine> &transform)
{
PointT ret = point;
- ret.getVector3fMap () = transform * point.getVector3fMap ();
- ret.getNormalVector3fMap () = transform.rotation () * point.getNormalVector3fMap ();
-
+ pcl::detail::Transformer<Scalar> tf (transform.matrix ());
+ tf.se3 (point.data, ret.data);
+ tf.so3 (point.data_n, ret.data_n);
return (ret);
}
{
namespace common
{
- /** \brief Intensity field accessor provides access to the inetnsity filed of a PoinT
+ /** \brief Intensity field accessor provides access to the intensity filed of a PoinT
* implementation for specific types should be done in \file pcl/common/impl/intensity.hpp
*/
template<typename PointT>
p.intensity = intensity;
}
/** \brief subtract value from intensity field
- * \param p point for which to modify inetnsity
+ * \param p point for which to modify intensity
* \param[in] value value to be subtracted from point intensity
*/
inline void
p.intensity -= value;
}
/** \brief add value to intensity field
- * \param p point for which to modify inetnsity
+ * \param p point for which to modify intensity
* \param[in] value value to be added to point intensity
*/
inline void
}
/** \brief Determine the point of intersection of three non-parallel planes by solving the equations.
- * \note If using nearly parralel planes you can lower the determinant_tolerance value. This can
+ * \note If using nearly parallel planes you can lower the determinant_tolerance value. This can
* lead to inconsistent results.
* If the three planes intersects in a line the point will be anywhere on the line.
* \param[in] plane_a are the coefficients of the first plane in the form ax + by + cz + d = 0
namespace pcl
{
/** \brief Get the index of a specified field (i.e., dimension/channel)
- * \param[in] cloud the the point cloud message
+ * \param[in] cloud the point cloud message
* \param[in] field_name the string defining the field name
* \ingroup common
*/
}
/** \brief Get the index of a specified field (i.e., dimension/channel)
- * \param[in] cloud the the point cloud message
+ * \param[in] cloud the point cloud message
* \param[in] field_name the string defining the field name
* \param[out] fields a vector to the original \a PCLPointField vector that the raw PointCloud message contains
* \ingroup common
getFields (std::vector<pcl::PCLPointField> &fields);
/** \brief Get the list of all fields available in a given cloud
- * \param[in] cloud the the point cloud message
+ * \param[in] cloud the point cloud message
* \ingroup common
*/
template <typename PointT> inline std::string
return (pcl::PCLPointField::INT8);
if (type == 'U')
return (pcl::PCLPointField::UINT8);
+ break;
case 2:
if (type == 'I')
return (pcl::PCLPointField::INT16);
if (type == 'U')
return (pcl::PCLPointField::UINT16);
+ break;
case 4:
if (type == 'I')
return (pcl::PCLPointField::UINT32);
if (type == 'F')
return (pcl::PCLPointField::FLOAT32);
+ break;
case 8:
- return (pcl::PCLPointField::FLOAT64);
-
- default:
- return (-1);
+ if (type == 'F')
+ return (pcl::PCLPointField::FLOAT64);
+ break;
}
+ return (-1);
}
/** \brief Obtains the type of the PCLPointField from a specific PCLPointField as a char
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
- * BORDER_TRANSPARENT: mnopqr|abcdefgh|tuvwxyz where m-r and t-z are orignal values of cloud_out
+ * BORDER_TRANSPARENT: mnopqr|abcdefgh|tuvwxyz where m-r and t-z are original values of cloud_out
* \param value
* \throw pcl::BadArgumentException if any of top, bottom, left or right is negative.
* \ingroup common
, mean_ ()
, eigenvalues_ ()
{}
-
- /** \brief Constructor with direct computation
- * X input m*n matrix (ie n vectors of R(m))
- * basis_only flag to compute only the PCA basis
- */
- PCL_DEPRECATED ("Use PCA (bool basis_only); setInputCloud (X.makeShared ()); instead")
- PCA (const pcl::PointCloud<PointT>& X, bool basis_only = false);
/** Copy Constructor
* \param[in] pca PCA object
*/
inline void
reconstruct (const PointCloud& projection, PointCloud& input);
-
private:
inline bool
initCompute ();
namespace pcl
{
/**
- * \brief calculate 3D transformation based on point correspondencdes
+ * \brief calculate 3D transformation based on point correspondences
* \author Bastian Steder
* \ingroup common
*/
* custom values.
* \param[in] input the input point cloud
* \param[out] output the output point cloud
- * \param[in] val the point value to be insterted
+ * \param[in] val the point value to be inserted
* \param[in] amount the amount of rows to be added
*/
template <typename PointT> void
* custom values.
* \param[in] input the input point cloud
* \param[out] output the output point cloud
- * \param[in] val the point value to be insterted
+ * \param[in] val the point value to be inserted
* \param[in] amount the amount of columns to be added
*/
template <typename PointT> void
stop_watch_.reset ();
}
- /** \brief Notifies the class that the event occured. */
+ /** \brief Notifies the class that the event occurred. */
void event ()
{
event_time_queue_.push (stop_watch_.getTimeSeconds ());
/** \brief Destructor. */
~TimeTrigger ();
- /** \brief registeres a callback
+ /** \brief registers a callback
* \param[in] callback callback function to the list of callbacks. signature has to be boost::function<void()>
* \return connection the connection, which can be used to disable/enable and remove callback from list
*/
{
namespace utils
{
- /** \brief Check if val1 and val2 are equals to an epsilon extent
+ /** \brief Check if val1 and val2 are equal to an epsilon extent
* \param[in] val1 first number to check
* \param[in] val2 second number to check
* \param[in] eps epsilon
* \ingroup common
*/
template <typename real, int dimension>
- class VectorAverage
+ class VectorAverage
{
public:
//-----CONSTRUCTOR&DESTRUCTOR-----
/** Get the eigenvector corresponding to the smallest eigenvalue */
inline void
getEigenVector1 (Eigen::Matrix<real, dimension, 1>& eigen_vector1) const;
+
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//-----VARIABLES-----
+
protected:
//-----METHODS-----
* \note find_switch is simply returning find_argument != -1.
*/
PCL_EXPORTS bool
- find_switch (int argc, char** argv, const char* argument_name);
+ find_switch (int argc, const char * const * argv, const char * argument_name);
/** \brief Finds the position of the argument with name "argument_name" in the argument list "argv"
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- find_argument (int argc, char** argv, const char* argument_name);
+ find_argument (int argc, const char * const * argv, const char * argument_name);
/** \brief Template version for parsing arguments. Template parameter needs to have input stream operator overloaded!
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
template<typename Type> int
- parse (int argc, char** argv, const char* argument_name, Type& value)
+ parse (int argc, const char * const * argv, const char * argument_name, Type& value)
{
int index = find_argument (argc, argv, argument_name) + 1;
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, std::string &val);
+ parse_argument (int argc, const char * const * argv, const char * str, std::string &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, bool &val);
+ parse_argument (int argc, const char * const * argv, const char * str, bool &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, float &val);
+ parse_argument (int argc, const char * const * argv, const char * str, float &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, double &val);
+ parse_argument (int argc, const char * const * argv, const char * str, double &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, int &val);
+ parse_argument (int argc, const char * const * argv, const char * str, int &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, unsigned int &val);
+ parse_argument (int argc, const char * const * argv, const char * str, unsigned int &val);
/** \brief Parse for a specific given command line argument.
* \param[in] argc the number of command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_argument (int argc, char** argv, const char* str, char &val);
+ parse_argument (int argc, const char * const * argv, const char * str, char &val);
/** \brief Parse for specific given command line arguments (2x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_2x_arguments (int argc, char** argv, const char* str, float &f, float &s, bool debug = true);
+ parse_2x_arguments (int argc, const char * const * argv, const char * str, float &f, float &s, bool debug = true);
/** \brief Parse for specific given command line arguments (2x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_2x_arguments (int argc, char** argv, const char* str, double &f, double &s, bool debug = true);
+ parse_2x_arguments (int argc, const char * const * argv, const char * str, double &f, double &s, bool debug = true);
/** \brief Parse for specific given command line arguments (2x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_2x_arguments (int argc, char** argv, const char* str, int &f, int &s, bool debug = true);
+ parse_2x_arguments (int argc, const char * const * argv, const char * str, int &f, int &s, bool debug = true);
/** \brief Parse for specific given command line arguments (3x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_3x_arguments (int argc, char** argv, const char* str, float &f, float &s, float &t, bool debug = true);
+ parse_3x_arguments (int argc, const char * const * argv, const char * str, float &f, float &s, float &t, bool debug = true);
/** \brief Parse for specific given command line arguments (3x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_3x_arguments (int argc, char** argv, const char* str, double &f, double &s, double &t, bool debug = true);
+ parse_3x_arguments (int argc, const char * const * argv, const char * str, double &f, double &s, double &t, bool debug = true);
/** \brief Parse for specific given command line arguments (3x values comma
* separated).
* return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_3x_arguments (int argc, char** argv, const char* str, int &f, int &s, int &t, bool debug = true);
+ parse_3x_arguments (int argc, const char * const * argv, const char * str, int &f, int &s, int &t, bool debug = true);
/** \brief Parse for specific given command line arguments (3x values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_x_arguments (int argc, char** argv, const char* str, std::vector<double>& v);
+ parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<double>& v);
/** \brief Parse for specific given command line arguments (N values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_x_arguments (int argc, char** argv, const char* str, std::vector<float>& v);
+ parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<float>& v);
/** \brief Parse for specific given command line arguments (N values comma
* separated).
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS int
- parse_x_arguments (int argc, char** argv, const char* str, std::vector<int>& v);
+ parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<int>& v);
- /** \brief Parse for specific given command line arguments (multiple occurences
+ /** \brief Parse for specific given command line arguments (multiple occurrences
* of the same command line parameter).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return index of found argument or -1 if arguments do not appear in list
*/
PCL_EXPORTS bool
- parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<int> &values);
+ parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<int> &values);
- /** \brief Parse for specific given command line arguments (multiple occurences
+ /** \brief Parse for specific given command line arguments (multiple occurrences
* of the same command line parameter).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return true if found, false otherwise
*/
PCL_EXPORTS bool
- parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<float> &values);
+ parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<float> &values);
- /** \brief Parse for specific given command line arguments (multiple occurences
+ /** \brief Parse for specific given command line arguments (multiple occurrences
* of the same command line parameter).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return true if found, false otherwise
*/
PCL_EXPORTS bool
- parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<double> &values);
+ parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<double> &values);
- /** \brief Parse for a specific given command line argument (multiple occurences
+ /** \brief Parse for a specific given command line argument (multiple occurrences
* of the same command line parameter).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return true if found, false otherwise
*/
PCL_EXPORTS bool
- parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<std::string> &values);
+ parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<std::string> &values);
- /** \brief Parse command line arguments for file names with given extension (multiple occurences
+ /** \brief Parse command line arguments for file names with given extension (multiple occurrences
* of 2x argument groups, separated by commas).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return true if found, false otherwise
*/
PCL_EXPORTS bool
- parse_multiple_2x_arguments (int argc, char** argv, const char* str,
+ parse_multiple_2x_arguments (int argc, const char * const * argv, const char * str,
std::vector<double> &values_f,
std::vector<double> &values_s);
- /** \brief Parse command line arguments for file names with given extension (multiple occurences
+ /** \brief Parse command line arguments for file names with given extension (multiple occurrences
* of 3x argument groups, separated by commas).
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return true if found, false otherwise
*/
PCL_EXPORTS bool
- parse_multiple_3x_arguments (int argc, char** argv, const char* str,
+ parse_multiple_3x_arguments (int argc, const char * const * argv, const char * str,
std::vector<double> &values_f,
std::vector<double> &values_s,
std::vector<double> &values_t);
+ /** \brief Parse command line arguments for file names with given extension vector
+ * \param[in] argc the number of command line arguments
+ * \param[in] argv the command line arguments
+ * \param[in] extensions the extensions to search for
+ * \return a vector with file names indices
+ */
+ PCL_EXPORTS std::vector<int>
+ parse_file_extension_argument (int argc, const char * const * argv,
+ const std::vector<std::string> &extensions);
+
/** \brief Parse command line arguments for file names with given extension
* \param[in] argc the number of command line arguments
* \param[in] argv the command line arguments
* \return a vector with file names indices
*/
PCL_EXPORTS std::vector<int>
- parse_file_extension_argument (int argc, char** argv, const std::string &ext);
+ parse_file_extension_argument (int argc, const char * const * argv, const std::string &ext);
}
}
namespace pcl
{
/** \brief Correspondence represents a match between two entities (e.g., points, descriptors, etc). This is
- * represesented via the indices of a \a source point and a \a target point, and the distance between them.
+ * represented via the indices of a \a source point and a \a target point, and the distance between them.
*
* \author Dirk Holz, Radu B. Rusu, Bastian Steder
* \ingroup common
* By default (true), vectors are internally sorted before determining their difference.
* If the order of correspondences in \a correspondences_after is not different (has not been changed)
* from the order in \b correspondences_before this pre-processing step can be disabled
- * in order to gain efficiency. In order to disable pre-sorting set \a presorting_requered to false.
+ * in order to gain efficiency. In order to disable pre-sorting set \a presorting_required to false.
*/
void
getRejectedQueryIndices (const pcl::Correspondences &correspondences_before,
/**
* \brief Representation of a (possible) correspondence between two points (e.g. from feature matching),
- * that encode complete 6DOF transoformations.
+ * that encode complete 6DOF transformations.
* \ingroup common
*/
struct PointCorrespondence6D : public PointCorrespondence3D
} ;
/** \class IsNotDenseException
- * \brief An exception that is thrown when a PointCloud is not dense but is attemped to be used as dense
+ * \brief An exception that is thrown when a PointCloud is not dense but is attempted to be used as dense
*/
class IsNotDenseException : public PCLException
{
};
/** \class BadArgumentException
- * \brief An exception that is thrown when the argments number or type is wrong/unhandled.
+ * \brief An exception that is thrown when the arguments number or type is wrong/unhandled.
*/
class BadArgumentException : public PCLException
{
#ifdef PCL_NO_PRECOMPILE
-#define PCL_INSTANTIATE_PRODUCT_IMPL(r, product)
#define PCL_INSTANTIATE_IMPL(r, TEMPLATE, POINT_TYPE)
#define PCL_INSTANTIATE(TEMPLATE, POINT_TYPES)
#define PCL_INSTANTIATE_PRODUCT_IMPL(r, product)
(pcl::NormalBasedSignature12) \
(pcl::FPFHSignature33) \
(pcl::VFHSignature308) \
+ (pcl::GASDSignature512) \
+ (pcl::GASDSignature984) \
+ (pcl::GASDSignature7992) \
(pcl::GRSDSignature21) \
(pcl::ESFSignature640) \
(pcl::BRISKSignature512) \
(pcl::NormalBasedSignature12) \
(pcl::FPFHSignature33) \
(pcl::VFHSignature308) \
+ (pcl::GASDSignature512) \
+ (pcl::GASDSignature984) \
+ (pcl::GASDSignature7992) \
(pcl::GRSDSignature21) \
(pcl::ESFSignature640) \
(pcl::BRISKSignature512) \
r = g = b = 0;
a = 255;
}
-
+
+ inline RGB (uint8_t _r, uint8_t _g, uint8_t _b)
+ {
+ r = _r;
+ g = _g;
+ b = _b;
+ a = 255;
+ }
+
friend std::ostream& operator << (std::ostream& os, const RGB& p);
};
-
struct _Intensity
{
PCL_EXPORTS std::ostream& operator << (std::ostream& os, const Intensity32u& p);
/** \brief A point structure representing the grayscale intensity in single-channel images.
- * Intensity is represented as a uint8_t value.
+ * Intensity is represented as a uint32_t value.
* \ingroup common
*/
struct Intensity32u: public _Intensity32u
data[3] = 1.0f;
r = g = b = 0;
a = 255;
- label = 255;
+ label = 0;
}
inline PointXYZRGBL (uint8_t _r, uint8_t _g, uint8_t _b, uint32_t _label)
{
friend std::ostream& operator << (std::ostream& os, const ESFSignature640& p);
};
+ PCL_EXPORTS std::ostream& operator << (std::ostream& os, const GASDSignature512& p);
+ /** \brief A point structure representing the Globally Aligned Spatial Distribution (GASD) shape descriptor.
+ * \ingroup common
+ */
+ struct GASDSignature512
+ {
+ float histogram[512];
+ static int descriptorSize() { return 512; }
+
+ friend std::ostream& operator << (std::ostream& os, const GASDSignature512& p);
+ };
+
+ PCL_EXPORTS std::ostream& operator << (std::ostream& os, const GASDSignature984& p);
+ /** \brief A point structure representing the Globally Aligned Spatial Distribution (GASD) shape and color descriptor.
+ * \ingroup common
+ */
+ struct GASDSignature984
+ {
+ float histogram[984];
+ static int descriptorSize() { return 984; }
+
+ friend std::ostream& operator << (std::ostream& os, const GASDSignature984& p);
+ };
+
+ PCL_EXPORTS std::ostream& operator << (std::ostream& os, const GASDSignature7992& p);
+ /** \brief A point structure representing the Globally Aligned Spatial Distribution (GASD) shape and color descriptor.
+ * \ingroup common
+ */
+ struct GASDSignature7992
+ {
+ float histogram[7992];
+ static int descriptorSize() { return 7992; }
+
+ friend std::ostream& operator << (std::ostream& os, const GASDSignature7992& p);
+ };
+
PCL_EXPORTS std::ostream& operator << (std::ostream& os, const GFPFHSignature16& p);
/** \brief A point structure representing the GFPFH descriptor with 16 bins.
* \ingroup common
union
{
- /** \brief Diameter of the meaningfull keypoint neighborhood. */
+ /** \brief Diameter of the meaningful keypoint neighborhood. */
float scale;
float size;
};
{
inline PointDEM (const _PointDEM &p)
{
- x = p.x; y = p.y; x = p.z; data[3] = 1.0f;
+ x = p.x; y = p.y; z = p.z; data[3] = 1.0f;
intensity = p.intensity;
intensity_variance = p.intensity_variance;
height_variance = p.height_variance;
typedef boost::shared_ptr <const std::vector<int> > IndicesConstPtr;
/////////////////////////////////////////////////////////////////////////////////////////
- /** \brief PCL base class. Implements methods that are used by most PCL algorithms.
- * \ingroup common
+ /** \brief PCL base class. Implements methods that are used by most PCL algorithms.
+ * \ingroup common
*/
template <typename PointT>
class PCLBase
/** \brief Empty constructor. */
PCLBase ();
-
+
/** \brief Copy constructor. */
PCLBase (const PCLBase& base);
input_.reset ();
indices_.reset ();
}
-
+
/** \brief Provide a pointer to the input dataset
* \param[in] cloud the const boost shared pointer to a PointCloud message
*/
- virtual void
+ virtual void
setInputCloud (const PointCloudConstPtr &cloud);
/** \brief Get a pointer to the input point cloud dataset. */
- inline PointCloudConstPtr const
+ inline PointCloudConstPtr const
getInputCloud () const { return (input_); }
/** \brief Provide a pointer to the vector of indices that represents the input data.
virtual void
setIndices (const PointIndicesConstPtr &indices);
- /** \brief Set the indices for the points laying within an interest region of
+ /** \brief Set the indices for the points laying within an interest region of
* the point cloud.
* \note you shouldn't call this method on unorganized point clouds!
* \param[in] row_start the offset on rows
* \param[in] nb_rows the number of rows to be considered row_start included
* \param[in] nb_cols the number of columns to be considered col_start included
*/
- virtual void
+ virtual void
setIndices (size_t row_start, size_t col_start, size_t nb_rows, size_t nb_cols);
/** \brief Get a pointer to the vector of indices used. */
- inline IndicesPtr const
+ inline IndicesPtr const
getIndices () { return (indices_); }
/** \brief Get a pointer to the vector of indices used. */
- inline IndicesConstPtr const
+ inline IndicesConstPtr const
getIndices () const { return (indices_); }
/** \brief Override PointCloud operator[] to shorten code
* or input_->points[(*indices_)[pos]]
* \param[in] pos position in indices_ vector
*/
- inline const PointT& operator[] (size_t pos) const
+ inline const PointT& operator[] (size_t pos) const
{
return ((*input_)[(*indices_)[pos]]);
}
/** \brief If no set of indices are given, we construct a set of fake indices that mimic the input PointCloud. */
bool fake_indices_;
- /** \brief This method should get called before starting the actual computation.
+ /** \brief This method should get called before starting the actual computation.
*
* Internally, initCompute() does the following:
* - checks if an input dataset is given, and returns false otherwise
* - checks whether a set of input indices has been given. Returns true if yes.
* - if no input indices have been given, a fake set is created, which will be used until:
- * - either a new set is given via setIndices(), or
+ * - either a new set is given via setIndices(), or
* - a new cloud is given that has a different set of points. This will trigger an update on the set of fake indices
*/
bool
initCompute ();
- /** \brief This method should get called after finishing the actual computation.
+ /** \brief This method should get called after finishing the actual computation.
*/
bool
deinitCompute ();
/** \brief Empty constructor. */
PCLBase ();
-
+
/** \brief destructor. */
virtual ~PCLBase()
{
/** \brief Provide a pointer to the input dataset
* \param cloud the const boost shared pointer to a PointCloud message
*/
- void
+ void
setInputCloud (const PCLPointCloud2ConstPtr &cloud);
/** \brief Get a pointer to the input point cloud dataset. */
- inline PCLPointCloud2ConstPtr const
- getInputCloud () { return (input_); }
+ inline PCLPointCloud2ConstPtr const
+ getInputCloud () const { return (input_); }
/** \brief Provide a pointer to the vector of indices that represents the input data.
* \param[in] indices a pointer to the indices that represent the input data.
setIndices (const PointIndicesConstPtr &indices);
/** \brief Get a pointer to the vector of indices used. */
- inline IndicesPtr const
- getIndices () { return (indices_); }
+ inline IndicesPtr const
+ getIndices () const { return (indices_); }
protected:
/** \brief The input point cloud dataset. */
#define PCL_EXPORTS_H_
// This header is created to include to NVCC compiled sources.
-// Header 'pcl_macros' is not suitable since it inludes <Eigen/Core>,
+// Header 'pcl_macros' is not suitable since it includes <Eigen/Core>,
// which can't be eaten by nvcc (it's too weak)
#if defined WIN32 || defined _WIN32 || defined WINCE || defined __MINGW32__
#define PCLAPI(rettype) PCL_EXTERN_C PCL_EXPORTS rettype PCL_CDECL
#endif
+// Macro for pragma operator
+#if (defined (__GNUC__) || defined(__clang__))
+ #define PCL_PRAGMA(x) _Pragma (#x)
+#elif _MSC_VER
+ #define PCL_PRAGMA(x) __pragma (#x)
+#else
+ #define PCL_PRAGMA
+#endif
+
+// Macro for emitting pragma warning
+#if (defined (__GNUC__) || defined(__clang__))
+ #define PCL_PRAGMA_WARNING(x) PCL_PRAGMA (GCC warning x)
+#elif _MSC_VER
+ #define PCL_PRAGMA_WARNING(x) PCL_PRAGMA (warning (x))
+#else
+ #define PCL_PRAGMA_WARNING
+#endif
+
+
// Macro to deprecate old functions
//
// Usage:
#define __has_extension(x) 0 // Compatibility with pre-3.0 compilers.
#endif
-#if (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) < PCL_LINEAR_VERSION(4,5,0) && ! defined(__clang__)) || defined(__INTEL_COMPILER)
-#define PCL_DEPRECATED(message) __attribute__ ((deprecated))
-#endif
-
+// check Intel compiler first since it usually also defines __GNUC__, __clang__, etc.
+#if defined(__INTEL_COMPILER)
+ #define PCL_DEPRECATED(message) __attribute((deprecated))
+#elif (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) < PCL_LINEAR_VERSION(4,5,0) && ! defined(__clang__)) || defined(__INTEL_COMPILER)
+ #define PCL_DEPRECATED(message) __attribute__ ((deprecated))
// gcc supports this starting from 4.5 : http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43666
-#if (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) >= PCL_LINEAR_VERSION(4,5,0)) || (defined(__clang__) && __has_extension(attribute_deprecated_with_message))
-#define PCL_DEPRECATED(message) __attribute__ ((deprecated(message)))
-#endif
-
-#ifdef _MSC_VER
-#define PCL_DEPRECATED(message) __declspec(deprecated(message))
-#endif
-
-#ifndef PCL_DEPRECATED
-#pragma message("WARNING: You need to implement PCL_DEPRECATED for this compiler")
-#define PCL_DEPRECATED(message)
+#elif (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) >= PCL_LINEAR_VERSION(4,5,0)) || (defined(__clang__) && __has_extension(attribute_deprecated_with_message))
+ #define PCL_DEPRECATED(message) __attribute__ ((deprecated(message)))
+#elif defined(_MSC_VER)
+ #define PCL_DEPRECATED(message) __declspec(deprecated(message))
+#else
+ #pragma message("WARNING: You need to implement PCL_DEPRECATED for this compiler")
+ #define PCL_DEPRECATED(message)
#endif
// NewClass() {}
// };
-#if (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) < PCL_LINEAR_VERSION(4,5,0) && ! defined(__clang__)) || defined(__INTEL_COMPILER)
-#define PCL_DEPRECATED_CLASS(func, message) __attribute__ ((deprecated)) func
-#endif
-
+// check Intel compiler first since it usually also defines __GNUC__, __clang__, etc.
+#if defined(__INTEL_COMPILER)
+ #define PCL_DEPRECATED_CLASS(func, message) __attribute((deprecated)) func
+#elif (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) < PCL_LINEAR_VERSION(4,5,0) && ! defined(__clang__)) || defined(__INTEL_COMPILER)
+ #define PCL_DEPRECATED_CLASS(func, message) __attribute__ ((deprecated)) func
// gcc supports this starting from 4.5 : http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43666
-#if (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) >= PCL_LINEAR_VERSION(4,5,0)) || (defined(__clang__) && __has_extension(attribute_deprecated_with_message))
-#define PCL_DEPRECATED_CLASS(func, message) __attribute__ ((deprecated(message))) func
-#endif
-
-#ifdef _MSC_VER
-#define PCL_DEPRECATED_CLASS(func, message) __declspec(deprecated(message)) func
-#endif
-
-#ifndef PCL_DEPRECATED_CLASS
-#pragma message("WARNING: You need to implement PCL_DEPRECATED_CLASS for this compiler")
-#define PCL_DEPRECATED_CLASS(func) func
+#elif (defined(__GNUC__) && PCL_LINEAR_VERSION(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) >= PCL_LINEAR_VERSION(4,5,0)) || (defined(__clang__) && __has_extension(attribute_deprecated_with_message))
+ #define PCL_DEPRECATED_CLASS(func, message) __attribute__ ((deprecated(message))) func
+#elif defined(_MSC_VER)
+ #define PCL_DEPRECATED_CLASS(func, message) __declspec(deprecated(message)) func
+#else
+ #pragma message("WARNING: You need to implement PCL_DEPRECATED_CLASS for this compiler")
+ #define PCL_DEPRECATED_CLASS(func) func
#endif
#if defined (__GNUC__) || defined (__PGI) || defined (__IBMCPP__) || defined (__SUNPRO_CC)
#endif
#if defined (HAVE_MM_MALLOC)
- #include <mm_malloc.h>
+ // Intel compiler defines an incompatible _mm_malloc signature
+ #if defined(__INTEL_COMPILER)
+ #include <malloc.h>
+ #else
+ #include <mm_malloc.h>
+ #endif
#endif
inline void*
<< "Which is: " << p1.getRGBAVector4i ().transpose ();
}
+ template <typename PointCloud1T, typename PointCloud2T>
+ ::testing::AssertionResult MetaDataEQ (const char* expr1,
+ const char* expr2,
+ const PointCloud1T& p1,
+ const PointCloud2T& p2)
+ {
+ if (!(p1.header == p2.header))
+ return ::testing::AssertionFailure () << "Headers are different";
+ if (p1.width != p2.width)
+ return ::testing::AssertionFailure ()
+ << "Value of: " << expr2 << ".width" << std::endl
+ << " Actual: " << p2.width << std::endl
+ << "Expected: " << expr1 << ".width" << std::endl
+ << "Which is: " << p1.width << std::endl;
+ if (p1.height != p2.height)
+ return ::testing::AssertionFailure ()
+ << "Value of: " << expr2 << ".height" << std::endl
+ << " Actual: " << p2.height << std::endl
+ << "Expected: " << expr1 << ".height" << std::endl
+ << "Which is: " << p1.height << std::endl;
+ if (p1.is_dense != p2.is_dense)
+ return ::testing::AssertionFailure ()
+ << "Value of: " << expr2 << ".is_dense" << std::endl
+ << " Actual: " << p2.is_dense << std::endl
+ << "Expected: " << expr1 << ".is_dense" << std::endl
+ << "Which is: " << p1.is_dense << std::endl;
+ if (p1.sensor_origin_ != p2.sensor_origin_)
+ return ::testing::AssertionFailure () << "Sensor origins are different";
+ if (p1.sensor_orientation_.coeffs () != p2.sensor_orientation_.coeffs ())
+ return ::testing::AssertionFailure () << "Sensor orientations are different";
+ return ::testing::AssertionSuccess ();
+ }
+
}
}
/// Assert that differences between x, y, and z fields in
/// two points are each within abs_error.
#define ASSERT_XYZ_NEAR(expected, actual, abs_error) \
- EXPECT_PRED_FORMAT3(::pcl::test::internal::XYZNear, \
+ ASSERT_PRED_FORMAT3(::pcl::test::internal::XYZNear, \
(expected), (actual), abs_error)
/// Expect that each of normal_x, normal_y, and normal_z
/// and normal_z fields in two points are each within
/// abs_error.
#define ASSERT_NORMAL_NEAR(expected, actual, abs_error) \
- EXPECT_PRED_FORMAT3(::pcl::test::internal::NormalNear, \
+ ASSERT_PRED_FORMAT3(::pcl::test::internal::NormalNear, \
(expected), (actual), abs_error)
/// Expect that each of r, g, and b fields are equal in
ASSERT_PRED_FORMAT2(::pcl::test::internal::RGBAEQ, \
(expected), (actual))
+/// Assert that the metadata (header, width, height,
+/// is_dense, sensor origin and orientation) are equal
+/// in two point clouds.
+#define ASSERT_METADATA_EQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::pcl::test::internal::MetaDataEQ, \
+ expected, actual)
+
+/// Expect that the metadata (header, width, height,
+/// is_dense, sensor origin and orientation) are equal
+/// in two point clouds.
+#define EXPECT_METADATA_EQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::pcl::test::internal::MetaDataEQ, \
+ expected, actual)
+
#endif
#define PCL_POINT_CLOUD_H_
#ifdef __GNUC__
-#pragma GCC system_header
+#pragma GCC system_header
#endif
#include <Eigen/StdVector>
// Forward declarations
template <typename PointT> class PointCloud;
typedef std::vector<detail::FieldMapping> MsgFieldMap;
-
+
/** \brief Helper functor structure for copying data between an Eigen type and a PointT. */
template <typename PointOutT>
struct NdCopyEigenPointFunctor
{
typedef typename traits::POD<PointOutT>::type Pod;
-
+
/** \brief Constructor
* \param[in] p1 the input Eigen type
* \param[out] p2 the output Point type
f_idx_ (0) { }
/** \brief Operator. Data copy happens here. */
- template<typename Key> inline void
+ template<typename Key> inline void
operator() ()
{
//boost::fusion::at_key<Key> (p2_) = p1_[f_idx_++];
struct NdCopyPointEigenFunctor
{
typedef typename traits::POD<PointInT>::type Pod;
-
+
/** \brief Constructor
* \param[in] p1 the input Point type
* \param[out] p2 the output Eigen type
: p1_ (reinterpret_cast<const Pod&>(p1)), p2_ (p2), f_idx_ (0) { }
/** \brief Operator. Data copy happens here. */
- template<typename Key> inline void
+ template<typename Key> inline void
operator() ()
{
//p2_[f_idx_++] = boost::fusion::at_key<Key> (p1_);
*
* \code
* pcl::PointCloud<pcl::PointXYZ> cloud;
- * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
- * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
- * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
- * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
+ * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
+ * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
+ * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
+ * cloud.push_back (pcl::PointXYZ (rand (), rand (), rand ()));
* \endcode
*
* The PointCloud class contains the following elements:
* and \ref height to 0, and the \ref sensor_origin_ and \ref
* sensor_orientation_ to identity.
*/
- PointCloud () :
+ PointCloud () :
header (), points (), width (0), height (0), is_dense (true),
sensor_origin_ (Eigen::Vector4f::Zero ()), sensor_orientation_ (Eigen::Quaternionf::Identity ()),
mapping_ ()
/** \brief Copy constructor (needed by compilers such as Intel C++)
* \param[in] pc the cloud to copy into this
*/
- PointCloud (PointCloud<PointT> &pc) :
- header (), points (), width (0), height (0), is_dense (true),
+ PointCloud (PointCloud<PointT> &pc) :
+ header (), points (), width (0), height (0), is_dense (true),
sensor_origin_ (Eigen::Vector4f::Zero ()), sensor_orientation_ (Eigen::Quaternionf::Identity ()),
mapping_ ()
{
/** \brief Copy constructor (needed by compilers such as Intel C++)
* \param[in] pc the cloud to copy into this
*/
- PointCloud (const PointCloud<PointT> &pc) :
- header (), points (), width (0), height (0), is_dense (true),
+ PointCloud (const PointCloud<PointT> &pc) :
+ header (), points (), width (0), height (0), is_dense (true),
sensor_origin_ (Eigen::Vector4f::Zero ()), sensor_orientation_ (Eigen::Quaternionf::Identity ()),
mapping_ ()
{
* \param[in] pc the cloud to copy into this
* \param[in] indices the subset to copy
*/
- PointCloud (const PointCloud<PointT> &pc,
+ PointCloud (const PointCloud<PointT> &pc,
const std::vector<int> &indices) :
header (pc.header), points (indices.size ()), width (indices.size ()), height (1), is_dense (pc.is_dense),
sensor_origin_ (pc.sensor_origin_), sensor_orientation_ (pc.sensor_orientation_),
/** \brief Add a point cloud to the current cloud.
* \param[in] rhs the cloud to add to the current cloud
* \return the new cloud as a concatenation of the current cloud and the new given cloud
- */
+ */
inline PointCloud&
operator += (const PointCloud& rhs)
{
/** \brief Add a point cloud to another cloud.
* \param[in] rhs the cloud to add to the current cloud
* \return the new cloud as a concatenation of the current cloud and the new given cloud
- */
+ */
inline const PointCloud
operator + (const PointCloud& rhs)
{
return (PointCloud (*this) += rhs);
}
-
- /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
+
+ /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
* datasets (those that have height != 1).
* \param[in] column the column coordinate
* \param[in] row the row coordinate
if (this->height > 1)
return (points.at (row * this->width + column));
else
- throw IsNotDenseException ("Can't use 2D indexing with a unorganized point cloud");
+ throw UnorganizedPointCloudException ("Can't use 2D indexing with an unorganized point cloud");
}
- /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
+ /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
* datasets (those that have height != 1).
* \param[in] column the column coordinate
* \param[in] row the row coordinate
if (this->height > 1)
return (points.at (row * this->width + column));
else
- throw IsNotDenseException ("Can't use 2D indexing with a unorganized point cloud");
+ throw UnorganizedPointCloudException ("Can't use 2D indexing with an unorganized point cloud");
}
- /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
+ /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
* datasets (those that have height != 1).
* \param[in] column the column coordinate
* \param[in] row the row coordinate
return (points[row * this->width + column]);
}
- /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
+ /** \brief Obtain the point given by the (column, row) coordinates. Only works on organized
* datasets (those that have height != 1).
* \param[in] column the column coordinate
* \param[in] row the row coordinate
{
return (height > 1);
}
-
+
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the specified dimensions of the PointCloud.
* \anchor getMatrixXfMap
* \note This method is for advanced users only! Use with care!
- *
+ *
* \attention Since 1.4.0, Eigen matrices are forced to Row Major to increase the efficiency of the algorithms in PCL
- * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
+ * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
* that is: number of points in a cloud = rows in a matrix, number of point dimensions = columns in a matrix
*
* \param[in] dim the number of dimensions to consider for each point
* \param[in] offset the number of dimensions to skip from the beginning of each point
* (stride = offset + dim + x, where x is the number of dimensions to skip from the end of each point)
* \note for getting only XYZ coordinates out of PointXYZ use dim=3, stride=4 and offset=0 due to the alignment.
- * \attention PointT types are most of the time aligned, so the offsets are not continuous!
+ * \attention PointT types are most of the time aligned, so the offsets are not continuous!
*/
- inline Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >
+ inline Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >
getMatrixXfMap (int dim, int stride, int offset)
{
if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the specified dimensions of the PointCloud.
* \anchor getMatrixXfMap
* \note This method is for advanced users only! Use with care!
- *
+ *
* \attention Since 1.4.0, Eigen matrices are forced to Row Major to increase the efficiency of the algorithms in PCL
- * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
+ * This means that the behavior of getMatrixXfMap changed, and is now correctly mapping 1-1 with a PointCloud structure,
* that is: number of points in a cloud = rows in a matrix, number of point dimensions = columns in a matrix
*
* \param[in] dim the number of dimensions to consider for each point
* \param[in] offset the number of dimensions to skip from the beginning of each point
* (stride = offset + dim + x, where x is the number of dimensions to skip from the end of each point)
* \note for getting only XYZ coordinates out of PointXYZ use dim=3, stride=4 and offset=0 due to the alignment.
- * \attention PointT types are most of the time aligned, so the offsets are not continuous!
+ * \attention PointT types are most of the time aligned, so the offsets are not continuous!
*/
inline const Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >
getMatrixXfMap (int dim, int stride, int offset) const
if (Eigen::MatrixXf::Flags & Eigen::RowMajorBit)
return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, points.size (), dim, Eigen::OuterStride<> (stride)));
else
- return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, dim, points.size (), Eigen::OuterStride<> (stride)));
+ return (Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >(reinterpret_cast<float*>(const_cast<PointT*>(&points[0]))+offset, dim, points.size (), Eigen::OuterStride<> (stride)));
}
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the PointCloud.
* \note This method is for advanced users only! Use with care!
- * \attention PointT types are most of the time aligned, so the offsets are not continuous!
+ * \attention PointT types are most of the time aligned, so the offsets are not continuous!
* See \ref getMatrixXfMap for more information.
*/
inline Eigen::Map<Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >
- getMatrixXfMap ()
+ getMatrixXfMap ()
{
return (getMatrixXfMap (sizeof (PointT) / sizeof (float), sizeof (PointT) / sizeof (float), 0));
}
/** \brief Return an Eigen MatrixXf (assumes float values) mapped to the PointCloud.
* \note This method is for advanced users only! Use with care!
- * \attention PointT types are most of the time aligned, so the offsets are not continuous!
+ * \attention PointT types are most of the time aligned, so the offsets are not continuous!
* See \ref getMatrixXfMap for more information.
*/
inline const Eigen::Map<const Eigen::MatrixXf, Eigen::Aligned, Eigen::OuterStride<> >
/** \brief The point cloud height (if organized as an image-structure). */
uint32_t height;
- /** \brief True if no points are invalid (e.g., have NaN or Inf values). */
+ /** \brief True if no points are invalid (e.g., have NaN or Inf values in any of their floating point fields). */
bool is_dense;
/** \brief Sensor acquisition pose (origin/translation). */
/** \brief Resize the cloud
* \param[in] n the new cloud size
*/
- inline void resize (size_t n)
- {
+ inline void resize (size_t n)
+ {
points.resize (n);
if (width * height != n)
{
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] pt the point to insert
*/
- inline void
+ inline void
push_back (const PointT& pt)
{
points.push_back (pt);
* \param[in] pt the point to insert
* \return returns the new position iterator
*/
- inline iterator
+ inline iterator
insert (iterator position, const PointT& pt)
{
iterator it = points.insert (position, pt);
* \param[in] n the number of times to insert the point
* \param[in] pt the point to insert
*/
- inline void
+ inline void
insert (iterator position, size_t n, const PointT& pt)
{
points.insert (position, n, pt);
* \param[in] first where to start inserting the points from
* \param[in] last where to stop inserting the points from
*/
- template <class InputIterator> inline void
+ template <class InputIterator> inline void
insert (iterator position, InputIterator first, InputIterator last)
{
points.insert (position, first, last);
height = 1;
}
- /** \brief Erase a point in the cloud.
+ /** \brief Erase a point in the cloud.
* \note This breaks the organized structure of the cloud by setting the height to 1!
* \param[in] position what data point to erase
* \return returns the new position iterator
*/
- inline iterator
+ inline iterator
erase (iterator position)
{
- iterator it = points.erase (position);
+ iterator it = points.erase (position);
width = static_cast<uint32_t> (points.size ());
height = 1;
return (it);
* \param[in] last where to stop erasing points from
* \return returns the new position iterator
*/
- inline iterator
+ inline iterator
erase (iterator first, iterator last)
{
iterator it = points.erase (first, last);
/** \brief Swap a point cloud with another cloud.
* \param[in,out] rhs point cloud to swap this with
- */
- inline void
+ */
+ inline void
swap (PointCloud<PointT> &rhs)
{
+ std::swap (header, rhs.header);
this->points.swap (rhs.points);
std::swap (width, rhs.width);
std::swap (height, rhs.height);
}
/** \brief Removes all points in a cloud and sets the width and height to 0. */
- inline void
+ inline void
clear ()
{
points.clear ();
* The changes of the returned cloud are not mirrored back to this one.
* \return shared pointer to the copy of the cloud
*/
- inline Ptr
+ inline Ptr
makeShared () const { return Ptr (new PointCloud<PointT> (*this)); }
protected:
template <typename PointT> std::ostream&
operator << (std::ostream& s, const pcl::PointCloud<PointT> &p)
{
+ s << "header: " << p.header << std::endl;
s << "points[]: " << p.points.size () << std::endl;
s << "width: " << p.width << std::endl;
s << "height: " << p.height << std::endl;
s << "is_dense: " << p.is_dense << std::endl;
- s << "sensor origin (xyz): [" <<
- p.sensor_origin_.x () << ", " <<
- p.sensor_origin_.y () << ", " <<
- p.sensor_origin_.z () << "] / orientation (xyzw): [" <<
- p.sensor_orientation_.x () << ", " <<
- p.sensor_orientation_.y () << ", " <<
- p.sensor_orientation_.z () << ", " <<
+ s << "sensor origin (xyz): [" <<
+ p.sensor_origin_.x () << ", " <<
+ p.sensor_origin_.y () << ", " <<
+ p.sensor_origin_.z () << "] / orientation (xyzw): [" <<
+ p.sensor_orientation_.x () << ", " <<
+ p.sensor_orientation_.y () << ", " <<
+ p.sensor_orientation_.z () << ", " <<
p.sensor_orientation_.w () << "]" <<
std::endl;
return (s);
/** \brief @b PointRepresentation provides a set of methods for converting a point structs/object into an
* n-dimensional vector.
* \note This is an abstract class. Subclasses must set nr_dimensions_ to the appropriate value in the constructor
- * and provide an implemention of the pure virtual copyToFloatArray method.
+ * and provide an implementation of the pure virtual copyToFloatArray method.
* \author Michael Dixon
*/
template <typename PointT>
/** \brief Empty destructor */
virtual ~PointRepresentation () {}
- /** \brief Copy point data from input point to a float array. This method must be overriden in all subclasses.
+ /** \brief Copy point data from input point to a float array. This method must be overridden in all subclasses.
* \param[in] p The input point
* \param[out] out A pointer to a float array.
*/
template <>
class DefaultPointRepresentation <VFHSignature308> : public DefaultFeatureRepresentation <VFHSignature308>
{};
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ template <>
+ class DefaultPointRepresentation <GASDSignature512> : public DefaultFeatureRepresentation <GASDSignature512>
+ {};
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ template <>
+ class DefaultPointRepresentation <GASDSignature984> : public DefaultFeatureRepresentation <GASDSignature984>
+ {};
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ template <>
+ class DefaultPointRepresentation <GASDSignature7992> : public DefaultFeatureRepresentation <GASDSignature7992>
+ {};
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
class DefaultPointRepresentation <Narf36> : public PointRepresentation <Narf36>
namespace pcl
{
+ namespace deprecated
+ {
+ /** \class DeprecatedType
+ * \brief A dummy type to aid in template parameter deprecation
+ */
+ struct T {};
+ }
namespace fields
{
/** \brief Get the value at a specified field in a point
* \param[in] pt the point to get the value from
* \param[in] field_offset the offset of the field
- * \param[out] value the value to retreive
+ * \param[out] value the value to retrieve
*/
template <typename PointT, typename ValT> inline void
getFieldValue (const PointT &pt, size_t field_offset, ValT &value)
*/
struct ESFSignature640;
+ /** \brief Members: float gasd[512]
+ * \ingroup common
+ */
+ struct GASDSignature512;
+
+ /** \brief Members: float gasd[984]
+ * \ingroup common
+ */
+ struct GASDSignature984;
+
+ /** \brief Members: float gasd[7992]
+ * \ingroup common
+ */
+ struct GASDSignature7992;
+
/** \brief Members: float histogram[16]
* \ingroup common
*/
(float[640], histogram, esf)
)
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::GASDSignature512,
+ (float[512], histogram, gasd)
+)
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::GASDSignature984,
+ (float[984], histogram, gasd)
+)
+
+POINT_CLOUD_REGISTER_POINT_STRUCT(pcl::GASDSignature7992,
+ (float[7992], histogram, gasd)
+)
+
POINT_CLOUD_REGISTER_POINT_STRUCT (pcl::Narf36,
(float[36], descriptor, descriptor)
)
#include <limits>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+
namespace pcl
{
- // r,g,b, i values are from 0 to 1
+ // r,g,b, i values are from 0 to 255
// h = [0,360]
// s, v values are from 0 to 1
- // if s = 0 > h = -1 (undefined)
+ // if s = 0 => h = 0
/** \brief Convert a XYZRGB point type to a XYZI
* \param[in] in the input XYZRGB point
PointRGBtoI (const RGB& in,
Intensity8u& out)
{
- out.intensity = static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() * 0.299f * static_cast <float> (in.r)
+ out.intensity = static_cast<uint8_t>(0.299f * static_cast <float> (in.r)
+ 0.587f * static_cast <float> (in.g) + 0.114f * static_cast <float> (in.b));
}
PointRGBtoI (const RGB& in,
Intensity32u& out)
{
- out.intensity = static_cast<uint32_t>(static_cast<float>(std::numeric_limits<uint32_t>::max()) * 0.299f * static_cast <float> (in.r)
+ out.intensity = static_cast<uint32_t>(0.299f * static_cast <float> (in.r)
+ 0.587f * static_cast <float> (in.g) + 0.114f * static_cast <float> (in.b));
}
if (out.h < 0.f) out.h += 360.f;
}
- /** \brief Convert a XYZRGB point type to a XYZHSV
- * \param[in] in the input XYZRGB point
+ /** \brief Convert a XYZRGBA point type to a XYZHSV
+ * \param[in] in the input XYZRGBA point
* \param[out] out the output XYZHSV point
* \todo include the A parameter but how?
*/
if (max == 0) // division by zero
{
out.s = 0.f;
- out.h = 0.f; // h = -1.f;
+ out.h = 0.f;
return;
}
}
}
- /** \brief Convert a RGB point cloud to a Intensity
+ /** \brief Convert a RGB point cloud to an Intensity
* \param[in] in the input RGB point cloud
* \param[out] out the output Intensity point cloud
*/
}
}
- /** \brief Convert a RGB point cloud to a Intensity
+ /** \brief Convert a RGB point cloud to an Intensity
* \param[in] in the input RGB point cloud
* \param[out] out the output Intensity point cloud
*/
}
}
- /** \brief Convert a RGB point cloud to a Intensity
+ /** \brief Convert a RGB point cloud to an Intensity
* \param[in] in the input RGB point cloud
* \param[out] out the output Intensity point cloud
*/
for (size_t u = 0; u < width_; u++)
{
PointXYZRGBA pt;
- pt.a = 0;
float depth_ = depth.at (u, v).intensity;
if (depth_ == 0)
getTransformationToWorldSystem () const { return to_world_system_;}
/** Getter for the angular resolution of the range image in x direction in radians per pixel.
- * Provided for downwards compatability */
+ * Provided for downwards compatibility */
inline float
getAngularResolution () const { return angular_resolution_x_;}
getViewingDirection (const Eigen::Vector3f& point, Eigen::Vector3f& viewing_direction) const;
/** Return a newly created Range image.
- * Can be reimplmented in derived classes like RangeImagePlanar to return an image of the same type. */
+ * Can be reimplemented in derived classes like RangeImagePlanar to return an image of the same type. */
PCL_EXPORTS virtual RangeImage*
getNew () const { return new RangeImage; }
Eigen::Affine3f to_world_system_; /**< Inverse of to_range_image_system_ */
float angular_resolution_x_; /**< Angular resolution of the range image in x direction in radians per pixel */
float angular_resolution_y_; /**< Angular resolution of the range image in y direction in radians per pixel */
- float angular_resolution_x_reciprocal_; /**< 1.0/angular_resolution_x_ - provided for better performace of
+ float angular_resolution_x_reciprocal_; /**< 1.0/angular_resolution_x_ - provided for better performance of
* multiplication compared to division */
- float angular_resolution_y_reciprocal_; /**< 1.0/angular_resolution_y_ - provided for better performace of
+ float angular_resolution_y_reciprocal_; /**< 1.0/angular_resolution_y_ - provided for better performance of
* multiplication compared to division */
int image_offset_x_, image_offset_y_; /**< Position of the top left corner of the range image compared to
* an image of full size (360x180 degrees) */
PCL_EXPORTS virtual ~RangeImagePlanar ();
/** Return a newly created RangeImagePlanar.
- * Reimplmentation to return an image of the same type. */
+ * Reimplementation to return an image of the same type. */
virtual RangeImage*
getNew () const { return new RangeImagePlanar; }
- /** Copy *this to other. Derived version - also copying additonal RangeImagePlanar members */
+ /** Copy *this to other. Derived version - also copying additional RangeImagePlanar members */
PCL_EXPORTS virtual void
copyTo (RangeImage& other) const;
PCL_EXPORTS virtual ~RangeImageSpherical () {}
/** Return a newly created RangeImagePlanar.
- * Reimplmentation to return an image of the same type. */
+ * Reimplementation to return an image of the same type. */
virtual RangeImage*
getNew () const { return new RangeImageSpherical; }
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#ifndef PCL_ROS_CONVERSIONS_H_
-#define PCL_ROS_CONVERSIONS_H_
-
-#ifdef __DEPRECATED
-#warning The <pcl/ros/conversions.h> header is deprecated. please use \
-<pcl/conversions.h> instead.
-#endif
-
-#include <pcl/conversions.h>
-
-namespace pcl
-{
- /** \brief Convert a PCLPointCloud2 binary data blob into a pcl::PointCloud<T> object using a field_map.
- * \param[in] msg the PCLPointCloud2 binary blob
- * \param[out] cloud the resultant pcl::PointCloud<T>
- * \param[in] field_map a MsgFieldMap object
- *
- * \note Use fromROSMsg (PCLPointCloud2, PointCloud<T>) directly or create you
- * own MsgFieldMap using:
- *
- * \code
- * MsgFieldMap field_map;
- * createMapping<PointT> (msg.fields, field_map);
- * \endcode
- */
- template <typename PointT>
- PCL_DEPRECATED ("pcl::fromROSMsg is deprecated, please use fromPCLPointCloud2 instead.")
- void
- fromROSMsg (const pcl::PCLPointCloud2& msg, pcl::PointCloud<PointT>& cloud,
- const MsgFieldMap& field_map)
- {
- fromPCLPointCloud2 (msg, cloud, field_map);
- }
-
- /** \brief Convert a PCLPointCloud2 binary data blob into a pcl::PointCloud<T> object.
- * \param[in] msg the PCLPointCloud2 binary blob
- * \param[out] cloud the resultant pcl::PointCloud<T>
- */
- template<typename PointT>
- PCL_DEPRECATED ("pcl::fromROSMsg is deprecated, please use fromPCLPointCloud2 instead.")
- void
- fromROSMsg (const pcl::PCLPointCloud2& msg, pcl::PointCloud<PointT>& cloud)
- {
- fromPCLPointCloud2 (msg, cloud);
- }
-
- /** \brief Convert a pcl::PointCloud<T> object to a PCLPointCloud2 binary data blob.
- * \param[in] cloud the input pcl::PointCloud<T>
- * \param[out] msg the resultant PCLPointCloud2 binary blob
- */
- template<typename PointT>
- PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
- void
- toROSMsg (const pcl::PointCloud<PointT>& cloud, pcl::PCLPointCloud2& msg)
- {
- toPCLPointCloud2 (cloud, msg);
- }
-
- /** \brief Copy the RGB fields of a PointCloud into pcl::PCLImage format
- * \param[in] cloud the point cloud message
- * \param[out] msg the resultant pcl::PCLImage
- * CloudT cloud type, CloudT should be akin to pcl::PointCloud<pcl::PointXYZRGBA>
- * \note will throw std::runtime_error if there is a problem
- */
- template<typename CloudT>
- PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
- void
- toROSMsg (const CloudT& cloud, pcl::PCLImage& msg)
- {
- toPCLPointCloud2 (cloud, msg);
- }
-
- /** \brief Copy the RGB fields of a PCLPointCloud2 msg into pcl::PCLImage format
- * \param cloud the point cloud message
- * \param msg the resultant pcl::PCLImage
- * will throw std::runtime_error if there is a problem
- */
- inline void
- PCL_DEPRECATED ("pcl::toROSMsg is deprecated, please use toPCLPointCloud2 instead.")
- toROSMsg (const pcl::PCLPointCloud2& cloud, pcl::PCLImage& msg)
- {
- toPCLPointCloud2 (cloud, msg);
- }
-}
-
-#endif //#ifndef PCL_ROS_CONVERSIONS_H_
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2010-2012, Willow Garage, Inc.
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
- *
- */
-
-#ifndef PCL_ROS_REGISTER_POINT_STRUCT_H_
-#define PCL_ROS_REGISTER_POINT_STRUCT_H_
-
-#ifdef __DEPRECATED
-#warning The <pcl/ros/register_point_struct.h> header is deprecated. please use \
-<pcl/register_point_struct.h> instead.
-#endif
-
-#include <pcl/register_point_struct.h>
-
-
-#endif //#ifndef PCL_ROS_REGISTER_POINT_STRUCT_H_
#include <pcl/point_types.h>
#include <pcl/common/colors.h>
-/// Lookup table
+/// Glasbey lookup table
static const unsigned char GLASBEY_LUT[] =
{
77 , 175, 74 ,
117, 143, 207,
};
+/// Viridis lookup table
+static const unsigned char VIRIDIS_LUT[] =
+{
+ 68 , 1 , 84 ,
+ 68 , 2 , 85 ,
+ 69 , 3 , 87 ,
+ 69 , 5 , 88 ,
+ 69 , 6 , 90 ,
+ 70 , 8 , 91 ,
+ 70 , 9 , 93 ,
+ 70 , 11 , 94 ,
+ 70 , 12 , 96 ,
+ 71 , 14 , 97 ,
+ 71 , 15 , 98 ,
+ 71 , 17 , 100,
+ 71 , 18 , 101,
+ 71 , 20 , 102,
+ 72 , 21 , 104,
+ 72 , 22 , 105,
+ 72 , 24 , 106,
+ 72 , 25 , 108,
+ 72 , 26 , 109,
+ 72 , 28 , 110,
+ 72 , 29 , 111,
+ 72 , 30 , 112,
+ 72 , 32 , 113,
+ 72 , 33 , 115,
+ 72 , 34 , 116,
+ 72 , 36 , 117,
+ 72 , 37 , 118,
+ 72 , 38 , 119,
+ 72 , 39 , 120,
+ 71 , 41 , 121,
+ 71 , 42 , 121,
+ 71 , 43 , 122,
+ 71 , 44 , 123,
+ 71 , 46 , 124,
+ 70 , 47 , 125,
+ 70 , 48 , 126,
+ 70 , 49 , 126,
+ 70 , 51 , 127,
+ 69 , 52 , 128,
+ 69 , 53 , 129,
+ 69 , 54 , 129,
+ 68 , 56 , 130,
+ 68 , 57 , 131,
+ 68 , 58 , 131,
+ 67 , 59 , 132,
+ 67 , 60 , 132,
+ 67 , 62 , 133,
+ 66 , 63 , 133,
+ 66 , 64 , 134,
+ 65 , 65 , 134,
+ 65 , 66 , 135,
+ 65 , 67 , 135,
+ 64 , 69 , 136,
+ 64 , 70 , 136,
+ 63 , 71 , 136,
+ 63 , 72 , 137,
+ 62 , 73 , 137,
+ 62 , 74 , 137,
+ 61 , 75 , 138,
+ 61 , 77 , 138,
+ 60 , 78 , 138,
+ 60 , 79 , 138,
+ 59 , 80 , 139,
+ 59 , 81 , 139,
+ 58 , 82 , 139,
+ 58 , 83 , 139,
+ 57 , 84 , 140,
+ 57 , 85 , 140,
+ 56 , 86 , 140,
+ 56 , 87 , 140,
+ 55 , 88 , 140,
+ 55 , 89 , 140,
+ 54 , 91 , 141,
+ 54 , 92 , 141,
+ 53 , 93 , 141,
+ 53 , 94 , 141,
+ 52 , 95 , 141,
+ 52 , 96 , 141,
+ 51 , 97 , 141,
+ 51 , 98 , 141,
+ 51 , 99 , 141,
+ 50 , 100, 142,
+ 50 , 101, 142,
+ 49 , 102, 142,
+ 49 , 103, 142,
+ 48 , 104, 142,
+ 48 , 105, 142,
+ 47 , 106, 142,
+ 47 , 107, 142,
+ 47 , 108, 142,
+ 46 , 109, 142,
+ 46 , 110, 142,
+ 45 , 111, 142,
+ 45 , 112, 142,
+ 45 , 112, 142,
+ 44 , 113, 142,
+ 44 , 114, 142,
+ 43 , 115, 142,
+ 43 , 116, 142,
+ 43 , 117, 142,
+ 42 , 118, 142,
+ 42 , 119, 142,
+ 41 , 120, 142,
+ 41 , 121, 142,
+ 41 , 122, 142,
+ 40 , 123, 142,
+ 40 , 124, 142,
+ 40 , 125, 142,
+ 39 , 126, 142,
+ 39 , 127, 142,
+ 38 , 128, 142,
+ 38 , 129, 142,
+ 38 , 130, 142,
+ 37 , 131, 142,
+ 37 , 131, 142,
+ 37 , 132, 142,
+ 36 , 133, 142,
+ 36 , 134, 142,
+ 35 , 135, 142,
+ 35 , 136, 142,
+ 35 , 137, 142,
+ 34 , 138, 141,
+ 34 , 139, 141,
+ 34 , 140, 141,
+ 33 , 141, 141,
+ 33 , 142, 141,
+ 33 , 143, 141,
+ 32 , 144, 141,
+ 32 , 145, 140,
+ 32 , 146, 140,
+ 32 , 147, 140,
+ 31 , 147, 140,
+ 31 , 148, 140,
+ 31 , 149, 139,
+ 31 , 150, 139,
+ 31 , 151, 139,
+ 30 , 152, 139,
+ 30 , 153, 138,
+ 30 , 154, 138,
+ 30 , 155, 138,
+ 30 , 156, 137,
+ 30 , 157, 137,
+ 30 , 158, 137,
+ 30 , 159, 136,
+ 30 , 160, 136,
+ 31 , 161, 136,
+ 31 , 162, 135,
+ 31 , 163, 135,
+ 31 , 163, 134,
+ 32 , 164, 134,
+ 32 , 165, 134,
+ 33 , 166, 133,
+ 33 , 167, 133,
+ 34 , 168, 132,
+ 35 , 169, 131,
+ 35 , 170, 131,
+ 36 , 171, 130,
+ 37 , 172, 130,
+ 38 , 173, 129,
+ 39 , 174, 129,
+ 40 , 175, 128,
+ 41 , 175, 127,
+ 42 , 176, 127,
+ 43 , 177, 126,
+ 44 , 178, 125,
+ 46 , 179, 124,
+ 47 , 180, 124,
+ 48 , 181, 123,
+ 50 , 182, 122,
+ 51 , 183, 121,
+ 53 , 183, 121,
+ 54 , 184, 120,
+ 56 , 185, 119,
+ 57 , 186, 118,
+ 59 , 187, 117,
+ 61 , 188, 116,
+ 62 , 189, 115,
+ 64 , 190, 114,
+ 66 , 190, 113,
+ 68 , 191, 112,
+ 70 , 192, 111,
+ 72 , 193, 110,
+ 73 , 194, 109,
+ 75 , 194, 108,
+ 77 , 195, 107,
+ 79 , 196, 106,
+ 81 , 197, 105,
+ 83 , 198, 104,
+ 85 , 198, 102,
+ 88 , 199, 101,
+ 90 , 200, 100,
+ 92 , 201, 99 ,
+ 94 , 201, 98 ,
+ 96 , 202, 96 ,
+ 98 , 203, 95 ,
+ 101, 204, 94 ,
+ 103, 204, 92 ,
+ 105, 205, 91 ,
+ 108, 206, 90 ,
+ 110, 206, 88 ,
+ 112, 207, 87 ,
+ 115, 208, 85 ,
+ 117, 208, 84 ,
+ 119, 209, 82 ,
+ 122, 210, 81 ,
+ 124, 210, 79 ,
+ 127, 211, 78 ,
+ 129, 212, 76 ,
+ 132, 212, 75 ,
+ 134, 213, 73 ,
+ 137, 213, 72 ,
+ 139, 214, 70 ,
+ 142, 215, 68 ,
+ 144, 215, 67 ,
+ 147, 216, 65 ,
+ 149, 216, 63 ,
+ 152, 217, 62 ,
+ 155, 217, 60 ,
+ 157, 218, 58 ,
+ 160, 218, 57 ,
+ 163, 219, 55 ,
+ 165, 219, 53 ,
+ 168, 220, 51 ,
+ 171, 220, 50 ,
+ 173, 221, 48 ,
+ 176, 221, 46 ,
+ 179, 221, 45 ,
+ 181, 222, 43 ,
+ 184, 222, 41 ,
+ 187, 223, 39 ,
+ 189, 223, 38 ,
+ 192, 223, 36 ,
+ 195, 224, 35 ,
+ 197, 224, 33 ,
+ 200, 225, 32 ,
+ 203, 225, 30 ,
+ 205, 225, 29 ,
+ 208, 226, 28 ,
+ 211, 226, 27 ,
+ 213, 226, 26 ,
+ 216, 227, 25 ,
+ 219, 227, 24 ,
+ 221, 227, 24 ,
+ 224, 228, 24 ,
+ 226, 228, 24 ,
+ 229, 228, 24 ,
+ 232, 229, 25 ,
+ 234, 229, 25 ,
+ 237, 229, 26 ,
+ 239, 230, 27 ,
+ 242, 230, 28 ,
+ 244, 230, 30 ,
+ 247, 230, 31 ,
+ 249, 231, 33 ,
+ 251, 231, 35 ,
+ 254, 231, 36 ,
+};
+
/// Number of colors in Glasbey lookup table
-static const unsigned int GLASBEY_LUT_SIZE = sizeof (GLASBEY_LUT) / (sizeof (GLASBEY_LUT[0]) * 3);
+static const size_t GLASBEY_LUT_SIZE = sizeof (GLASBEY_LUT) / (sizeof (GLASBEY_LUT[0]) * 3);
-pcl::RGB
-pcl::GlasbeyLUT::at (unsigned int color_id)
+/// Number of colors in Viridis lookup table
+static const size_t VIRIDIS_LUT_SIZE = sizeof (VIRIDIS_LUT) / (sizeof (VIRIDIS_LUT[0]) * 3);
+
+static const unsigned char* LUTS[] = { GLASBEY_LUT, VIRIDIS_LUT };
+static const size_t LUT_SIZES[] = { GLASBEY_LUT_SIZE, VIRIDIS_LUT_SIZE };
+
+template<typename pcl::ColorLUTName T> pcl::RGB
+pcl::ColorLUT<T>::at (size_t color_id)
{
- assert (color_id < GLASBEY_LUT_SIZE);
+ assert (color_id < LUT_SIZES[T]);
pcl::RGB color;
- color.r = GLASBEY_LUT[color_id * 3 + 0];
- color.g = GLASBEY_LUT[color_id * 3 + 1];
- color.b = GLASBEY_LUT[color_id * 3 + 2];
+ color.r = LUTS[T][color_id * 3 + 0];
+ color.g = LUTS[T][color_id * 3 + 1];
+ color.b = LUTS[T][color_id * 3 + 2];
return (color);
}
-size_t
-pcl::GlasbeyLUT::size ()
+template<typename pcl::ColorLUTName T> size_t
+pcl::ColorLUT<T>::size ()
{
- return GLASBEY_LUT_SIZE;
+ return LUT_SIZES[T];
}
-const unsigned char*
-pcl::GlasbeyLUT::data ()
+template<typename pcl::ColorLUTName T> const unsigned char*
+pcl::ColorLUT<T>::data ()
{
- return GLASBEY_LUT;
+ return LUTS[T];
}
pcl::RGB
color.b = uint8_t (b * 255.0);
return (color);
}
+
+template class PCL_EXPORTS pcl::ColorLUT<pcl::LUT_GLASBEY>;
+template class PCL_EXPORTS pcl::ColorLUT<pcl::LUT_VIRIDIS>;
#include "pcl/common/fft/_kiss_fft_guts.h"
/* The guts header contains all the multiplication and addition macros that are defined for
- fixed or floating point complex numbers. It also delares the kf_ internal functions.
+ fixed or floating point complex numbers. It also declares the kf_ internal functions.
*/
static void kf_bfly2(
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::find_switch (int argc, char** argv, const char* argument_name)
+pcl::console::find_switch (int argc, const char * const * argv, const char * argument_name)
{
return (find_argument (argc, argv, argument_name) != -1);
}
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::find_argument (int argc, char** argv, const char* argument_name)
+pcl::console::find_argument (int argc, const char * const * argv, const char * argument_name)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, std::string &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, std::string &val)
{
int index = find_argument (argc, argv, str) + 1;
if (index > 0 && index < argc )
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, bool &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, bool &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, double &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, double &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, float &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, float &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, int &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, int &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, unsigned int &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, unsigned int &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_argument (int argc, char** argv, const char* str, char &val)
+pcl::console::parse_argument (int argc, const char * const * argv, const char * str, char &val)
{
int index = find_argument (argc, argv, str) + 1;
////////////////////////////////////////////////////////////////////////////////
std::vector<int>
-pcl::console::parse_file_extension_argument (int argc, char** argv, const std::string &extension)
+pcl::console::parse_file_extension_argument (int argc, const char * const * argv,
+ const std::vector<std::string> &extension)
{
std::vector<int> indices;
for (int i = 1; i < argc; ++i)
{
std::string fname = std::string (argv[i]);
- std::string ext = extension;
+ for (size_t j = 0; j < extension.size (); ++j)
+ {
+ std::string ext = extension[j];
- // Needs to be at least 4: .ext
- if (fname.size () <= 4)
- continue;
+ // Needs to be at least 4: .ext
+ if (fname.size () <= 4)
+ continue;
- // For being case insensitive
- std::transform (fname.begin (), fname.end (), fname.begin (), tolower);
- std::transform (ext.begin (), ext.end (), ext.begin (), tolower);
+ // For being case insensitive
+ std::transform (fname.begin (), fname.end (), fname.begin (), tolower);
+ std::transform (ext.begin (), ext.end (), ext.begin (), tolower);
- // Check if found
- std::string::size_type it;
- if ((it = fname.rfind (ext)) != std::string::npos)
- {
- // Additional check: we want to be able to differentiate between .p and .png
- if ((ext.size () - (fname.size () - it)) == 0)
- indices.push_back (i);
+ // Check if found
+ std::string::size_type it;
+ if ((it = fname.rfind (ext)) != std::string::npos)
+ {
+ // Additional check: we want to be able to differentiate between .p and .png
+ if ((ext.size () - (fname.size () - it)) == 0)
+ {
+ indices.push_back (i);
+ break;
+ }
+ }
}
}
return (indices);
}
+////////////////////////////////////////////////////////////////////////////////
+std::vector<int>
+pcl::console::parse_file_extension_argument (int argc, const char * const * argv,
+ const std::string &ext)
+{
+ std::vector<std::string> extensions;
+ extensions.push_back (ext);
+ return parse_file_extension_argument (argc, argv, extensions);
+}
+
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_2x_arguments (int argc, char** argv, const char* str, float &f, float &s, bool debug)
+pcl::console::parse_2x_arguments (int argc, const char * const * argv, const char * str, float &f, float &s, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_2x_arguments (int argc, char** argv, const char* str, double &f, double &s, bool debug)
+pcl::console::parse_2x_arguments (int argc, const char * const * argv, const char * str, double &f, double &s, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_2x_arguments (int argc, char** argv, const char* str, int &f, int &s, bool debug)
+pcl::console::parse_2x_arguments (int argc, const char * const * argv, const char * str, int &f, int &s, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_3x_arguments (int argc, char** argv, const char* str, float &f, float &s, float &t, bool debug)
+pcl::console::parse_3x_arguments (int argc, const char * const * argv, const char * str, float &f, float &s, float &t, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_3x_arguments (int argc, char** argv, const char* str, double &f, double &s, double &t, bool debug)
+pcl::console::parse_3x_arguments (int argc, const char * const * argv, const char * str, double &f, double &s, double &t, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_3x_arguments (int argc, char** argv, const char* str, int &f, int &s, int &t, bool debug)
+pcl::console::parse_3x_arguments (int argc, const char * const * argv, const char * str, int &f, int &s, int &t, bool debug)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_x_arguments (int argc, char** argv, const char* str, std::vector<double>& v)
+pcl::console::parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<double>& v)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_x_arguments (int argc, char** argv, const char* str, std::vector<float>& v)
+pcl::console::parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<float>& v)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
int
-pcl::console::parse_x_arguments (int argc, char** argv, const char* str, std::vector<int>& v)
+pcl::console::parse_x_arguments (int argc, const char * const * argv, const char * str, std::vector<int>& v)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<int> &values)
+pcl::console::parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<int> &values)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<double> &values)
+pcl::console::parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<double> &values)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<float> &values)
+pcl::console::parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<float> &values)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_arguments (int argc, char** argv, const char* str, std::vector<std::string> &values)
+pcl::console::parse_multiple_arguments (int argc, const char * const * argv, const char * str, std::vector<std::string> &values)
{
for (int i = 1; i < argc; ++i)
{
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_2x_arguments (int argc, char** argv, const char* str, std::vector<double> &values_f, std::vector<double> &values_s)
+pcl::console::parse_multiple_2x_arguments (int argc, const char * const * argv, const char * str, std::vector<double> &values_f, std::vector<double> &values_s)
{
double f, s;
for (int i = 1; i < argc; ++i)
////////////////////////////////////////////////////////////////////////////////
bool
-pcl::console::parse_multiple_3x_arguments (int argc, char** argv, const char* str,
+pcl::console::parse_multiple_3x_arguments (int argc, const char * const * argv, const char * str,
std::vector<double> &values_f,
std::vector<double> &values_s,
std::vector<double> &values_t)
return (os);
}
+ std::ostream&
+ operator << (std::ostream& os, const Intensity32u& p)
+ {
+ os << "( " << p.intensity << " )";
+ return (os);
+ }
+
std::ostream&
operator << (std::ostream& os, const PointXYZI& p)
{
return (os);
}
+ std::ostream&
+ operator << (std::ostream& os, const GASDSignature512& p)
+ {
+ for (int i = 0; i < 512; ++i)
+ os << (i == 0 ? "(" : "") << p.histogram[i] << (i < 511 ? ", " : ")");
+ return (os);
+ }
+
+ std::ostream&
+ operator << (std::ostream& os, const GASDSignature984& p)
+ {
+ for (int i = 0; i < 984; ++i)
+ os << (i == 0 ? "(" : "") << p.histogram[i] << (i < 983 ? ", " : ")");
+ return (os);
+ }
+
+ std::ostream&
+ operator << (std::ostream& os, const GASDSignature7992& p)
+ {
+ for (int i = 0; i < 7992; ++i)
+ os << (i == 0 ? "(" : "") << p.histogram[i] << (i < 7991 ? ", " : ")");
+ return (os);
+ }
+
std::ostream&
operator << (std::ostream& os, const GFPFHSignature16& p)
{
pcl::TransformationFromCorrespondences transformation_from_correspondeces;
// The following loop structure goes through the pairs in the order 12, 13, 23, 14, 24, 34, ...,
- // testing the best correspondences pairs first, without beeing stuck too long with one specific
+ // testing the best correspondences pairs first, without being stuck too long with one specific
// (possibly wrong) correspondence.
bool done = false;
for (int correspondence2_idx = 0; correspondence2_idx < max_correspondence_idx && !done; ++correspondence2_idx)
pcl::TransformationFromCorrespondences transformation_from_correspondeces;
// The following loop structure goes through the triples in the order 123, 124, 134, 234, 125, 135, 235, ...,
- // testing the best correspondences triples first, without beeing stuck too long with one specific
+ // testing the best correspondences triples first, without being stuck too long with one specific
// (possibly wrong) correspondence.
bool done = false;
for (int correspondence3_idx = 0; correspondence3_idx < max_correspondence_idx && !done; ++correspondence3_idx)
swap (roots.x, roots.y);
}
- if (roots.x <= 0.0f) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0
+ if (roots.x <= 0.0f) // eigenval for symmetric positive semi-definite matrix can not be negative! Set it to 0
computeRoots2 (c2, c1, roots);
}
}
, sqr_radius_(sqr_radius)
, sqrt_desired_nr_neighbors_ (sqrt_desired_nr_neighbors)
{}
-
+
template <typename Tuple>
inline __host__ __device__
- float4 operator () (Tuple &t)
+ float4 operator () (const Tuple &t)
{
float3 query_pt = thrust::get<0>(t);
float4 normal = thrust::get<1>(t);
(centroid.z - query_pt.z) / sqrt(sqr_radius_) ,
0);
}
-
+
const PointXYZRGB *points_;
float focallength_;
OrganizedRadiusSearch<CloudConstPtr> search_;
template <typename Tuple>
inline __host__ __device__
- PointXYZRGB operator () (Tuple &t)
+ PointXYZRGB operator () (const Tuple &t)
{
PointXYZRGB &pt = thrust::get<0>(t);
char4 rgb = colors_[thrust::get<1>(t)];
// vector for nearest neighbor candidates
std::vector<nearestNeighborCandidate> nearestNeighbors;
- // iterator for radius seach lookup table
+ // iterator for radius search lookup table
typename std::vector<radiusSearchLoopkupEntry>::const_iterator radiusSearchLookup_Iterator;
radiusSearchLookup_Iterator = radiusSearchLookup_.begin ();
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
- if (!isfinite (thrust::get<0>(t).x))
+ if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
//TODO: make threshold adaptive, depending on z
- return (fabs (thrust::get<0>(t).x * coefficients.x +
- thrust::get<0>(t).y * coefficients.y +
- thrust::get<0>(t).z * coefficients.z + coefficients.w) < threshold);
+ return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
+ thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
+ thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
- if (!isfinite (thrust::get<0>(t).x))
+ if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
- return (fabs (thrust::get<0>(t).x * coefficients.x +
- thrust::get<0>(t).y * coefficients.y +
- thrust::get<0>(t).z * coefficients.z + coefficients.w) < threshold);
+ return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
+ thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
+ thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
vector<SegmLink> edges;
edges.reserve(g.numv);
- // Prepare edges connecting differnet components
+ // Prepare edges connecting different components
for (int v = 0; v < g.numv; ++v)
{
int c1 = comps.find(v);
}
}
- // Sort all graph's edges connecting differnet components (in asceding order)
+ // Sort all graph's edges connecting different components (in ascending order)
sort(edges.begin(), edges.end());
// Exclude small components (starting from the nearest couple)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.pngmath', 'sphinxcontrib.doxylink.doxylink']
+extensions = ['sphinx.ext.imgmath', 'sphinxcontrib.doxylink.doxylink']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
html_show_copyright = False
html_show_sphinx = False
html_add_permalinks = None
-needs_sphinx = 1.0
+needs_sphinx = '1.0'
file_insertion_enabled = True
raw_enabled = True
# Set up doxylink
An in-depth discussion about the PCL 2.x API can be found here.
-Commiting changes to the git master
+Committing changes to the git master
-----------------------------------
In order to oversee the commit messages more easier and that the changelist looks homogenous please keep the following format:
* **bool** :pcl:`is_dense <pcl::PointCloud::is_dense>` - true if the data contains only valid numbers (e.g., no NaN or -/+Inf, etc). False otherwise.
* **Eigen::Vector4f** :pcl:`sensor_origin_ <pcl::PointCloud::sensor_origin_>` - the origin (pose) of the acquisition sensor in the current data coordinate system.
- * **Eigen::Quaternionf** :pcl:`sensor_orientation_ <pcl::PointCloud::sensor_orientation_>` - the origin (orientation) of hte acquisition sensor in the current data coordinate system.
+ * **Eigen::Quaternionf** :pcl:`sensor_orientation_ <pcl::PointCloud::sensor_orientation_>` - the origin (orientation) of the acquisition sensor in the current data coordinate system.
Proposals for the 2.x API:
#. Eigen::Vector4f or Eigen::Vector3f ??
- #. Large points cause significant perfomance penalty for GPU. Let's assume that point sizes up to 16 bytes are suitable. This is some compromise between SOA and AOS. Structures like pcl::Normal (size = 32) is not desirable. SOA is better in this case.
+ #. Large points cause significant performance penalty for GPU. Let's assume that point sizes up to 16 bytes are suitable. This is some compromise between SOA and AOS. Structures like pcl::Normal (size = 32) is not desirable. SOA is better in this case.
1.3 GPU support
if(WITH_DOCS)
- find_package(Doxygen)
+ find_package(Doxygen REQUIRED)
if(DOXYGEN_FOUND)
find_package(HTMLHelp)
endif(DOXYGEN_FOUND)
EXAMPLE_PATH =
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = NO
-IMAGE_PATH =
+IMAGE_PATH = "@PCL_SOURCE_DIR@/doc/doxygen"
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
MATHJAX_EXTENSIONS =
SEARCHENGINE = @DOCUMENTATION_SEARCHENGINE@
-SERVER_BASED_SEARCH = YES
+SERVER_BASED_SEARCH = NO
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
* `PointXYZRGBA` - Members: float x, y, z; uint32_t rgba;
- Similar to `PointXYZI`, except `rgba` containts the RGBA information packed
+ Similar to `PointXYZI`, except `rgba` contains the RGBA information packed
into a single integer.
.. code-block:: cpp
* `InterestPoint` - float x, y, z, strength;
- Similar to `PointXYZI`, except `strength` containts a measure of the strength
+ Similar to `PointXYZI`, except `strength` contains a measure of the strength
of the keypoint.
.. code-block:: cpp
* `PointWithRange` - float x, y, z (union with float point[4]), range;
- Similar to `PointXYZI`, except `range` containts a measure of the distance
- from the acqusition viewpoint to the point in the world.
+ Similar to `PointXYZI`, except `range` contains a measure of the distance
+ from the acquisition viewpoint to the point in the world.
.. code-block:: cpp
* `PointWithViewpoint` - float x, y, z, vp_x, vp_y, vp_z;
- Similar to `PointXYZI`, except `vp_x`, `vp_y`, and `vp_z` containt the
+ Similar to `PointXYZI`, except `vp_x`, `vp_y`, and `vp_z` contain the
acquisition viewpoint as a 3D point.
.. code-block:: cpp
* `PointWithScale` - float x, y, z, scale;
- Similar to `PointXYZI`, except `scale` containts the scale at which a certain
+ Similar to `PointXYZI`, except `scale` contains the scale at which a certain
point was considered for a geometric operation (e.g. the radius of the sphere
for its nearest neighbors computation, the window size, etc).
that resemble an organized image (or matrix) like structure, where the
data is split into rows and columns. Examples of such point clouds
include data coming from stereo cameras or Time Of Flight cameras. The
- advantages of a organized dataset is that by knowing the relationship
+ advantages of an organized dataset is that by knowing the relationship
between adjacent points (e.g. pixels), nearest neighbor operations are
much more efficient, thus speeding up the computation and lowering the
costs of certain algorithms in PCL.
Example::
- cloud.width = 640; // Image-like organized structure, with 640 rows and 480 columns,
+ cloud.width = 640; // Image-like organized structure, with 480 rows and 640 columns,
cloud.height = 480; // thus 640*480=307200 points total in the dataset
Example::
---------------
This document introduces benchmarking concepts for 3D algorithms. By
-*benchmarking* here we refer to the posibility of testing different
+*benchmarking* here we refer to the possibility of testing different
computational pipelines in an **easy manner**. The goal is to test their
reproductibility with respect to a particular problem of general interest.
* a combination of the above.
-In addtion, feature descriptors can be:
+In addition, feature descriptors can be:
* **local** - estimated only at a set of discrete keypoints, using the information from neighboring pixels/points;
* **global**, or meta-local - estimated on entire objects or the entire input dataset.
This document introduces benchmarking concepts for filtering algorithms. By
*benchmarking* here we refer to the possibility of testing different
-parameters for each filter algorithm on a specific point cloud in an **easy manner**. The goal is to find the best paramaters of a certain filter that best describe the original point cloud without removing useful data.
+parameters for each filter algorithm on a specific point cloud in an **easy manner**. The goal is to find the best parameters of a certain filter that best describe the original point cloud without removing useful data.
Benchmarking Filter Algorithms
-------------------------------
You can also change the build type:
-* **Debug**: means that no optimization is done and all the debugging symbols are imbedded into the libraries file. This is plateform and compiler dependent. On Linux with gcc this is equivalent to running gcc with `-O0 -g -ggdb -Wall`
+* **Debug**: means that no optimization is done and all the debugging symbols are embedded into the libraries file. This is platform and compiler dependent. On Linux with gcc this is equivalent to running gcc with `-O0 -g -ggdb -Wall`
-* **Release**: the compiled code is optimized and no debug information will be print out. This will lead to `-O3` for gcc and `-O5` for clang
+* **Release**: the compiled code is optimized and no debug information will be printed out. This will lead to `-O3` for gcc and `-O5` for clang
-* **RelWithDebInfo**: the compiled code is optimized but debugging data is also imbedded in the libraries. This is a tradeoff between the two former ones.
+* **RelWithDebInfo**: the compiled code is optimized but debugging data is also embedded in the libraries. This is a tradeoff between the two former ones.
* **MinSizeRel**: this, normally, results in the smallest libraries you can build. This is interesting when building for Android or a restricted memory/space system.
Now we are done with all the basic stuff. To turn on advanced cache
options hit `t` while in ccmake.
Advanced options become especially useful when you have dependencies
-installed in unusal locations and thus cmake hangs with
+installed in unusual locations and thus cmake hangs with
`XXX_NOT_FOUND` this can even prevent you from building PCL although
you have all the dependencies installed. In this section we will
discuss each dependency entry so that you can configure/build or
variables and give it the value that best fits your needs.
UNIX users generally don't have to bother with debug vs release versions
-they are fully complient. You would just loose debug symbols if you use
+they are fully compliant. You would just loose debug symbols if you use
release libraries version instead of debug while you will end up with much
more verbose output and slower execution. This said, Windows MSVC users
and Apple iCode ones can build debug/release from the same project, thus
Where to build binaries: C:/PCL_dependencies/boost-cmake/build
Before clicking on "Configure", click on "Add Entry" button in the top right of CMake gui, in
- the popup window, fill the fiels as follows::
+ the popup window, fill the fields as follows::
Name : LIBPREFIX
Type : STRING
+-- optional python bindings disabled since PYTHON_FOUND is false.
+ tr1
- Now, click "Generate". A Visual Studio solution file will be genrated inside the build folder
+ Now, click "Generate". A Visual Studio solution file will be generated inside the build folder
(e.g. C:/PCL_dependencies/boost-cmake/build). Open the `Boost.sln` file, then right click on
`INSTALL` project and choose `Build`. The `INSTALL`project will trigger the build of all the projects
in the solution file, and then will install the build libraries along with the header files to the default
Where to build binaries: C:/PCL_dependencies/flann-1.7.1-src/build
Hit the "Configure" button. Proceed and be sure to choose the correct "Generator" on the next window.
- You can safley ignore any warning message about hdf5.
+ You can safely ignore any warning message about hdf5.
Now, on my machine I had to manually set the `BUILD_PYTHON_BINDINGS`
and `BUILD_MATLAB_BINDINGS` to OFF otherwise it would not continue to the next
Where to build binaries: C:/PCL_dependencies/qhull-2011.1/build
Before clicking on "Configure", click on "Add Entry" button in the top right of CMake gui, in
- the popup window, fill the fiels as follows::
+ the popup window, fill the fields as follows::
Name : CMAKE_DEBUG_POSTFIX
Type : STRING
- **GTest** :
- In case you want PCL tests (not recommanded for users), you need to compile the `googletest` library (GTest).
+ In case you want PCL tests (not recommended for users), you need to compile the `googletest` library (GTest).
Setup the CMake fields as usual::
Where is my source code: C:/PCL_dependencies/gtest-1.6.0
make -j2 install
-Or alternatively, if you did not change the variable which declares where PCL should be installed, do::
+Or alternatively, if you did not change the variable which declares where PCL should be installed, do::
sudo make -j2 install
"**Visual Studio 10**" generator. If you want to build 64bit PCL, then pick the "**Visual Studio 10 Win64**".
Make sure you have installed the right third party dependencies. You cannot mix 32bit and 64bit code, and it is
- highly recommanded to not mix codes compiled with different compilers.
+ highly recommended to not mix codes compiled with different compilers.
.. image:: images/windows/cmake_generator.png
:alt: Choosing a generator
:alt: Boost
:align: center
- Let's tell CMake where boost headers are by specifiying the headers path in **Boost_INCLUDE_DIR** variable. For example, my boost
+ Let's tell CMake where boost headers are by specifying the headers path in **Boost_INCLUDE_DIR** variable. For example, my boost
headers are in C:\\Program Files\\PCL-Boost\\include (C:\\Program Files\\Boost\\include for newer installers).
Then, let's hit `configure` again ! Hopefully, CMake is now able to find all the other items (the libraries).
- **Eigen** :
- Eigen is a header-only library, thus, we need only **EIGEN_INCLUDE_DIR** to be set. Hopefully, CMake did fing Eigen.
+ Eigen is a header-only library, thus, we need only **EIGEN_INCLUDE_DIR** to be set. Hopefully, CMake did find Eigen.
.. image:: images/windows/cmake_eigen_include_dir.png
:alt: Eigen include dir
:alt: PCL
:align: center
-- **PCL_SHARED_LIBS** is checked by default. Uncheck it if you want static PCL libs (not recommanded).
+- **PCL_SHARED_LIBS** is checked by default. Uncheck it if you want static PCL libs (not recommended).
- **CMAKE_INSTALL_PREFIX** is where PCL will be installed after building it (more information on this later).
Installing PCL
--------------
-To install the built libraries and executbles, you need to build the "INSTALL" project in the solution explorer.
+To install the built libraries and executables, you need to build the "INSTALL" project in the solution explorer.
This utility project will copy PCL headers, libraries and executable to the directory defined by the **CMAKE_INSTALL_PREFIX**
CMake variable.
.. note::
- It is highly recommanded to add the bin folder in PCL installation tree (e.g. C:\\Program Files\\PCL\\bin)
+ It is highly recommended to add the bin folder in PCL installation tree (e.g. C:\\Program Files\\PCL\\bin)
to your **PATH** environment variable.
Advanced topics
- **LOW_RES_ONLINE_COMPRESSION_WITH_COLOR** 1 cubic centimeter resolution, color, fast online encoding
- - **MED_RES_ONLINE_COMPRESSION_WITHOUT_COLOR** 5 cubic milimeter resolution, no color, fast online encoding
+ - **MED_RES_ONLINE_COMPRESSION_WITHOUT_COLOR** 5 cubic millimeter resolution, no color, fast online encoding
- - **MED_RES_ONLINE_COMPRESSION_WITH_COLOR** 5 cubic milimeter resolution, color, fast online encoding
+ - **MED_RES_ONLINE_COMPRESSION_WITH_COLOR** 5 cubic millimeter resolution, color, fast online encoding
- - **HIGH_RES_ONLINE_COMPRESSION_WITHOUT_COLOR** 1 cubic milimeter resolution, no color, fast online encoding
+ - **HIGH_RES_ONLINE_COMPRESSION_WITHOUT_COLOR** 1 cubic millimeter resolution, no color, fast online encoding
- - **HIGH_RES_ONLINE_COMPRESSION_WITH_COLOR** 1 cubic milimeter resolution, color, fast online encoding
+ - **HIGH_RES_ONLINE_COMPRESSION_WITH_COLOR** 1 cubic millimeter resolution, color, fast online encoding
- **LOW_RES_OFFLINE_COMPRESSION_WITHOUT_COLOR** 1 cubic centimeter resolution, no color, efficient offline encoding
- **LOW_RES_OFFLINE_COMPRESSION_WITH_COLOR** 1 cubic centimeter resolution, color, efficient offline encoding
- - **MED_RES_OFFLINE_COMPRESSION_WITHOUT_COLOR** 5 cubic milimeter resolution, no color, efficient offline encoding
+ - **MED_RES_OFFLINE_COMPRESSION_WITHOUT_COLOR** 5 cubic millimeter resolution, no color, efficient offline encoding
- - **MED_RES_OFFLINE_COMPRESSION_WITH_COLOR** 5 cubic milimeter resolution, color, efficient offline encoding
+ - **MED_RES_OFFLINE_COMPRESSION_WITH_COLOR** 5 cubic millimeter resolution, color, efficient offline encoding
- - **HIGH_RES_OFFLINE_COMPRESSION_WITHOUT_COLOR** 1 cubic milimeter resolution, no color, efficient offline encoding
+ - **HIGH_RES_OFFLINE_COMPRESSION_WITHOUT_COLOR** 1 cubic millimeter resolution, no color, efficient offline encoding
- - **HIGH_RES_OFFLINE_COMPRESSION_WITH_COLOR** 1 cubic milimeter resolution, color, efficient offline encoding
+ - **HIGH_RES_OFFLINE_COMPRESSION_WITH_COLOR** 1 cubic millimeter resolution, color, efficient offline encoding
- **MANUAL_CONFIGURATION** enables manual configuration for advanced parametrization
decreased compression performance. This enables a trade-off between high frame/update rates and compression efficiency.
- **doVoxelGridDownDownSampling_arg**: If activated, only the hierarchical octree data structure is encoded. The decoder generated points at the voxel centers. In this
- way, the point cloud becomes downsampled during compression while archieving high compression performance.
+ way, the point cloud becomes downsampled during compression while achieving high compression performance.
- - **iFrameRate_arg**: The point cloud compression scheme differentially encodes point clouds. In this way, differences between the incoming point cloud and the previously encoded pointcloud is encoded in order to archive maximum compression performance. The iFrameRate_arg allows to specify the rate of frames in the stream at which incoming point clouds are **not** differentially encoded (similar to I/P-frames in video coding).
+ - **iFrameRate_arg**: The point cloud compression scheme differentially encodes point clouds. In this way, differences between the incoming point cloud and the previously encoded pointcloud is encoded in order to achieve maximum compression performance. The iFrameRate_arg allows to specify the rate of frames in the stream at which incoming point clouds are **not** differentially encoded (similar to I/P-frames in video coding).
- **doColorEncoding_arg**: This option enables color component encoding.
Concatenate the points of two Point Clouds
------------------------------------------
-In this tutorial we will learn how to concatenating the points of two different
+In this tutorial we will learn how to concatenate the points of two different
point clouds. The constraint imposed here is that the type and number of fields
-in the two datasets has to be equal. We will also learn how to concatenate the fields (e.g.,
+in the two datasets have to be equal. We will also learn how to concatenate the fields (e.g.,
dimensions) of two different point clouds. The constraint imposed here is that
the number of points in the two datasets has to be equal.
In this tutorial we will learn how to concatenating the points of two different
point clouds. The constraint imposed here is that the type and number of fields
-in the two datasets has to be equal.
+in the two datasets have to be equal.
The code
--------
The Code
--------
-First, download the dataset `Statues_4.pcd <https://raw.github.com/PointCloudLibrary/data/master/Trimble/Outdoor1/Statues_4.pcd>`_ and save it somewhere to disk.
+First, download the dataset `Statues_4.pcd <https://sourceforge.net/projects/pointclouds/files/PCD datasets/Trimble/Outdoor1/Statues_4.pcd.zip>`_ and extract the PCD file from the archive.
This is a very large data set of an outdoor environment where we aim to cluster the separate objects and also want to separate the building from the ground plane even though it is attached in a Euclidean sense.
Now create a file, let's say, ``conditional_euclidean_clustering.cpp`` in your favorite editor, and place the following inside it:
:language: cpp
:lines: 97-109
-When the output point cloud is opened with PCL's standard PCD viewer, pressing '5' will switch to the intenisty channel visualization.
+When the output point cloud is opened with PCL's standard PCD viewer, pressing '5' will switch to the intensity channel visualization.
The too-small clusters will be colored red, the too-large clusters will be colored blue, and the actual clusters/objects of interest will be colored randomly in between yellow and cyan hues.
Compiling and running the program
Removing outliers using a ConditionalRemoval filter
---------------------------------------------------
-This document demonstrates how to use the ConditionalRemoval filter to remove points from a PointCloud that do no satisfy a specific or multiple conditions.
+This document demonstrates how to use the ConditionalRemoval filter to remove points from a PointCloud that do not satisfy a specific or multiple conditions.
The code
--------
:language: cpp
:lines: 8-27
-Then, we create the condition which a given point must satisfy so that it remains in our PointCloud. To do this we must add two comparisons to the condition, greater than 0.0, and less than 0.8. This condition is then used to build the filter.
+Then, we create the condition which a given point must satisfy so that it remains in our PointCloud. To do this we must add two comparisons to the condition, greater than 0.0, and less than 0.8. This condition is then used to build the filter.
.. literalinclude:: sources/conditional_removal/conditional_removal.cpp
:language: cpp
:lines: 28-39
-This last bit of code just applies the filter to our original PointCloud, and removes all of the points that do not satisfy the conditions we specified. Then it outputs all of the points remaining in the PointCloud.
+This last bit of code just applies the filter to our original PointCloud, and removes all of the points that do not satisfy the conditions we specified. Then it outputs all of the points remaining in the PointCloud.
.. literalinclude:: sources/conditional_removal/conditional_removal.cpp
:language: cpp
You can run the tool with `--help` option to view the usage guide.
-The video below demontrates the features of the DepthSense viewer tool. Please
+The video below demonstrates the features of the DepthSense viewer tool. Please
note that the bilateral filtering (which can be observed in the end of the
video) is currently disabled is the tool.
The primary motivation behind DoN is the observation that surface normals estimated at any given radius reflect the underlying geometry of the surface at the scale of the support radius. Although there are many different methods of estimating the surface normals, normals are always estimated with a support radius (or via a fixed number of neighbours). This support radius determines the scale in the surface structure which the normal represents.
-The above diagram illustrates this effect in 1D. Normals, :math:`$\mathbf{\hat{n}}$`, and tangents, :math:`$T$`, estimated with a small support radius :math:`$r_s$` are affected by small-scale surface structure (and similarly by noise). On the other hand, normals and tangent planes estimated with a large support radius $r_l$ are less affected by small-scale structure, and represent the geometry of larger scale surface structures. In fact a similair set of features is seen in the DoN feature vectors for real-world street curbs in a LiDAR image shown below.
+The above diagram illustrates this effect in 1D. Normals, :math:`$\mathbf{\hat{n}}$`, and tangents, :math:`$T$`, estimated with a small support radius :math:`$r_s$` are affected by small-scale surface structure (and similarly by noise). On the other hand, normals and tangent planes estimated with a large support radius $r_l$ are less affected by small-scale structure, and represent the geometry of larger scale surface structures. In fact a similar set of features is seen in the DoN feature vectors for real-world street curbs in a LiDAR image shown below.
.. figure:: images/don_curb_closeup_small.jpg
:align: center
The Data Set
============
-For this tutorial we suggest the use of publically available (creative commons licensed) urban LiDAR data from the [KITTI]_ project. This data is collected from a Velodyne LiDAR scanner mounted on a car, for the purpose of evaluating self-driving cars. To convert the data set to PCL compatible point clouds please see [KITTIPCL]_. Examples and an example data set will be posted here in future as part of the tutorial.
+For this tutorial we suggest the use of publicly available (creative commons licensed) urban LiDAR data from the [KITTI]_ project. This data is collected from a Velodyne LiDAR scanner mounted on a car, for the purpose of evaluating self-driving cars. To convert the data set to PCL compatible point clouds please see [KITTIPCL]_. Examples and an example data set will be posted here in future as part of the tutorial.
-.. For this tutorial we will use publically available (creative commons licensed) urban LiDAR data from the [KITTI]_ project. This data is collected from a Velodyne LiDAR scanner mounted on a car, for the purpose of evaluating self-driving cars. To convert the data set to PCL compatible point clouds please see [KITTIPCL]_. An example scan is presented here from the KITTI data set in PCL form, is available for use with this example, `<https://raw.github.com/PointCloudLibrary/data/master/tutorials/don_segmentation_tutorial.pcd>`_.
+.. For this tutorial we will use publicly available (creative commons licensed) urban LiDAR data from the [KITTI]_ project. This data is collected from a Velodyne LiDAR scanner mounted on a car, for the purpose of evaluating self-driving cars. To convert the data set to PCL compatible point clouds please see [KITTIPCL]_. An example scan is presented here from the KITTI data set in PCL form, is available for use with this example, `<https://raw.github.com/PointCloudLibrary/data/master/tutorials/don_segmentation_tutorial.pcd>`_.
The Code
========
:language: cpp
:lines: 90-102
-Next we calculate the normals using our normal estimation class for both the large and small radius. It is important to use the ``NormalEstimation.setRadiusSearch()`` method v.s. the ``NormalEstimation.setMaximumNeighbours()`` method or equivilant. If the normal estimate is restricted to a set number of neighbours, it may not be based on the complete surface of the given radius, and thus is not suitable for the Difference of Normals features.
+Next we calculate the normals using our normal estimation class for both the large and small radius. It is important to use the ``NormalEstimation.setRadiusSearch()`` method v.s. the ``NormalEstimation.setMaximumNeighbours()`` method or equivalent. If the normal estimate is restricted to a set number of neighbours, it may not be based on the complete surface of the given radius, and thus is not suitable for the Difference of Normals features.
.. note::
For large supporting radii in dense point clouds, calculating the normal would be a very computationally intensive task potentially utilizing thousands of points in the calculation, when hundreds are more than enough for an accurate estimate. A simple method to speed up the calculation is to uniformly subsample the pointcloud when doing a large radius search, see the full example code in the PCL distribution at ``examples/features/example_difference_of_normals.cpp`` for more details.
If you want to perform extrinsic calibration of the sensor, please first make sure your EnsensoSDK version is greater than 1.3.
A fully automated extrinsic calibration ROS package is available to help you calibrate the sensor mounted on a robot arm,
-the package can be found in the `Institut Maupertuis repository <https://github.com/InstitutMaupertuis/ensenso_extrinsic_calibration>`_.
+the package can be found in the `Institut Maupertuis repository <https://gitlab.com/InstitutMaupertuis/ensenso_extrinsic_calibration>`_.
The following video shows the automatic calibration procedure on a Fanuc R1000iA 80f industrial robot:
The next block of code deals with the parametric segmentation. To keep the
-tutorial simple, its its explanation will be skipped for now. Please see the
+tutorial simple, its explanation will be skipped for now. Please see the
**segmentation** tutorials (in particular :ref:`planar_segmentation`) for more
information.
--- /dev/null
+.. _gasd_estimation:
+
+Globally Aligned Spatial Distribution (GASD) descriptors
+--------------------------------------------------------
+
+This document describes the Globally Aligned Spatial Distribution ([GASD]_) global descriptor to be used for efficient object recognition and pose estimation.
+
+GASD is based on the estimation of a reference frame for the whole point cloud that represents an object instance, which is used for aligning it with the canonical coordinate system. After that, a descriptor is computed for the aligned point cloud based on how its 3D points are spatially distributed. Such descriptor may also be extended with color distribution throughout the aligned point cloud. The global alignment transforms of matched point clouds are used for computing object pose. For more information please see [GASD]_.
+
+Theoretical primer
+------------------
+
+The Globally Aligned Spatial Distribution (or GASD) global description method takes as input a 3D point cloud that represents a partial view of a given object. The first step consists in estimating a reference frame for the point cloud, which allows the computation of a transform that aligns it to the canonical coordinate system, making the descriptor pose invariant. After alignment, a shape descriptor is computed for the point cloud based on the spatial distribution of the 3D points. Color distribution along the point cloud can also be taken into account for obtaining a shape and color descriptor with a higher discriminative power. Object recognition is then performed by matching query and train descriptors of partial views. The pose of each recognized object is also computed from the alignment transforms of matched query and train partial views.
+
+The reference frame is estimated using a Principal Component Analysis (PCA) approach. Given a set of 3D points :math:`\boldsymbol{P_i}` that represents a partial view of an object, with :math:`i\in\{1, ..., n\}`, the first step consists in computing their centroid :math:`\boldsymbol{\overline{P}}`, which is the origin of the reference frame. Then a covariance matrix :math:`\boldsymbol{C}` is computed from :math:`\boldsymbol{P_i}` and :math:`\boldsymbol{\overline{P}}` as follows:
+
+.. math::
+
+ \boldsymbol{C}=\frac{1}{n}\sum_{i=1}^{n}(\boldsymbol{P_i}-\boldsymbol{\overline{P}})(\boldsymbol{P_i}-\boldsymbol{\overline{P}})^T.
+
+After that, the eigenvalues :math:`\lambda_j` and corresponding eigenvectors :math:`\boldsymbol{v_j}` of :math:`\boldsymbol{C}` are obtained, with :math:`j\in\{1, 2, 3\}`, such that :math:`\boldsymbol{C}\boldsymbol{v_j}=\lambda_j\boldsymbol{v_j}`. Considering that the eigenvalues are arranged in ascending order, the eigenvector :math:`\boldsymbol{v_1}` associated with the minimal eigenvalue is used as the :math:`z` axis of the reference frame. If the angle between :math:`\boldsymbol{v_1}` and the viewing direction is in the :math:`[-90^{\circ}, 90^{\circ}]` range, then :math:`\boldsymbol{v_1}` is negated. This ensures that the :math:`z` axis always points towards the viewer. The :math:`x` axis of the reference frame is the eigenvector :math:`\boldsymbol{v_3}` associated with the maximal eigenvalue. The :math:`y` axis is given by :math:`\boldsymbol{v_2}=\boldsymbol{v_1}\times\boldsymbol{v_3}`.
+
+From the reference frame, it is possible to compute a transform :math:`[\boldsymbol{R} | \boldsymbol{t}]` that aligns it with the canonical coordinate system. All the points :math:`\boldsymbol{P_i}` of the partial view are then transformed with :math:`[\boldsymbol{R} | \boldsymbol{t}]`, which is defined as follows:
+
+.. math::
+
+ \begin{bmatrix}
+ \boldsymbol{R} & \boldsymbol{t} \\
+ \boldsymbol{0} & 1
+ \end{bmatrix}=
+ \begin{bmatrix}
+ \boldsymbol{v_3}^T & -\boldsymbol{v_3}^T\boldsymbol{\overline{P}} \\
+ \boldsymbol{v_2}^T & -\boldsymbol{v_2}^T\boldsymbol{\overline{P}} \\
+ \boldsymbol{v_1}^T & -\boldsymbol{v_1}^T\boldsymbol{\overline{P}} \\
+ \boldsymbol{0} & 1
+ \end{bmatrix}.
+
+Once the point cloud is aligned using the reference frame, a pose invariant global shape descriptor can be computed from it. The point cloud axis-aligned bounding cube centered on the origin is divided into an :math:`m_s \times m_s \times m_s` regular grid. For each grid cell, a histogram with :math:`l_s` bins is computed. If :math:`l_s=1`, then each histogram bin will store the number of points that belong to its correspondent cell in the 3D regular grid. If :math:`l_s>1`, then for each cell it will be computed a histogram of normalized distances between each sample and the cloud centroid.
+
+The contribution of each sample to the histogram is normalized with respect to the total number of points in the cloud. Optionally, interpolation may be used to distribute the value of each sample into adjacent cells, in an attempt to avoid boundary effects that may cause abrupt changes to the histogram when a sample shifts from being within one cell to another. The descriptor is then obtained by concatenating the computed histograms.
+
+.. image:: images/gasd_estimation/grid.png
+ :width: 24%
+.. image:: images/gasd_estimation/grid_top_side_bottom_view.png
+ :width: 72%
+
+Color information can also be incorporated to the descriptor in order to increase its discriminative power. The color component of the descriptor is computed with an :math:`m_c \times m_c \times m_c` grid similar to the one used for the shape component, but a color histogram is generated for each cell based on the colors of the points that belong to it. Point cloud color is represented in the HSV space and the hue values are accumulated in histograms with :math:`l_c` bins. Similarly to shape component computation, normalization with respect to number of points is performed. Additionally, interpolation of histograms samples may also be performed. The shape and color components are concatenated, resulting in the final descriptor.
+
+Query and train descriptors are matched using a nearest neighbor search approach. After that, for each matched object instance, a coarse pose is computed using the alignment transforms obtained from the reference frames of the respective query and train partial views. Given the transforms :math:`[\mathbf{R_{q}} | \mathbf{t_{q}}]` and :math:`[\mathbf{R_{t}} | \mathbf{t_{t}}]` that align the query and train partial views, respectively, the object coarse pose :math:`[\mathbf{R_{c}} | \mathbf{t_{c}}]` is obtained by
+
+.. math::
+
+ \begin{bmatrix}
+ \mathbf{R_{c}} & \mathbf{t_{c}} \\
+ \mathbf{0} & 1
+ \end{bmatrix}=
+ {\begin{bmatrix}
+ \mathbf{R_{q}} & \mathbf{t_{q}} \\
+ \mathbf{0} & 1
+ \end{bmatrix}}^{-1}
+ \begin{bmatrix}
+ \mathbf{R_{t}} & \mathbf{t_{t}} \\
+ \mathbf{0} & 1
+ \end{bmatrix}.
+
+The coarse pose :math:`[\mathbf{R_{c}} | \mathbf{t_{c}}]` can then be refined using the Iterative Closest Point (ICP) algorithm.
+
+Estimating GASD features
+------------------------
+
+The Globally Aligned Spatial Distribution is implemented in PCL as part of the
+`pcl_features <http://docs.pointclouds.org/trunk/group__features.html>`_
+library.
+
+The default values for color GASD parameters are: :math:`m_s=6` (half size of 3), :math:`l_s=1`, :math:`m_c=4` (half size of 2) and :math:`l_c=12` and no histogram interpolation (INTERP_NONE). This results in an array of 984 float values. These are stored in a **pcl::GASDSignature984** point type. The default values for shape only GASD parameters are: :math:`m_s=8` (half size of 4), :math:`l_s=1` and trilinear histogram interpolation (INTERP_TRILINEAR). This results in an array of 512 float values, which may be stored in a **pcl::GASDSignature512** point type. It is also possible to use quadrilinear histogram interpolation (INTERP_QUADRILINEAR).
+
+The following code snippet will estimate a GASD shape + color descriptor for an input colored point cloud.
+
+.. code-block:: cpp
+ :linenos:
+
+ #include <pcl/point_types.h>
+ #include <pcl/features/gasd.h>
+
+ {
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBA>);
+
+ ... read, pass in or create a point cloud ...
+
+ // Create the GASD estimation class, and pass the input dataset to it
+ pcl::GASDColorEstimation<pcl::PointXYZRGBA, pcl::GASDSignature984> gasd;
+ gasd.setInputCloud (cloud);
+
+ // Output datasets
+ pcl::PointCloud<pcl::GASDSignature984> descriptor;
+
+ // Compute the descriptor
+ gasd.compute (descriptor);
+
+ // Get the alignment transform
+ Eigen::Matrix4f trans = gasd.getTransform (trans);
+
+ // Unpack histogram bins
+ for (size_t i = 0; i < size_t( descriptor[0].descriptorSize ()); ++i)
+ {
+ descriptor[0].histogram[i];
+ }
+ }
+
+The following code snippet will estimate a GASD shape only descriptor for an input point cloud.
+
+.. code-block:: cpp
+ :linenos:
+
+ #include <pcl/point_types.h>
+ #include <pcl/features/gasd.h>
+
+ {
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
+
+ ... read, pass in or create a point cloud ...
+
+ // Create the GASD estimation class, and pass the input dataset to it
+ pcl::GASDEstimation<pcl::PointXYZ, pcl::GASDSignature512> gasd;
+ gasd.setInputCloud (cloud);
+
+ // Output datasets
+ pcl::PointCloud<pcl::GASDSignature512> descriptor;
+
+ // Compute the descriptor
+ gasd.compute (descriptor);
+
+ // Get the alignment transform
+ Eigen::Matrix4f trans = gasd.getTransform (trans);
+
+ // Unpack histogram bins
+ for (size_t i = 0; i < size_t( descriptor[0].descriptorSize ()); ++i)
+ {
+ descriptor[0].histogram[i];
+ }
+ }
+
+.. [GASD] http://www.cin.ufpe.br/~jpsml/uploads/8/2/6/7/82675770/pid4349755.pdf
+.. note::
+ @InProceedings{Lima16SIBGRAPI,
+ author = {Joao Paulo Lima and Veronica Teichrieb},
+ title = {An Efficient Global Point Cloud Descriptor for Object Recognition and Pose Estimation},
+ booktitle = {Proceedings of the 29th SIBGRAPI - Conference on Graphics, Patterns and Images},
+ year = {2016},
+ address = {Sao Jose dos Campos, Brazil},
+ month = {October}
+ }
+
$ ccmake ..
Press c to configure ccmake, press t to toggle to the advanced mode as a number of options
-only appear in advanced mode. The latest CUDA algorithms are beeing kept in the GPU project, for
+only appear in advanced mode. The latest CUDA algorithms are being kept in the GPU project, for
this the BUILD_GPU option needs to be on and the BUILD_gpu_<X> indicate the different
GPU subprojects.
This shows how to detect people with an Primesense device, the full version
working on oni and pcd files can be found in the git master.
-The code assumes a organised and projectable pointcloud, and should work with other
+The code assumes an organised and projectable pointcloud, and should work with other
sensors then the Primesense device.
.. image:: images/gpu/people/ss26_1.jpg
The ``people_detector`` object receives as input the current cloud and the estimated ground coefficients and
computes people clusters properties, which are stored in :pcl:`PersonCluster <pcl::people::PersonCluster>` objects.
The ground plane coefficients are re-estimated at every frame by using the previous frame estimate as initial condition.
-This procedure allows to adapt to small changes which can occurr to the ground plane equation if the camera is slowly moving.
+This procedure allows to adapt to small changes which can occur to the ground plane equation if the camera is slowly moving.
.. literalinclude:: sources/ground_based_rgbd_people_detection/src/main_ground_based_people_detection.cpp
:language: cpp
The Velodyne HDL is a network-based 3D LiDAR system that produces
360 degree point clouds containing over 700,000 points every second.
-The HDL Grabber provided in PCL mimicks other Grabbers, making it *almost*
+The HDL Grabber provided in PCL mimics other Grabbers, making it *almost*
plug-and-play. Because the HDL devices are network based, however, there
are a few gotchas on some platforms.
The HDL-64e and HDL-32e, by default, produce UDP network packets
on the 192.168.3 subnet. Starting with the HDL-32e (Firmware Version 2),
-the user can customize this network subnet.
+the user can customize this network subnet.
The HDL can be connected either directly into your computer, or into a
network switch (to include a network switch with a built-in Wireless Access Point).
`Wireshark <http://www.wireshark.org/>`_ is a popular Network Packet Analyzer Program which
is available for most platforms, including Linux, MacOS and Windows. This tool uses a defacto
standard network packet capture file format called `PCAP <http://en.wikipedia.org/wiki/Pcap>`_.
-Many publically available Velodyne HDL packet captures use this PCAP file format as a means of
+Many publicly available Velodyne HDL packet captures use this PCAP file format as a means of
recording and playback. These PCAP files can be used with the HDL Grabber if PCL is compiled with
PCAP support.
.. code-block:: cpp
:linenos:
-
+
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/hdl_grabber.h>
using namespace std;
using namespace pcl::console;
using namespace pcl::visualization;
-
+
class SimpleHDLViewer
{
public:
boost::mutex::scoped_lock lock (cloud_mutex_);
cloud_ = cloud;
}
-
+
void run ()
{
cloud_viewer_->addCoordinateSystem (3.0);
CloudConstPtr cloud_;
pcl::visualization::PointCloudColorHandler<pcl::PointXYZI> &handler_;
};
-
+
int main (int argc, char ** argv)
{
std::string hdlCalibration, pcapFile;
Add the following lines to your CMakeLists.txt file:
-.. literalinclude:: sources/openni_grabber/CMakeLists.txt
- :language: cmake
+.. code-block:: cmake
:linenos:
+ cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
+
+ project(pcl_hdl_viewer_simple)
+
+ find_package(PCL 1.2 REQUIRED)
+
+ include_directories(${PCL_INCLUDE_DIRS})
+ link_directories(${PCL_LIBRARY_DIRS})
+ add_definitions(${PCL_DEFINITIONS})
+
+ add_executable(pcl_hdl_viewer_simple hdl_viewer_simple.cpp)
+ target_link_libraries(pcl_hdl_viewer_simple ${PCL_LIBRARIES})
+
+
_`Disabling Reverse Path Filter`
--------------------------------
x.y.z.255 for a Class C Network [where x.y.z are the first three octets of a Class C network, such as
192.168.1]).
-The Source IP Address, on the otherhand, indicates where the packet originated from. Packets
+The Source IP Address, on the other hand, indicates where the packet originated from. Packets
can be hand-crafted for spoofing-type attacks (eg, pretending to come from somewhere they really
didn't). The Reverse Path Filter attempts to detect these instances. The default rule that it uses is
that if a packet is received on Network Interface *A*, then if there is no **route** to the **Source IP Address**
em1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.128.108 netmask 255.255.255.0 broadcast 192.168.128.255
-
+
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.3.1 netmask 255.255.255.0 broadcast 192.168.3.255
Next, let's look at our routing table (again, some items removed for brevity)::
$ route -n
-
+
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.128.1 0.0.0.0 UG 0 0 0 em1
192.168.3.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
192.168.128.0 0.0.0.0 255.255.255.0 U 0 0 0 em1
-
+
To add a route to the HDL, assume that the HDL Source IP is 192.168.12.84. You would use the
following command::
To verify that the route has been added, type the following::
$ route -n
-
+
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.128.1 0.0.0.0 UG 0 0 0 em1
192.168.3.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
192.168.12.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
192.168.128.0 0.0.0.0 255.255.255.0 U 0 0 0 em1
-
+
Now, there is a route back to the source IP address of the HDL on the same interface
that the packet came from!
* **setIndices() = true, setSearchSurface() = true** - this is probably the rarest case, where both indices and a search surface is given. In this case, features will be estimated for only a subset from the <input, indices> pair, using the search surface information given in **setSearchSurface()**.
- Finally, un the figure above, this corresponds to the last (rightmost) case. Here, we assume that q_2's index is not part of the indices vector given for Q, so no neighbors or features will be estimated at q2.
+ Finally, in the figure above, this corresponds to the last (rightmost) case. Here, we assume that q_2's index is not part of the indices vector given for Q, so no neighbors or features will be estimated at q2.
The most useful example when **setSearchSurface()** should be used, is when we have a very dense input dataset, but we do not want to estimate features at all the points in it, but rather at some keypoints discovered using the methods in `pcl_keypoints`, or at a downsampled version of the cloud (e.g., obtained using a `pcl::VoxelGrid<T>` filter). In this case, we pass the downsampled/keypoints input via **setInputCloud()**, and the original data as **setSearchSurface()**.
// Create a set of indices to be used. For simplicity, we're going to be using the first 10% of the points in cloud
std::vector<int> indices (floor (cloud->points.size () / 10));
- for (size_t i = 0; indices.size (); ++i) indices[i] = i;
+ for (size_t i = 0; i < indices.size (); ++i) indices[i] = i;
// Create the normal estimation class, and pass the input dataset to it
pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
// cloud_normals->points.size () should have the same size as the input cloud_downsampled->points.size ()
}
-.. [RusuDissertation] http://files.rbrusu.com/publications/RusuPhDThesis.pdf
+.. [RusuDissertation] http://mediatum.ub.tum.de/doc/800632/941254.pdf
.. note::
@PhDThesis{RusuDoctoralDissertation,
author = {Radu Bogdan Rusu},
In the following lines of code, a segmentation object is created and some
parameters are set. We use the SACMODEL_PLANE to segment this PointCloud, and
the method used to find this model is SAC_RANSAC. The actual segmentation
-takes place when `seg.segment (*inliers, *coefficents);` is called. This
+takes place when `seg.segment (*inliers, *coefficients);` is called. This
function stores all of the inlying points (on the plane) to `inliers`, and it
-stores the coefficents to the plane `(a * x + b * y + c * z = d)` in
+stores the coefficients to the plane `(a * x + b * y + c * z = d)` in
`coefficients`.
.. literalinclude:: sources/concave_hull_2d/concave_hull_2d.cpp
The next bit of code projects the inliers onto the plane model and creates
another cloud. One way that we could do this is by just extracting the inliers
-that we found before, but in this case we are going to use the coefficents we
+that we found before, but in this case we are going to use the coefficients we
found before. We set the model type we are looking for and then set the
-coefficents, and from that the object knows which points to project from
+coefficients, and from that the object knows which points to project from
cloud_filtered to cloud_projected.
.. literalinclude:: sources/concave_hull_2d/concave_hull_2d.cpp
.. |fe_9| image:: images/rops_feature.png
:height: 100px
+ * :ref:`gasd_estimation`
+
+ ======= ======
+ |fe_10| Title: **Globally Aligned Spatial Distribution (GASD) descriptors**
+
+ Author: *Joao Paulo Lima*
+
+ Compatibility: >= PCL 1.9
+
+ This document describes the Globally Aligned Spatial Distribution (GASD) global descriptor to be used for efficient object recognition and pose estimation.
+ ======= ======
+
+ .. |fe_10| image:: images/gasd_estimation.png
+ :height: 100px
+
.. _filtering_tutorial:
Filtering
.. image:: images/interactive_icp/add_sub.png
:height: 500
-Configure the subdivision to 2 or 3 for example : dont forget to apply the modifier
+Configure the subdivision to 2 or 3 for example : don't forget to apply the modifier
.. image:: images/interactive_icp/sub2.png
:height: 203
:language: cpp
:lines: 14-24
-This functions takes the reference of a 4x4 matrix and prints the rigid transformation in an human
+This function takes the reference of a 4x4 matrix and prints the rigid transformation in an human
readable way.
.. literalinclude:: sources/interactive_icp/interactive_icp.cpp
As before we check if ICP as converged, if not we exit the program.
**printf("\033[11A");** is a little trick to go up 11 lines in the terminal to write
over the last matrix displayed. In short it allows to replace text instead of writing
-new lines; making the ouptut more readable.
+new lines; making the output more readable.
We increment **iterations** to update the text value in the visualizer.
Now we want to display the rigid transformation from the original transformed point cloud to
matrix[ICP 0->1]*matrix[ICP 1->2]*matrix[ICP 2->3] = matrix[ICP 0->3]
-While this is mathematically true, you will easilly notice that this is not true in this program due to roundings.
+While this is mathematically true, you will easily notice that this is not true in this program due to roundings.
This is why I introduced the initial ICP iteration parameters. Try to launch the program with 20 initial iterations
and save the matrix in a text file. Launch the same program with 1 initial iteration and press space till you go to 20
iterations. You will a notice a slight difference. The matrix with 20 initial iterations is much more accurate than the
:language: cpp
:lines: 37-39
-This creates an instance of an IterativeClosestPoint and gives it some useful information. "icp.setInputCloud(cloud_in);" sets cloud_in as the PointCloud to begin from and "icp.setInputTarget(cloud_out);" sets cloud_out as the PointCloud which we want cloud_in to look like.
+This creates an instance of an IterativeClosestPoint and gives it some useful information. "icp.setInputSource(cloud_in);" sets cloud_in as the PointCloud to begin from and "icp.setInputTarget(cloud_out);" sets cloud_out as the PointCloud which we want cloud_in to look like.
Next,
:language: cpp
:lines: 47-62
-We now load the PCD/PLY file and check if the file was loaded successfuly. Otherwise terminate
+We now load the PCD/PLY file and check if the file was loaded successfully. Otherwise terminate
the program.
This means no transformation (no rotation and no translation). We do not use the
last row of the matrix.
-The first 3 rows and colums (top left) components are the rotation
+The first 3 rows and columns (top left) components are the rotation
matrix. The first 3 rows of the last column is the translation.
.. literalinclude:: sources/matrix_transform/matrix_transform.cpp
:lines: 92-105
This second approach is easier to understand and is less error prone.
-Be carefull if you want to apply several rotations; rotations are not commutative ! This means than in most cases:
+Be careful if you want to apply several rotations; rotations are not commutative ! This means than in most cases:
rotA * rotB != rotB * rotA.
.. literalinclude:: sources/matrix_transform/matrix_transform.cpp
Radius that occurs in the formula is the input parameter for this algorithm and can be roughly considered as the range from objects center
outside of which there are no points that belong to foreground (objects horizontal radius).
- #. After all the preparations the search of the minimum cut is made. Based on an analysis of this cut, cloud is divided on forground and
+ #. After all the preparations the search of the minimum cut is made. Based on an analysis of this cut, cloud is divided on foreground and
background points.
For more comprehensive information please refer to the article
The resulting PointCloud contains the type Narf36 (see
common/include/pcl/point_types.h) and store the descriptor as a 36 elements
float and x,y,z,roll,pitch,yaw to describe the local coordinate frame at which
-the feature was extracted. The descriptors can now be compared, e.g., whith the
-Manhatten distance (sum of absolute differences).
+the feature was extracted. The descriptors can now be compared, e.g., with the
+Manhattan distance (sum of absolute differences).
The remaining code just visualizes the keypoint positions in a range image
widget and also in a 3D viewer.
effects of selecting a smaller scale (i.e., small **r** or **k**) versus a
larger scale (i.e., large **r** or **k**). The left part of the figures depicts
a reasonable well chosen scale factor, with estimated surface normals
-approximatively perpendicular for the two planar surfaces and small edges
+approximately perpendicular for the two planar surfaces and small edges
visible all across the table. If the scale factor however is too big (right
part), and thus the set of neighbors is larger covering points from adjacent
surfaces, the estimated point feature representations get distorted, with
Then we create an octree instance which is initialized with its resolution. This octree keeps a vector of point indices within its leaf nodes.
-The resolution parameter describes the length of the smalles voxels at lowest octree level. The depth of the octree is therefore a function of the resolution as well as
+The resolution parameter describes the length of the smallest voxels at lowest octree level. The depth of the octree is therefore a function of the resolution as well as
the spatial dimension of the pointcloud. If a bounding box of the pointcloud is know, it should be assigned to the octree by using the defineBoundingBox method.
Then we assign a pointer to the PointCloud and add all points to the octree.
new *OpenNIGrabber* interface. The next line might seem a bit intimidating at
first, but it's not that bad. We create a *boost::bind* object with the address
of the callback *cloud_cb_*, we pass a reference to our *SimpleOpenNIViewer*
-and the argument palce holder *_1*.
+and the argument place holder *_1*.
The *bind* then gets casted to a *boost::function* object which is templated on
the callback function type, in this case *void (const
Troubleshooting
---------------
-Q: I get an error that theres now device connected:
+Q: I get an error that there's no device connected:
.. note::
Example::
- WIDTH 640 # Image-like organized structure, with 640 rows and 480 columns,
+ WIDTH 640 # Image-like organized structure, with 480 rows and 640 columns,
HEIGHT 480 # thus 640*480=307200 points total in the dataset
Example::
==========
PCLPlotter provides a very straightforward and easy interface for plotting graphs. One can visualize all sort of important plots -
-from polynomial functions to histograms - inside the library without going to any other softwares (like MATLAB).
+from polynomial functions to histograms - inside the library without going to any other software (like MATLAB).
Please go through the `documentation <http://docs.pointclouds.org/trunk/a01175.html>`_ when some specific concepts are introduced in this tutorial to know the exact method signatures.
The code for the visualization of a plot are usually as simple as the following snippet.
fifth parameter and use it in all other calls where we only want to
affect that viewport.
-We also set the background colour of this viewport, give it a lable
+We also set the background colour of this viewport, give it a label
based on what we are using the viewport to distinguish, and add our
point cloud to it, using an RGB colour handler.
.. image:: images/project_inliers_2.png
-Note that the coordinate axis are represented as red (x), green (y), and blue
+Note that the coordinate axes are represented as red (x), green (y), and blue
(z). The five points are represented with red as the points before projection
and green as the points after projection. Note that their z now lies on the X-Y
plane.
Create the folder tree and download the sources files from `github <https://github.com/PointCloudLibrary/pcl/tree/master/doc/tutorials/content/sources/qt_visualizer>`_.
.. note::
- File paths should not contain any special caracter or the compilation might fail with a ``moc: Cannot open options file specified with @`` error message.
+ File paths should not contain any special character or the compilation might fail with a ``moc: Cannot open options file specified with @`` error message.
Qt configuration
================
:language: cpp
| Here we include the headers for the class PCLViewer and the headers for QApplication and QMainWindow.
-| Then the main functions consists of instanciating a QApplication `a` which manages the GUI application's control flow and main settings.
-| A ``PCLViewer`` object called `w` is instanciated and it's method ``show()`` is called.
+| Then the main functions consists of instantiating a QApplication `a` which manages the GUI application's control flow and main settings.
+| A ``PCLViewer`` object called `w` is instantiated and it's method ``show()`` is called.
| Finally we return the state of our program exit through the QApplication `a`.
pclviewer.h
| This is the last part of our constructor; we add the point cloud to the visualizer, call the method ``pSliderValueChanged`` to change the point size to 2.
-We finaly reset the camera within the PCL Visualizer not avoid the user having to zoom out and update the qvtkwidget to be
+We finally reset the camera within the PCL Visualizer not avoid the user having to zoom out and update the qvtkwidget to be
sure the modifications will be displayed.
.. literalinclude:: sources/qt_visualizer/pclviewer.cpp
:align: right
:height: 200px
-The pictures to the left and right (From [Wikipedia]_) show a simple application of the RANSAC algorithm on a 2-dimensional set of data. The image on our left is a visual representation of a data set containg both inliers and outliers. The image on our right shows all of the outliers in red, and shows inliers in blue. The blue line is the result of the work done by RANSAC. In this case the model that we are trying to fit to the data is a line, and it looks like it's a fairly good fit to our data.
+The pictures to the left and right (From [Wikipedia]_) show a simple application of the RANSAC algorithm on a 2-dimensional set of data. The image on our left is a visual representation of a data set containing both inliers and outliers. The image on our right shows all of the outliers in red, and shows inliers in blue. The blue line is the result of the work done by RANSAC. In this case the model that we are trying to fit to the data is a line, and it looks like it's a fairly good fit to our data.
The code
--------
$ ./random_sample_consensus -f
-the program will display only the indices of the original PointCloud which satisfy the paticular model we have chosen (in this case plane) as found by RandomSampleConsens in the viewer.
+the program will display only the indices of the original PointCloud which satisfy the particular model we have chosen (in this case plane) as found by RandomSampleConsens in the viewer.
.. image:: images/ransac_inliers_plane.png
:align: center
:language: cpp
:lines: 29-33
-The remaining code creates the range image from the point cloud with the given paramters and outputs some information on the terminal.
+The remaining code creates the range image from the point cloud with the given parameters and outputs some information on the terminal.
The range image is derived from the PointCloud class and its points have the members x,y,z and range. There are three kinds of points. Valid points have a real range greater zero. Unobserved points have x=y=z=NAN and range=-INFINITY. Far range points have x=y=z=NAN and range=INFINITY.
:language: cpp
:lines: 37-37
-This value is similar to that which was used in the :ref:`region_growing_segmentation` tutorial. In addition to that, it is used for merging process mentioned in the begining.
+This value is similar to that which was used in the :ref:`region_growing_segmentation` tutorial. In addition to that, it is used for merging process mentioned in the beginning.
If cluster has less points than was set through ``setMinClusterSize`` method, then it will be merged with the nearest neighbour.
.. literalinclude:: sources/region_growing_rgb_segmentation/region_growing_rgb_segmentation.cpp
:lines: 65-73
The ``pcl::RegionGrowing`` class provides a method that returns the colored cloud where each cluster has its own color.
-So in this part of code the ``pcl::visualization::CloudViewer`` is instanciated for viewing the result of the segmentation - the same colored cloud.
+So in this part of code the ``pcl::visualization::CloudViewer`` is instantiated for viewing the result of the segmentation - the same colored cloud.
You can learn more about cloud visualization in the :ref:`cloud_viewer` tutorial.
Compiling and running the program
.. image:: images/registration/block_diagram_single_iteration.jpg
:align: center
-The computational steps for two datasets are straighforward:
+The computational steps for two datasets are straightforward:
* from a set of points, identify **interest points** (i.e., **keypoints**) that best represent the scene in both datasets;
* at each keypoint, compute a **feature descriptor**;
RadiusOutlierRemoval Background
-------------------------------
-The picture below helps to visualize what the RadiusOutlierRemoval filter object does. The user specifies a number of neighbors which every indice must have within a specified radius to remain in the PointCloud. For example if 1 neighbor is specified, only the yellow point will be removed from the PointCloud. If 2 neighbors are specified then both the yellow and green points will be removed from the PointCloud.
+The picture below helps to visualize what the RadiusOutlierRemoval filter object does. The user specifies a number of neighbors which every index must have within a specified radius to remain in the PointCloud. For example if 1 neighbor is specified, only the yellow point will be removed from the PointCloud. If 2 neighbors are specified then both the yellow and green points will be removed from the PointCloud.
.. image:: images/radius_outlier.png
:language: cpp
:lines: 38-53
-Basically, we create the condition which a given point must satisfy for it to remain in our PointCloud. In this example, we use add two comparisons to the conditon: greater than (GT) 0.0 and less than (LT) 0.8. This condition is then used to build the filter.
+Basically, we create the condition which a given point must satisfy for it to remain in our PointCloud. In this example, we use add two comparisons to the condition: greater than (GT) 0.0 and less than (LT) 0.8. This condition is then used to build the filter.
In both cases the code above creates the filter object that we are going to use and sets certain parameters that are necessary for the filtering to take place.
* PointInT - type of the input points;
* PointOutT - type of the output points.
-Immediately after that we set the input all the necessary data neede for the feature computation.
+Immediately after that we set the input all the necessary data needed for the feature computation.
.. literalinclude:: sources/rops_feature/rops_feature.cpp
:language: cpp
bool
enforceCurvatureOrIntensitySimilarity (const PointTypeFull& point_a, const PointTypeFull& point_b, float squared_distance)
{
- Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.normal, point_b_normal = point_b.normal;
+ Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.getNormalVector3fMap (), point_b_normal = point_b.getNormalVector3fMap ();
if (fabs (point_a.intensity - point_b.intensity) < 5.0f)
return (true);
if (fabs (point_a_normal.dot (point_b_normal)) < 0.05)
bool
customRegionGrowing (const PointTypeFull& point_a, const PointTypeFull& point_b, float squared_distance)
{
- Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.normal, point_b_normal = point_b.normal;
+ Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.getNormalVector3fMap (), point_b_normal = point_b.getNormalVector3fMap ();
if (squared_distance < 10000)
{
if (fabs (point_a.intensity - point_b.intensity) < 8.0f)
if (!don.initCompute ())
{
- std::cerr << "Error: Could not intialize DoN feature operator" << std::endl;
+ std::cerr << "Error: Could not initialize DoN feature operator" << std::endl;
exit (EXIT_FAILURE);
}
float cg_thresh_ (5.0f);
int icp_max_iter_ (5);
float icp_corr_distance_ (0.005f);
+float hv_resolution_ (0.005f);
+float hv_occupancy_grid_resolution_ (0.01f);
float hv_clutter_reg_ (5.0f);
float hv_inlier_th_ (0.005f);
float hv_occlusion_th_ (0.01f);
GoHv.setSceneCloud (scene); // Scene Cloud
GoHv.addModels (registered_instances, true); //Models to verify
-
+ GoHv.setResolution (hv_resolution_);
+ GoHv.setResolutionOccupancyGrid (hv_occupancy_grid_resolution_);
GoHv.setInlierThreshold (hv_inlier_th_);
GoHv.setOcclusionThreshold (hv_occlusion_th_);
GoHv.setRegularizer (hv_regularizer_);
}
/* Use IterativeClosestPoint to find a precise alignment from the source cloud to the target cloud,
- * starting with an intial guess
+ * starting with an initial guess
* Inputs:
* source_points
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
pcl::ModelCoefficients::Ptr
fitPlane (const PointCloudPtr & input, float distance_threshold, float max_iterations)
{
- // Intialize the SACSegmentation object
+ // Initialize the SACSegmentation object
pcl::SACSegmentation<PointT> seg;
seg.setOptimizeCoefficients (true);
seg.setModelType (pcl::SACMODEL_PLANE);
PointCloudPtr tgt_points = loadPoints (argv[2]);
Eigen::Matrix4f tform = Eigen::Matrix4f::Identity ();
- // Compute the intial alignment
+ // Compute the initial alignment
double min_sample_dist, max_correspondence_dist, nr_iters;
bool compute_intial_alignment =
pcl::console::parse_3x_arguments (argc, argv, "-i", min_sample_dist, max_correspondence_dist, nr_iters) > 0;
// Visualization
pcl::visualization::PCLVisualizer viewer ("ICP demo");
- // Create two verticaly separated viewports
+ // Create two vertically separated viewports
int v1 (0);
int v2 (1);
viewer.createViewPort (0.0, 0.0, 0.5, 1.0, v1);
/* Use IterativeClosestPoint to find a precise alignment from the source cloud to the target cloud,
- * starting with an intial guess
+ * starting with an initial guess
* Inputs:
* source_points
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
}
/* Use IterativeClosestPoint to find a precise alignment from the source cloud to the target cloud,
- * starting with an intial guess
+ * starting with an initial guess
* Inputs:
* source_points
* The "source" points, i.e., the points that must be transformed to align with the target point cloud
pcl::ModelCoefficients::Ptr
fitPlane (const PointCloudPtr & input, float distance_threshold, float max_iterations)
{
- // Intialize the SACSegmentation object
+ // Initialize the SACSegmentation object
pcl::SACSegmentation<PointT> seg;
seg.setOptimizeCoefficients (true);
seg.setModelType (pcl::SACMODEL_PLANE);
PointCloudPtr tgt_points = loadPoints (argv[2]);
Eigen::Matrix4f tform = Eigen::Matrix4f::Identity ();
- // Compute the intial alignment
+ // Compute the initial alignment
double min_sample_dist, max_correspondence_dist, nr_iters;
bool compute_intial_alignment =
pcl::console::parse_3x_arguments (argc, argv, "-i", min_sample_dist, max_correspondence_dist, nr_iters) > 0;
std::cout << " " << cloud_out->points[i].x << " " <<
cloud_out->points[i].y << " " << cloud_out->points[i].z << std::endl;
pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
- icp.setInputCloud(cloud_in);
+ icp.setInputSource(cloud_in);
icp.setInputTarget(cloud_out);
pcl::PointCloud<pcl::PointXYZ> Final;
icp.align(Final);
// Define a translation of 2.5 meters on the x axis.
transform_2.translation() << 2.5, 0.0, 0.0;
- // The same rotation matrix as before; theta radians arround Z axis
+ // The same rotation matrix as before; theta radians around Z axis
transform_2.rotate (Eigen::AngleAxisf (theta, Eigen::Vector3f::UnitZ()));
// Print the transformation
else
{
setUnseenToMaxRange = true;
- cout << "\nNo *.pcd file given => Genarating example point cloud.\n\n";
+ cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
for (float x=-0.5f; x<=0.5f; x+=0.01f)
{
for (float y=-0.5f; y<=0.5f; y+=0.01f)
else
{
setUnseenToMaxRange = true;
- cout << "\nNo *.pcd file given => Genarating example point cloud.\n\n";
+ cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
for (float x=-0.5f; x<=0.5f; x+=0.01f)
{
for (float y=-0.5f; y<=0.5f; y+=0.01f)
// ------------------------------------
pcl::PointCloud<pcl::PointXYZ>::Ptr basic_cloud_ptr (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr (new pcl::PointCloud<pcl::PointXYZRGB>);
- std::cout << "Genarating example point clouds.\n\n";
+ std::cout << "Generating example point clouds.\n\n";
// We're going to make an ellipse extruded along the z-axis. The colour for
// the XYZRGB cloud will gradually go from red to green to blue.
uint8_t r(255), g(15), b(15);
-cmake_minimum_required (VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 2.8.11)
-project (pcl-colorize_cloud)
-find_package (Qt4 REQUIRED)
-find_package (VTK REQUIRED)
-find_package (PCL 1.7.1 REQUIRED)
+project(pcl_colorize_cloud)
-include_directories (${PCL_INCLUDE_DIRS})
-link_directories (${PCL_LIBRARY_DIRS})
-add_definitions (${PCL_DEFINITIONS})
+# init_qt: Let's do the CMake job for us
+set(CMAKE_AUTOMOC ON) # For meta object compiler
+set(CMAKE_AUTORCC ON) # Resource files
+set(CMAKE_AUTOUIC ON) # UI files
-set (project_SOURCES main.cpp pclviewer.cpp)
-set (project_HEADERS pclviewer.h)
-set (project_FORMS pclviewer.ui)
-set (VTK_LIBRARIES vtkRendering vtkGraphics vtkHybrid QVTK)
+# Find includes in corresponding build directories
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
-QT4_WRAP_CPP (project_HEADERS_MOC ${project_HEADERS})
-QT4_WRAP_UI (project_FORMS_HEADERS ${project_FORMS})
+# Find the QtWidgets library
+find_package(Qt5 REQUIRED Widgets)
-INCLUDE (${QT_USE_FILE})
-ADD_DEFINITIONS (${QT_DEFINITIONS})
+find_package(VTK REQUIRED)
+find_package(PCL 1.7.1 REQUIRED)
-ADD_EXECUTABLE (colorize_cloud ${project_SOURCES}
- ${project_FORMS_HEADERS}
- ${project_HEADERS_MOC})
+# Fix a compilation bug under ubuntu 16.04 (Xenial)
+list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4")
-TARGET_LINK_LIBRARIES (colorize_cloud ${QT_LIBRARIES} ${PCL_LIBRARIES} ${VTK_LIBRARIES})
+include_directories(${PCL_INCLUDE_DIRS})
+add_definitions(${PCL_DEFINITIONS})
+set(project_SOURCES main.cpp pclviewer.cpp)
+
+add_executable(${PROJECT_NAME} ${project_SOURCES})
+
+target_link_libraries(${PROJECT_NAME} ${PCL_LIBRARIES})
+
+qt5_use_modules(${PROJECT_NAME} Widgets)
#include "pclviewer.h"
-#include "../build/ui_pclviewer.h"
+#include "ui_pclviewer.h"
PCLViewer::PCLViewer (QWidget *parent) :
QMainWindow (parent),
-cmake_minimum_required (VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 2.8.11)
-project (pcl-visualizer)
-find_package (Qt4 REQUIRED)
-find_package (VTK REQUIRED)
-find_package (PCL 1.7.1 REQUIRED)
+project(pcl_visualizer)
-include_directories (${PCL_INCLUDE_DIRS})
-link_directories (${PCL_LIBRARY_DIRS})
-add_definitions (${PCL_DEFINITIONS})
+# init_qt: Let's do the CMake job for us
+set(CMAKE_AUTOMOC ON) # For meta object compiler
+set(CMAKE_AUTORCC ON) # Resource files
+set(CMAKE_AUTOUIC ON) # UI files
-set (project_SOURCES main.cpp pclviewer.cpp)
-set (project_HEADERS pclviewer.h)
-set (project_FORMS pclviewer.ui)
-set (VTK_LIBRARIES vtkRendering vtkGraphics vtkHybrid QVTK)
+# Find includes in corresponding build directories
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
-QT4_WRAP_CPP (project_HEADERS_MOC ${project_HEADERS})
-QT4_WRAP_UI (project_FORMS_HEADERS ${project_FORMS})
+# Find the QtWidgets library
+find_package(Qt5 REQUIRED Widgets)
-INCLUDE (${QT_USE_FILE})
-ADD_DEFINITIONS (${QT_DEFINITIONS})
+find_package(VTK REQUIRED)
+find_package(PCL 1.7.1 REQUIRED)
-ADD_EXECUTABLE (pcl_visualizer ${project_SOURCES}
- ${project_FORMS_HEADERS}
- ${project_HEADERS_MOC})
+# Fix a compilation bug under ubuntu 16.04 (Xenial)
+list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4")
-TARGET_LINK_LIBRARIES (pcl_visualizer ${QT_LIBRARIES} ${PCL_LIBRARIES} ${VTK_LIBRARIES})
+include_directories(${PCL_INCLUDE_DIRS})
+add_definitions(${PCL_DEFINITIONS})
+set(project_SOURCES main.cpp pclviewer.cpp)
+
+add_executable(${PROJECT_NAME} ${project_SOURCES})
+
+target_link_libraries(${PROJECT_NAME} ${PCL_LIBRARIES})
+
+qt5_use_modules(${PROJECT_NAME} Widgets)
#include "pclviewer.h"
-#include "../build/ui_pclviewer.h"
+#include "ui_pclviewer.h"
PCLViewer::PCLViewer (QWidget *parent) :
QMainWindow (parent),
// copies all inliers of the model computed to another PointCloud
pcl::copyPointCloud<pcl::PointXYZ>(*cloud, inliers, *final);
- // creates the visualization object and adds either our orignial cloud or all of the inliers
+ // creates the visualization object and adds either our original cloud or all of the inliers
// depending on the command line arguments specified.
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer;
if (pcl::console::find_argument (argc, argv, "-f") >= 0 || pcl::console::find_argument (argc, argv, "-sf") >= 0)
}
else
{
- cout << "\nNo *.pcd file given => Genarating example point cloud.\n\n";
+ cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
for (float x=-0.5f; x<=0.5f; x+=0.01f)
{
for (float y=-0.5f; y<=0.5f; y+=0.01f)
}
else
{
- std::cout << "\nNo *.pcd file given => Genarating example point cloud.\n\n";
+ std::cout << "\nNo *.pcd file given => Generating example point cloud.\n\n";
for (float x=-0.5f; x<=0.5f; x+=0.01f)
{
for (float y=-0.5f; y<=0.5f; y+=0.01f)
max_correspondence_distance_ (0.01f*0.01f),
nr_iterations_ (500)
{
- // Intialize the parameters in the Sample Consensus Intial Alignment (SAC-IA) algorithm
+ // Initialize the parameters in the Sample Consensus Initial Alignment (SAC-IA) algorithm
sac_ia_.setMinSampleDistance (min_sample_distance_);
sac_ia_.setMaxCorrespondenceDistance (max_correspondence_distance_);
sac_ia_.setMaximumIterations (nr_iterations_);
the global distances mean and standard deviation can be considered as outliers
and trimmed from the dataset.
-The following picture show the effects of the sparse outlier analysis and
+The following picture shows the effects of the sparse outlier analysis and
removal: the original dataset is shown on the left, while the resultant one on
the right. The graphic shows the mean k-nearest neighbor distances in a point
neighborhood before and after filtering.
:scale: 50%
:align: center
- **From right to left, 6 (faces), 18 (faces,egdes), and 26 (faces, edges, vertices) adjacency**
+ **From right to left, 6 (faces), 18 (faces,edges), and 26 (faces, edges, vertices) adjacency**
The adjacency graph of supervoxels (and the underlying voxels) is maintained efficiently within the octree by specifying that neighbors are voxels within R_voxel of one another, where R_voxel specifies the octree leaf resolution. This adjacency graph is used extensively for both the region growing used to generate the supervoxels, as well as determining adjacency of the resulting supervoxels themselves.
.. important::
- By default, the algorithm will use a special tranform compressing the depth in Z if your input cloud is organized (eg, from an RGBD sensor like the Kinect). You MUST set use_transform to false if you are using an organized cloud which doesn't have the camera at (0,0,0) and depth in positive Z. The transform is specifically designed to help improve Kinect data by increasing voxel bin size as distance from the camera increases. If your cloud is unorganized, this transform will not be used by default, but can be enabled by using setUseSingleCameraTransform(true).
+ By default, the algorithm will use a special transform compressing the depth in Z if your input cloud is organized (eg, from an RGBD sensor like the Kinect). You MUST set use_transform to false if you are using an organized cloud which doesn't have the camera at (0,0,0) and depth in positive Z. The transform is specifically designed to help improve Kinect data by increasing voxel bin size as distance from the camera increases. If your cloud is unorganized, this transform will not be used by default, but can be enabled by using setUseSingleCameraTransform(true).
.. literalinclude:: sources/supervoxel_clustering/supervoxel_clustering.cpp
:language: cpp
:language: cpp
:lines: 253-260
-We also downsample the point cloud with a spacing of 5mm, which reduces the ammount of computation that's required.
+We also downsample the point cloud with a spacing of 5mm, which reduces the amount of computation that's required.
.. literalinclude:: sources/template_alignment/template_alignment.cpp
:language: cpp
The pcl_tracking library contains data structures and mechanism for 3D tracking which uses Particle Filter Algorithm. This tracking will enable you to implement 6D-pose (position and rotation) tracking which is optimized to run in real time.
-At each loop, tracking program proceeds along with following algorythm.(see fig2)
+At each loop, tracking program proceeds along with following algorithm.(see fig2)
1. (At t = t - 1) At first, using previous Pariticle's information about position and rotation, it will predict each position and rotation of them at the next frame.
-In drawResult function, you can get model infomation about position and rotation.
+In drawResult function, you can get model information about position and rotation.
Compiling and running the program
---------------------------------
3. Don't move the target and the device until you launch tracking program.
4. Output only target point cloud with your other code (See :ref:`planar_segmentation` tutorial) and save as tracking_target.pcd
-After you created model point cloud and the executable, you can then launch tracking_sample. Set device_id as second arguement and pcd file's name you made in above 4 as third.
+After you created model point cloud and the executable, you can then launch tracking_sample. Set device_id as second argument and pcd file's name you made in above 4 as third.
$ ./tracking_sample “#1” tracking_target.pcd
have only included the PCL headers so the compilers knows about the
methods we are calling. We need also to make the linker knows about
the libraries we are linking against. As said before the, PCL
-found libraries are refered to using ``PCL_LIBRARIES`` variable, all
+found libraries are referred to using ``PCL_LIBRARIES`` variable, all
that remains is to trigger the link operation which we do calling
``target_link_libraries()`` macro.
PCLConfig.cmake uses a CMake special feature named `EXPORT` which
allows for using others' projects targets as if you built them
yourself. When you are using such targets they are called `imported
-targets` and acts just like anyother target.
+targets` and acts just like any other target.
Compiling and running the project
---------------------------------
--------------------------
*libpcl_visualization* contains a special **PCLHistogramVisualization** class,
-which is also used by **pcl_viewer** to automaticall display the VFH
+which is also used by **pcl_viewer** to automatically display the VFH
descriptors as a histogram of float values. For more information, please see
http://www.pointclouds.org/documentation/overview/visualization.php.
.. image:: images/vfh_recognition/scene_segmented.jpg
-Since we're only trying to cover the explicity training/testing of VFH
+Since we're only trying to cover the explicit training/testing of VFH
signatures in this tutorial, we provide a set of datasets already collected at:
`vfh_recognition_tutorial_data.tbz
<https://raw.github.com/PointCloudLibrary/data/master/tutorials/vfh_recognition/vfh_recognition_tutorial_data.tbz>`_.
======================== ======================== ========================
Filters_ Features_ Keypoints_
|filters_small| |features_small| |keypoints_small|
-Registration_ KdTree_ Octree_
+Registration_ KdTree_ Octree_
|registration_small| |kdtree_small| |octree_small|
Segmentation_ `Sample Consensus`_ Surface_
|segmentation_small| |sample_consensus_small| |surface_small|
-`Range Image`_ `I/O`_ Visualization_
+`Range Image`_ `I/O`_ Visualization_
|range_image_small| |io_small| |visualization_small|
-Common_ Search_
+Common Search_
|pcl_logo| |pcl_logo|
======================== ======================== ========================
.. image:: images/statistical_removal_2.jpg
-**Documentation:** http://docs.pointclouds.org/trunk/a02945.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__filters.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#filtering-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02944.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__features.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#features-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- * Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/features/``
+ * Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/features/``
* Binaries_: ``$(PCL_PREFIX)/bin/``
* ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- * Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ * Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
* Binaries_: ``$(PCL_PREFIX)/bin/``
* ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/features/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/features/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02949.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__keypoints.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#keypoints-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/keypoints/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/keypoints/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/keypoints/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/keypoints/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02953.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__registration.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#registration-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/registration/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/registration/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/registration/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/registration/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02948.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__kdtree.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#kdtree-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/kdtree/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/kdtree/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/kdtree/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/kdtree/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02950.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__octree.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#octree-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/octree/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/octree/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/octree/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/octree/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02956.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__segmentation.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#segmentation-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/segmentation/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/segmentation/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/segmentation/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/segmentation/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02954.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__sample__consensus.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#sample-consensus
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/sample_consensus/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/sample_consensus/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/sample_consensus/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/sample_consensus/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02957.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__surface.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#surface-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/surface/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/surface/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/surface/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/surface/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
The *range_image* library contains two classes for representing and working with range images. A range image (or depth map) is an image whose pixel values represent a distance or depth from the sensor's origin. Range images are a common 3D representation and are often generated by stereo or time-of-flight cameras. With knowledge of the camera's intrinsic calibration parameters, a range image can be converted into a point cloud.
+ Note: *range_image* is now a part of Common_ module.
+
.. image:: images/range_image.jpg
|
-**Documentation:** http://docs.pointclouds.org/trunk/a01344.html
-
**Tutorials:** http://pointclouds.org/documentation/tutorials/#range-images
**Interacts with:** Common_
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/range_image/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/range_image/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/range_image/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/range_image/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02947.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__io.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#i-o
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/io/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/io/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/io/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/io/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
|
-**Documentation:** http://docs.pointclouds.org/trunk/a02958.html
+**Documentation:** http://docs.pointclouds.org/trunk/group__visualization.html
**Tutorials:** http://pointclouds.org/documentation/tutorials/#visualization-tutorial
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/visualization/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/visualization/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/filters/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/filters/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/visualization/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/visualization/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/common/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/common/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/common/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/common/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/common/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/common/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
**Location:**
* MAC OS X (Homebrew installation)
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/search/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/search/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Linux
- - Header files: ``$(PCL_PREFIX)/pcl-1.6/pcl/search/``
+ - Header files: ``$(PCL_PREFIX)/pcl-$(PCL_VERSION)/pcl/search/``
- Binaries_: ``$(PCL_PREFIX)/bin/``
- ``$(PCL_PREFIX)`` is the ``cmake`` installation prefix ``CMAKE_INSTALL_PREFIX``, e.g., ``/usr/local/``
* Windows
- - Header files: ``$(PCL_DIRECTORY)/include/pcl-1.6/pcl/search/``
+ - Header files: ``$(PCL_DIRECTORY)/include/pcl-$(PCL_VERSION)/pcl/search/``
- Binaries_: ``$(PCL_DIRECTORY)/bin/``
- - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL 1.6\``
+ - ``$(PCL_DIRECTORY)`` is the PCL installation directory, e.g., ``C:\Program Files\PCL $(PCL_VERSION)\``
Top_
**Syntax is: mesh2pcd input.{ply,obj} output.pcd <options>**, where options are:
- -level X = tesselated sphere level (default: 2)
+ -level X = tessellated sphere level (default: 2)
-resolution X = the sphere resolution in angle increments (default: 100 deg)
<http://www.pointclouds.org/documentation/advanced/pcl_style_guide.php>`_ to
familiarize yourself with the concepts.
-There's two different ways we could set up the structure: i) set up the code
+There're two different ways we could set up the structure: i) set up the code
separately, as a standalone PCL class, but outside of the PCL code tree; or ii)
set up the files directly in the PCL code tree. Since our assumption is that
the end result will be contributed back to PCL, it's best to concentrate on the
We also need a name for our new class. Let's call it `BilateralFilter`.
-.. [*] The PCL Filtering API specifies that two definitions and implementations must be available for every algorithm: one operating on PointCloud<T> and another one operating on PCLPointCloud2. For the purpose of this tutorial, we will concentrate only on the former.
+.. [*] Some PCL filter algorithms provide two implementations: one for PointCloud<T> types and another one operating on legacy PCLPointCloud2 types. This is no longer required.
bilateral.h
===========
#include <pcl/filters/bilateral.h>
- #endif // PCL_FILTERS_BILATERAL_H_
+ #endif // PCL_FILTERS_BILATERAL_IMPL_H_
This should be straightforward. We haven't declared any methods for
`BilateralFilter` yet, therefore there is no implementation.
#include <pcl/filters/impl/bilateral.hpp>
Because we are writing templated code in PCL (1.x) where the template parameter
-is a point type (see :ref:`adding_custom_ptype`), we want to explicitely
+is a point type (see :ref:`adding_custom_ptype`), we want to explicitly
instantiate the most common use cases in *bilateral.cpp*, so that users don't
have to spend extra cycles when compiling code that uses our
`BilateralFilter`. To do this, we need to access both the header
bilateral.cpp
=============
-As previously mentioned, we're going to explicitely instantiate and
+As previously mentioned, we're going to explicitly instantiate and
*precompile* a number of templated specializations for the `BilateralFilter`
class. While this might lead to an increased compilation time for the PCL
Filtering library, it will save users the pain of processing and compiling the
Note that at this point we haven't declared the PCL_INSTANTIATE template for
`BilateralFilter`, nor did we actually implement the pure virtual functions in
-the abstract class :pcl:`pcl::Filter<pcl::Filter>` so attemping to compile the
+the abstract class :pcl:`pcl::Filter<pcl::Filter>` so attempting to compile the
code will result in errors like::
filters/src/bilateral.cpp:6:32: error: expected constructor, destructor, or type conversion before ‘(’ token
}
double
- getSigmaS ()
+ getSigmaS () const
{
return (sigma_s_);
}
}
double
- getSigmaR ()
+ getSigmaR () const
{
return (sigma_r_);
}
}
double
- getSigmaS ()
+ getSigmaS () const
{
return (sigma_s_);
}
}
double
- getSigmaR ()
+ getSigmaR () const
{
return (sigma_r_);
}
bilateral.hpp
=============
-There's two methods that we need to implement here, namely `applyFilter` and
+There're two methods that we need to implement here, namely `applyFilter` and
`computePointWeight`.
.. code-block:: cpp
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_H_
+ #endif // PCL_FILTERS_BILATERAL_IMPL_H_
One additional thing that we can do is error checking on:
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_H_
+ #endif // PCL_FILTERS_BILATERAL_IMPL_H_
Taking advantage of other PCL concepts
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_H_
+ #endif // PCL_FILTERS_BILATERAL_IMPL_H_
To make :pcl:`indices_<pcl::PCLBase::indices_>` work without typing the full
construct, we need to add a new line to *bilateral.h* that specifies the class
/** \brief Get the half size of the Gaussian bilateral filter window as set by the user. */
double
- getHalfSize ()
+ getHalfSize () const
{
return (sigma_s_);
}
/** \brief Get the value of the current standard deviation parameter of the bilateral filter. */
double
- getStdDev ()
+ getStdDev () const
{
return (sigma_r_);
}
#endif // PCL_FILTERS_BILATERAL_H_
-And the *bilateral.hpp* like:
+And the *bilateral.hpp* likes:
.. code-block:: cpp
:linenos:
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
- #endif // PCL_FILTERS_BILATERAL_H_
+ #endif // PCL_FILTERS_BILATERAL_IMPL_H_
Testing the new class
:language: cpp
:lines: 2-3
-pcl/io/pcd_io.h is the header that contains the definitions for PCD I/O
-operations, and pcl/point_types.h contains definitions for several PointT type
-structures (pcl::PointXYZ in our case).
+The first file is the header that contains the definitions for PCD I/O
+operations, and second one contains definitions for several point type
+structures, including ``pcl::PointXYZ`` that we will use.
.. literalinclude:: sources/pcd_write/pcd_write.cpp
:language: cpp
:lines: 8
describes the templated PointCloud structure that we will create. The type of
-each point is set to pcl::PointXYZ, which is:
-
-.. code-block:: cpp
-
- // \brief A point structure representing Euclidean xyz coordinates.
- struct PointXYZ
- {
- float x;
- float y;
- float z;
- };
+each point is set to ``pcl::PointXYZ``, which is a structure that has ``x``,
+``y``, and ``z`` fields.
The lines:
don.setNormalScaleSmall(normals_small_scale);
if(!don.initCompute ()){
- std::cerr << "Error: Could not intialize DoN feature operator" << std::endl;
+ std::cerr << "Error: Could not initialize DoN feature operator" << std::endl;
exit(EXIT_FAILURE);
}
gradient_est.setSearchMethod(treept2);
gradient_est.setRadiusSearch(0.25);
gradient_est.compute(*cloud_ig);
- std::cout<<" Intesity Gradient estimated";
+ std::cout<<" Intensity Gradient estimated";
std::cout<<" with size "<< cloud_ig->points.size() <<std::endl;
CPCSegmentation Parameters: \n\
-cut <max_cuts>,<cutting_min_segments>,<min_cut_score> - Plane cutting parameters for splitting of segments\n\
<max_cuts> - Perform cuts up to this recursion level. Cuts are performed in each segment separately (default 25)\n\
- <cutting_min_segments> - Minumum number of supervoxels in the segment to perform cutting (default 400).\n\
- <min_cut_score> - Minumum score a proposed cut needs to have for being cut (default 0.16)\n\
+ <cutting_min_segments> - Minimum number of supervoxels in the segment to perform cutting (default 400).\n\
+ <min_cut_score> - Minimum score a proposed cut needs to have for being cut (default 0.16)\n\
-clocal - Use locally constrained cuts (recommended flag)\n\
-cdir - Use directed weigths (recommended flag) \n\
-cclean - Use clean cuts. \n\
{
pcl::console::parse (argc, argv, "-o", outputname);
- // If no filename is given, get output filename from inputname (strip seperators and file extension)
+ // If no filename is given, get output filename from inputname (strip separators and file extension)
if (outputname.empty () || (outputname.at (0) == '-'))
{
outputname = pcd_filename;
textcolor = bg_white?0:1;
pcl::console::print_info ("Maximum cuts: %d\n", max_cuts);
- pcl::console::print_info ("Minumum segment siz: %d\n", cutting_min_segments);
+ pcl::console::print_info ("Minimum segment size: %d\n", cutting_min_segments);
pcl::console::print_info ("Use local constrain: %d\n", use_local_constrain);
pcl::console::print_info ("Use directed weights: %d\n", use_directed_cutting);
pcl::console::print_info ("Use clean cuts: %d\n", use_clean_cutting);
/// Get the cloud of supervoxel centroid with normals and the colored cloud with supervoxel coloring (this is used for visulization)
pcl::PointCloud<pcl::PointNormal>::Ptr sv_centroid_normal_cloud = pcl::SupervoxelClustering<PointT>::makeSupervoxelNormalCloud (supervoxel_clusters);
- /// Set paramters for LCCP preprocessing and CPC (CPC inherits from LCCP, thus it includes LCCP's functionality)
+ /// Set parameters for LCCP preprocessing and CPC (CPC inherits from LCCP, thus it includes LCCP's functionality)
PCL_INFO ("Starting Segmentation\n");
pcl::CPCSegmentation<PointT> cpc;
const unsigned char concave_color [3] = {255, 0, 0};
const unsigned char cut_color [3] = { 0,255, 0};
const unsigned char* convex_color = bg_white ? black_color : white_color;
- const unsigned char* color;
+ const unsigned char* color = NULL;
//The vertices in the supervoxel adjacency list are the supervoxel centroids
//This iterates through them, finding the edges
color = concave_color;
// two times since we add also two points per edge
+#if (VTK_MAJOR_VERSION < 7) || (VTK_MAJOR_VERSION == 7 && VTK_MINOR_VERSION == 0)
colors->InsertNextTupleValue (color);
colors->InsertNextTupleValue (color);
-
+#else
+ colors->InsertNextTypedTuple (color);
+ colors->InsertNextTypedTuple (color);
+#endif
pcl::Supervoxel<PointT>::Ptr supervoxel = supervoxel_clusters.at (sv_label);
pcl::PointXYZRGBA vert_curr = supervoxel->centroid_;
std::cout << "No of clusters formed are " << cluster_indices.size () << std::endl;
- // Saving the clusters in seperate pcd files
+ // Saving the clusters in separate pcd files
int j = 0;
for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
{
{
pcl::console::parse (argc, argv, "-o", outputname);
- // If no filename is given, get output filename from inputname (strip seperators and file extension)
+ // If no filename is given, get output filename from inputname (strip separators and file extension)
if (outputname.empty () || (outputname.at (0) == '-'))
{
outputname = pcd_filename;
color = concave_color;
// two times since we add also two points per edge
+#if (VTK_MAJOR_VERSION < 7) || (VTK_MAJOR_VERSION == 7 && VTK_MINOR_VERSION == 0)
colors->InsertNextTupleValue (color);
colors->InsertNextTupleValue (color);
+#else
+ colors->InsertNextTypedTuple (color);
+ colors->InsertNextTypedTuple (color);
+#endif
pcl::Supervoxel<PointT>::Ptr supervoxel = supervoxel_clusters.at (sv_label);
pcl::PointXYZRGBA vert_curr = supervoxel->centroid_;
new_point.z = depth;
}
- uint32_t rgb = static_cast<uint32_t>(color_pixel[0]) << 16 | static_cast<uint32_t>(color_pixel[1]) << 8 | static_cast<uint32_t>(color_pixel[2]);
- new_point.rgb = *reinterpret_cast<float*> (&rgb);
+ new_point.r = color_pixel[0];
+ new_point.g = color_pixel[1];
+ new_point.b = color_pixel[2];
cloud->points.push_back (new_point);
}
"include/pcl/${SUBSYS_NAME}/boost.h"
"include/pcl/${SUBSYS_NAME}/eigen.h"
"include/pcl/${SUBSYS_NAME}/board.h"
+ "include/pcl/${SUBSYS_NAME}/flare.h"
"include/pcl/${SUBSYS_NAME}/brisk_2d.h"
"include/pcl/${SUBSYS_NAME}/cppf.h"
"include/pcl/${SUBSYS_NAME}/cvfh.h"
"include/pcl/${SUBSYS_NAME}/fpfh.h"
"include/pcl/${SUBSYS_NAME}/fpfh_omp.h"
"include/pcl/${SUBSYS_NAME}/from_meshes.h"
+ "include/pcl/${SUBSYS_NAME}/gasd.h"
"include/pcl/${SUBSYS_NAME}/gfpfh.h"
"include/pcl/${SUBSYS_NAME}/integral_image2D.h"
"include/pcl/${SUBSYS_NAME}/integral_image_normal.h"
set(impl_incs
"include/pcl/${SUBSYS_NAME}/impl/board.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/flare.hpp"
"include/pcl/${SUBSYS_NAME}/impl/brisk_2d.hpp"
"include/pcl/${SUBSYS_NAME}/impl/cppf.hpp"
"include/pcl/${SUBSYS_NAME}/impl/cvfh.hpp"
"include/pcl/${SUBSYS_NAME}/impl/feature.hpp"
"include/pcl/${SUBSYS_NAME}/impl/fpfh.hpp"
"include/pcl/${SUBSYS_NAME}/impl/fpfh_omp.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/gasd.hpp"
"include/pcl/${SUBSYS_NAME}/impl/gfpfh.hpp"
"include/pcl/${SUBSYS_NAME}/impl/integral_image2D.hpp"
"include/pcl/${SUBSYS_NAME}/impl/integral_image_normal.hpp"
set(srcs
src/board.cpp
+ src/flare.cpp
src/brisk_2d.cpp
src/boundary.cpp
src/cppf.cpp
src/crh.cpp
src/don.cpp
src/fpfh.cpp
+ src/gasd.cpp
src/gfpfh.cpp
src/integral_image_normal.cpp
src/intensity_gradient.cpp
)
if(MSVC)
- # Workaround to aviod hitting the MSVC 4GB linker memory limit when building pcl_features.
+ # Workaround to avoid hitting the MSVC 4GB linker memory limit when building pcl_features.
# Disable whole program optimization (/GL) and link-time code generation (/LTCG).
string(REPLACE "/GL" "" CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
string(REPLACE "/LTCG" "" CMAKE_SHARED_LINKER_FLAGS_RELEASE ${CMAKE_SHARED_LINKER_FLAGS_RELEASE})
*
* \note This should never be called by a regular user. We use a fixed type in PCL
* (BRISKSignature512) and tampering with the parameters might lead to a different
- * size descriptor which the user needs to accomodate in a new point type.
+ * size descriptor which the user needs to accommodate in a new point type.
*/
void
generateKernel (std::vector<float> &radius_list,
min_points_ = min;
}
- /** \brief Sets wether if the CVFH signatures should be normalized or not
+ /** \brief Sets whether if the CVFH signatures should be normalized or not
* \param[in] normalize true if normalization is required, false otherwise
*/
inline void
*/
float leaf_size_;
- /** \brief Wether to normalize the signatures or not. Default: false. */
+ /** \brief Whether to normalize the signatures or not. Default: false. */
bool normalize_bins_;
/** \brief Curvature threshold for removing normals. */
--- /dev/null
+/*
+* Software License Agreement (BSD License)
+*
+* Point Cloud Library (PCL) - www.pointclouds.org
+* Copyright (c) 2016-, Open Perception, Inc.
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+* * Neither the name of the copyright holder(s) nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+*
+*/
+
+#ifndef PCL_FLARE_H_
+#define PCL_FLARE_H_
+
+#include <pcl/point_types.h>
+#include <pcl/features/feature.h>
+#include <pcl/features/normal_3d.h>
+
+
+namespace pcl
+{
+
+ /** \brief FLARELocalReferenceFrameEstimation implements the Fast LocAl Reference framE algorithm
+ * for local reference frame estimation as described here:
+ *
+ * - A. Petrelli, L. Di Stefano,
+ * "A repeatable and efficient canonical reference for surface matching",
+ * 3DimPVT, 2012
+ *
+ * FLARE algorithm is deployed in ReLOC algorithm proposed in:
+ *
+ * Petrelli A., Di Stefano L., "Pairwise registration by local orientation cues", Computer Graphics Forum, 2015.
+ *
+ * \author Alioscia Petrelli
+ * \ingroup features
+ */
+ template<typename PointInT, typename PointNT, typename PointOutT = ReferenceFrame, typename SignedDistanceT = float>
+ class FLARELocalReferenceFrameEstimation : public FeatureFromNormals<PointInT, PointNT, PointOutT>
+ {
+ protected:
+ using Feature<PointInT, PointOutT>::feature_name_;
+ using Feature<PointInT, PointOutT>::input_;
+ using Feature<PointInT, PointOutT>::indices_;
+ using Feature<PointInT, PointOutT>::surface_;
+ using Feature<PointInT, PointOutT>::tree_;
+ using Feature<PointInT, PointOutT>::search_parameter_;
+ using FeatureFromNormals<PointInT, PointNT, PointOutT>::normals_;
+ using Feature<PointInT, PointOutT>::fake_surface_;
+ using Feature<PointInT, PointOutT>::getClassName;
+
+ using typename Feature<PointInT, PointOutT>::PointCloudIn;
+ using typename Feature<PointInT, PointOutT>::PointCloudOut;
+
+ using typename Feature<PointInT, PointOutT>::PointCloudInConstPtr;
+
+ using typename Feature<PointInT, PointOutT>::KdTreePtr;
+
+ typedef typename pcl::PointCloud<SignedDistanceT> PointCloudSignedDistance;
+ typedef typename PointCloudSignedDistance::Ptr PointCloudSignedDistancePtr;
+
+ typedef boost::shared_ptr<FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT> > Ptr;
+ typedef boost::shared_ptr<const FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT> > ConstPtr;
+
+ public:
+ /** \brief Constructor. */
+ FLARELocalReferenceFrameEstimation () :
+ tangent_radius_ (0.0f),
+ margin_thresh_ (0.85f),
+ min_neighbors_for_normal_axis_ (6),
+ min_neighbors_for_tangent_axis_ (6),
+ sampled_surface_ (),
+ sampled_tree_ (),
+ fake_sampled_surface_ (false)
+ {
+ feature_name_ = "FLARELocalReferenceFrameEstimation";
+ }
+
+ //Getters/Setters
+
+ /** \brief Set the maximum distance of the points used to estimate the x_axis of the FLARE Reference Frame for a given point.
+ *
+ * \param[in] radius The search radius for x axis.
+ */
+ inline void
+ setTangentRadius (float radius)
+ {
+ tangent_radius_ = radius;
+ }
+
+ /** \brief Get the maximum distance of the points used to estimate the x_axis of the FLARE Reference Frame for a given point.
+ *
+ * \return The search radius for x axis.
+ */
+ inline float
+ getTangentRadius () const
+ {
+ return (tangent_radius_);
+ }
+
+ /** \brief Set the percentage of the search tangent radius after which a point is considered part of the support.
+ *
+ * \param[in] margin_thresh the percentage of the search tangent radius after which a point is considered part of the support.
+ */
+ inline void
+ setMarginThresh (float margin_thresh)
+ {
+ margin_thresh_ = margin_thresh;
+ }
+
+ /** \brief Get the percentage of the search tangent radius after which a point is considered part of the support.
+ *
+ * \return The percentage of the search tangent radius after which a point is considered part of the support.
+ */
+ inline float
+ getMarginThresh () const
+ {
+ return (margin_thresh_);
+ }
+
+
+ /** \brief Set min number of neighbours required for the computation of Z axis.
+ *
+ * \param[in] min_neighbors_for_normal_axis min number of neighbours required for the computation of Z axis.
+ */
+ inline void
+ setMinNeighboursForNormalAxis (int min_neighbors_for_normal_axis)
+ {
+ min_neighbors_for_normal_axis_ = min_neighbors_for_normal_axis;
+ }
+
+ /** \brief Get min number of neighbours required for the computation of Z axis.
+ *
+ * \return min number of neighbours required for the computation of Z axis.
+ */
+ inline int
+ getMinNeighboursForNormalAxis () const
+ {
+ return (min_neighbors_for_normal_axis_);
+ }
+
+
+ /** \brief Set min number of neighbours required for the computation of X axis.
+ *
+ * \param[in] min_neighbors_for_tangent_axis min number of neighbours required for the computation of X axis.
+ */
+ inline void
+ setMinNeighboursForTangentAxis (int min_neighbors_for_tangent_axis)
+ {
+ min_neighbors_for_tangent_axis_ = min_neighbors_for_tangent_axis;
+ }
+
+ /** \brief Get min number of neighbours required for the computation of X axis.
+ *
+ * \return min number of neighbours required for the computation of X axis.
+ */
+ inline int
+ getMinNeighboursForTangentAxis () const
+ {
+ return (min_neighbors_for_tangent_axis_);
+ }
+
+
+ /** \brief Provide a pointer to the dataset used for the estimation of X axis.
+ * As the estimation of x axis is negligibly affected by surface downsampling,
+ * this method lets to consider a downsampled version of surface_ in the estimation of x axis.
+ * This is optional, if this is not set, it will only use the data in the
+ * surface_ cloud to estimate the x axis.
+ * \param[in] cloud a pointer to a PointCloud
+ */
+ inline void
+ setSearchSampledSurface(const PointCloudInConstPtr &cloud)
+ {
+ sampled_surface_ = cloud;
+ fake_sampled_surface_ = false;
+ }
+
+ /** \brief Get a pointer to the sampled_surface_ cloud dataset. */
+ inline const PointCloudInConstPtr&
+ getSearchSampledSurface() const
+ {
+ return (sampled_surface_);
+ }
+
+ /** \brief Provide a pointer to the search object linked to sampled_surface.
+ * \param[in] tree a pointer to the spatial search object linked to sampled_surface.
+ */
+ inline void
+ setSearchMethodForSampledSurface (const KdTreePtr &tree) { sampled_tree_ = tree; }
+
+ /** \brief Get a pointer to the search method used for the extimation of x axis. */
+ inline const KdTreePtr&
+ getSearchMethodForSampledSurface () const
+ {
+ return (sampled_tree_);
+ }
+
+ /** \brief Get the signed distances of the highest points from the fitted planes. */
+ inline const std::vector<SignedDistanceT> &
+ getSignedDistancesFromHighestPoints () const
+ {
+ return (signed_distances_from_highest_points_);
+ }
+
+ protected:
+ /** \brief This method should get called before starting the actual computation. */
+ virtual bool
+ initCompute ();
+
+ /** \brief This method should get called after the actual computation is ended. */
+ virtual bool
+ deinitCompute ();
+
+ /** \brief Estimate the LRF descriptor for a given point based on its spatial neighborhood of 3D points with normals
+ * \param[in] index the index of the point in input_
+ * \param[out] lrf the resultant local reference frame
+ * \return signed distance of the highest point from the fitted plane. Max if the lrf is not computable.
+ */
+ SignedDistanceT
+ computePointLRF (const int index, Eigen::Matrix3f &lrf);
+
+ /** \brief Abstract feature estimation method.
+ * \param[out] output the resultant features
+ */
+ virtual void
+ computeFeature (PointCloudOut &output);
+
+
+ private:
+ /** \brief Radius used to find tangent axis. */
+ float tangent_radius_;
+
+ /** \brief Threshold that define if a support point is near the margins. */
+ float margin_thresh_;
+
+ /** \brief Min number of neighbours required for the computation of Z axis. Otherwise, feature point normal is used. */
+ int min_neighbors_for_normal_axis_;
+
+ /** \brief Min number of neighbours required for the computation of X axis. Otherwise, a random X axis is set */
+ int min_neighbors_for_tangent_axis_;
+
+ /** \brief An input point cloud describing the surface that is to be used
+ * for nearest neighbor searches for the estimation of X axis.
+ */
+ PointCloudInConstPtr sampled_surface_;
+
+ /** \brief A pointer to the spatial search object used for the estimation of X axis. */
+ KdTreePtr sampled_tree_;
+
+ /** \brief Class for normal estimation. */
+ NormalEstimation<PointInT, PointNT> normal_estimation_;
+
+ /** \brief Signed distances of the highest points from the fitted planes.*/
+ std::vector<SignedDistanceT> signed_distances_from_highest_points_;
+
+ /** \brief If no sampled_surface_ is given, we use surface_ as the sampled surface. */
+ bool fake_sampled_surface_;
+
+ };
+
+}
+
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/features/impl/flare.hpp>
+#endif
+
+#endif //#ifndef PCL_FLARE_H_
* In Proceedings of the 22nd IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS),
* St. Louis, MO, USA, October 11-15 2009.
*
- * \attention
+ * \attention
* The convention for FPFH features is:
- * - if a query point's nearest neighbors cannot be estimated, the FPFH feature will be set to NaN
+ * - if a query point's nearest neighbors cannot be estimated, the FPFH feature will be set to NaN
* (not a number)
* - it is impossible to estimate a FPFH descriptor for a point that
* doesn't have finite 3D coordinates. Therefore, any point that contains
/** \brief Initialize the scheduler and set the number of threads to use.
* \param[in] nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- FPFHEstimationOMP (unsigned int nr_threads = 0) : nr_bins_f1_ (11), nr_bins_f2_ (11), nr_bins_f3_ (11), threads_ (nr_threads)
+ FPFHEstimationOMP (unsigned int nr_threads = 0) : nr_bins_f1_ (11), nr_bins_f2_ (11), nr_bins_f3_ (11)
{
feature_name_ = "FPFHEstimationOMP";
+
+ setNumberOfThreads(nr_threads);
}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param[in] nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
private:
/** \brief Estimate the Fast Point Feature Histograms (FPFH) descriptors at a set of points given by
* setSearchMethod ()
* \param[out] output the resultant point cloud model dataset that contains the FPFH feature estimates
*/
- void
+ void
computeFeature (PointCloudOut &output);
public:
#ifndef PCL_FEATURES_FROM_MESHES_H_
#define PCL_FEATURES_FROM_MESHES_H_
-#include <pcl/features/normal_3d.h>
+#include "pcl/features/normal_3d.h"
+#include "pcl/Vertices.h"
namespace pcl
{
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2016-, Open Perception, Inc.
+ * Copyright (c) 2016, Voxar Labs, CIn-UFPE / DEINFO-UFRPE
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_FEATURES_GASD_H_
+#define PCL_FEATURES_GASD_H_
+
+#include <pcl/features/feature.h>
+#include <pcl/common/common.h>
+#include <pcl/point_cloud.h>
+
+namespace pcl
+{
+ /// Different histogram interpolation methods
+ enum HistogramInterpolationMethod
+ {
+ INTERP_NONE, ///< no interpolation
+ INTERP_TRILINEAR, ///< trilinear interpolation
+ INTERP_QUADRILINEAR ///< quadrilinear interpolation
+ };
+
+ /** \brief GASDEstimation estimates the Globally Aligned Spatial Distribution (GASD) descriptor for a given
+ * point cloud dataset given XYZ data.
+ *
+ * The suggested PointOutT is pcl::GASDSignature512.
+ *
+ * \note If you use this code in any academic work, please cite:
+ *
+ * - J. Lima, V. Teichrieb.
+ * An Efficient Global Point Cloud Descriptor for Object Recognition and Pose Estimation.
+ * In Proceedings of the 29th SIBGRAPI - Conference on Graphics, Patterns and Images,
+ * Sao Jose dos Campos, Brazil, October 4-7 2016.
+ *
+ * \author Joao Paulo Lima
+ *
+ * Voxar Labs, Centro de Informatica, Universidade Federal de Pernambuco, Brazil
+ *
+ * Departamento de Estatistica e Informatica, Universidade Federal Rural de Pernambuco, Brazil
+ *
+ * \ingroup features
+ */
+ template <typename PointInT, typename PointOutT = GASDSignature512>
+ class GASDEstimation : public Feature<PointInT, PointOutT>
+ {
+ public:
+ using typename Feature<PointInT, PointOutT>::PointCloudIn;
+ using typename Feature<PointInT, PointOutT>::PointCloudOut;
+ typedef boost::shared_ptr<GASDEstimation<PointInT, PointOutT> > Ptr;
+ typedef boost::shared_ptr<const GASDEstimation<PointInT, PointOutT> > ConstPtr;
+
+ /** \brief Constructor.
+ * \param[in] view_direction view direction
+ * \param[in] shape_half_grid_size shape half grid size
+ * \param[in] shape_hists_size shape histograms size
+ * \param[in] shape_interp shape histograms interpolation method
+ */
+ GASDEstimation (const Eigen::Vector3f &view_direction = Eigen::Vector3f (0.0f, 0.0f, 1.0f),
+ const size_t shape_half_grid_size = 4,
+ const size_t shape_hists_size = 1,
+ const HistogramInterpolationMethod shape_interp = INTERP_TRILINEAR) :
+ view_direction_ (view_direction),
+ shape_half_grid_size_ (shape_half_grid_size),
+ shape_hists_size_ (shape_hists_size),
+ shape_interp_ (shape_interp)
+ {
+ search_radius_ = 0;
+ k_ = 1;
+ feature_name_ = "GASDEstimation";
+ }
+
+ /** \brief Set the view direction.
+ * \param[in] dir view direction
+ */
+ inline void
+ setViewDirection (const Eigen::Vector3f &dir)
+ {
+ view_direction_ = dir;
+ }
+
+ /** \brief Set the shape half grid size.
+ * \param[in] shgs shape half grid size
+ */
+ inline void
+ setShapeHalfGridSize (const size_t shgs)
+ {
+ shape_half_grid_size_ = shgs;
+ }
+
+ /** \brief Set the shape histograms size. If size is 1, then each histogram bin will store the number
+ * of points that belong to its correspondent cell in the 3D regular grid. If size > 1, then for each cell
+ * it will be computed a histogram of normalized distances between each sample and the cloud centroid
+ * \param[in] shs shape histograms size
+ */
+ inline void
+ setShapeHistsSize (const size_t shs)
+ {
+ shape_hists_size_ = shs;
+ }
+
+ /** \brief Set the shape histograms interpolation method.
+ * \param[in] interp shape histograms interpolation method
+ */
+ inline void
+ setShapeHistsInterpMethod (const HistogramInterpolationMethod interp)
+ {
+ shape_interp_ = interp;
+ }
+
+ /** \brief Returns the transformation aligning the point cloud to the canonical coordinate system
+ * \param[out] trans transformation
+ */
+ const Eigen::Matrix4f&
+ getTransform () const
+ {
+ return transform_;
+ }
+
+ /** \brief Overloaded computed method from pcl::Feature.
+ * \param[out] output the resultant point cloud model dataset containing the estimated feature
+ */
+ void
+ compute (PointCloudOut &output);
+
+ protected:
+ using Feature<PointInT, PointOutT>::feature_name_;
+ using Feature<PointInT, PointOutT>::getClassName;
+ using Feature<PointInT, PointOutT>::indices_;
+ using Feature<PointInT, PointOutT>::k_;
+ using Feature<PointInT, PointOutT>::search_radius_;
+ using Feature<PointInT, PointOutT>::surface_;
+
+ /** \brief Point cloud aligned to the canonical coordinate system. */
+ PointCloudIn shape_samples_;
+
+ /** \brief Normalization factor with respect to axis-aligned bounding cube centered on the origin. */
+ float max_coord_;
+
+ /** \brief Normalized sample contribution with respect to the total number of points in the cloud. */
+ float hist_incr_;
+
+ /** \brief Current position of output descriptor point cloud. */
+ size_t pos_;
+
+ /** \brief add a sample to its respective histogram, optionally performing interpolation.
+ * \param[in] p histogram sample
+ * \param[in] max_coord normalization factor with respect to axis-aligned bounding cube centered on the origin
+ * \param[in] half_grid_size half size of the regular grid used to compute the descriptor
+ * \param[in] interp interpolation method to be used while computing the descriptor
+ * \param[in] hbin histogram bin
+ * \param[in] hist_incr normalization factor of sample contribution
+ * \param[in,out] hists updated histograms
+ */
+ void
+ addSampleToHistograms (const Eigen::Vector4f &p,
+ const float max_coord,
+ const size_t half_grid_size,
+ const HistogramInterpolationMethod interp,
+ const float hbin,
+ const float hist_incr,
+ std::vector<Eigen::VectorXf> &hists);
+
+ /** \brief Estimate GASD descriptor
+ *
+ * \param[out] output the resultant point cloud model dataset containing the GASD feature
+ */
+ void
+ computeFeature (PointCloudOut &output);
+
+ private:
+ /** \brief Transform that aligns the point cloud to the canonical coordinate system. */
+ Eigen::Matrix4f transform_;
+
+ /** \brief Viewing direction, default value is (0, 0, 1). */
+ Eigen::Vector3f view_direction_;
+
+ /** \brief Half size of the regular grid used to compute the shape descriptor. */
+ size_t shape_half_grid_size_;
+
+ /** \brief Size of the histograms of normalized distances between each sample and the cloud centroid. */
+ size_t shape_hists_size_;
+
+ /** \brief Interpolation method to be used while computing the shape descriptor. */
+ HistogramInterpolationMethod shape_interp_;
+
+ /** \brief Estimates a reference frame for the point cloud and uses it to compute a transform that aligns the point cloud to the canonical coordinate system. */
+ void
+ computeAlignmentTransform ();
+
+ /** \brief copy computed shape histograms to output descriptor point cloud
+ * \param[in] grid_size size of the regular grid used to compute the descriptor
+ * \param[in] hists_size size of the shape histograms
+ * \param[in] hists shape histograms
+ * \param[out] output output descriptor point cloud
+ * \param[in,out] pos current position of output descriptor point cloud
+ */
+ void
+ copyShapeHistogramsToOutput (const size_t grid_size,
+ const size_t hists_size,
+ const std::vector<Eigen::VectorXf> &hists,
+ PointCloudOut &output,
+ size_t &pos);
+ };
+
+ /** \brief GASDColorEstimation estimates the Globally Aligned Spatial Distribution (GASD) descriptor for a given
+ * point cloud dataset given XYZ and RGB data.
+ *
+ * The suggested PointOutT is pcl::GASDSignature984.
+ *
+ * \note If you use this code in any academic work, please cite:
+ *
+ * - J. Lima, V. Teichrieb.
+ * An Efficient Global Point Cloud Descriptor for Object Recognition and Pose Estimation.
+ * In Proceedings of the 29th SIBGRAPI - Conference on Graphics, Patterns and Images,
+ * Sao Jose dos Campos, Brazil, October 4-7 2016.
+ *
+ * \author Joao Paulo Lima
+ *
+ * Voxar Labs, Centro de Informatica, Universidade Federal de Pernambuco, Brazil
+ *
+ * Departamento de Estatistica e Informatica, Universidade Federal Rural de Pernambuco, Brazil
+ *
+ * \ingroup features
+ */
+ template <typename PointInT, typename PointOutT = GASDSignature984>
+ class GASDColorEstimation : public GASDEstimation<PointInT, PointOutT>
+ {
+ public:
+ using typename Feature<PointInT, PointOutT>::PointCloudOut;
+ typedef boost::shared_ptr<GASDColorEstimation<PointInT, PointOutT> > Ptr;
+ typedef boost::shared_ptr<const GASDColorEstimation<PointInT, PointOutT> > ConstPtr;
+
+ /** \brief Constructor.
+ * \param[in] view_direction view direction
+ * \param[in] shape_half_grid_size shape half grid size
+ * \param[in] shape_hists_size shape histograms size
+ * \param[in] color_half_grid_size color half grid size
+ * \param[in] color_hists_size color histograms size
+ * \param[in] shape_interp shape histograms interpolation method
+ * \param[in] color_interp color histograms interpolation method
+ */
+ GASDColorEstimation (const Eigen::Vector3f &view_direction = Eigen::Vector3f (0.0f, 0.0f, 1.0f),
+ const size_t shape_half_grid_size = 3,
+ const size_t shape_hists_size = 1,
+ const size_t color_half_grid_size = 2,
+ const size_t color_hists_size = 12,
+ const HistogramInterpolationMethod shape_interp = INTERP_NONE,
+ const HistogramInterpolationMethod color_interp = INTERP_NONE) :
+ GASDEstimation<PointInT, PointOutT> (view_direction, shape_half_grid_size, shape_hists_size, shape_interp),
+ color_half_grid_size_ (color_half_grid_size),
+ color_hists_size_ (color_hists_size),
+ color_interp_ (color_interp)
+ {
+ feature_name_ = "GASDColorEstimation";
+ }
+
+ /** \brief Set the color half grid size.
+ * \param[in] chgs color half grid size
+ */
+ inline void
+ setColorHalfGridSize (const size_t chgs)
+ {
+ color_half_grid_size_ = chgs;
+ }
+
+ /** \brief Set the color histograms size (number of bins in the hue histogram for each cell of the 3D regular grid).
+ * \param[in] chs color histograms size
+ */
+ inline void
+ setColorHistsSize (const size_t chs)
+ {
+ color_hists_size_ = chs;
+ }
+
+ /** \brief Set the color histograms interpolation method.
+ * \param[in] interp color histograms interpolation method
+ */
+ inline void
+ setColorHistsInterpMethod (const HistogramInterpolationMethod interp)
+ {
+ color_interp_ = interp;
+ }
+
+ protected:
+ using Feature<PointInT, PointOutT>::feature_name_;
+ using Feature<PointInT, PointOutT>::getClassName;
+ using Feature<PointInT, PointOutT>::indices_;
+ using Feature<PointInT, PointOutT>::k_;
+ using Feature<PointInT, PointOutT>::search_radius_;
+ using Feature<PointInT, PointOutT>::surface_;
+ using GASDEstimation<PointInT, PointOutT>::shape_samples_;
+ using GASDEstimation<PointInT, PointOutT>::max_coord_;
+ using GASDEstimation<PointInT, PointOutT>::hist_incr_;
+ using GASDEstimation<PointInT, PointOutT>::pos_;
+
+ private:
+ /** \brief Half size of the regular grid used to compute the color descriptor. */
+ size_t color_half_grid_size_;
+
+ /** \brief Size of the hue histograms. */
+ size_t color_hists_size_;
+
+ /** \brief Interpolation method to be used while computing the color descriptor. */
+ HistogramInterpolationMethod color_interp_;
+
+ /** \brief copy computed color histograms to output descriptor point cloud
+ * \param[in] grid_size size of the regular grid used to compute the descriptor
+ * \param[in] hists_size size of the color histograms
+ * \param[in,out] hists color histograms, which are finalized, since they are circular
+ * \param[out] output output descriptor point cloud
+ * \param[in,out] pos current position of output descriptor point cloud
+ */
+ void
+ copyColorHistogramsToOutput (const size_t grid_size,
+ const size_t hists_size,
+ std::vector<Eigen::VectorXf> &hists,
+ PointCloudOut &output,
+ size_t &pos);
+
+ /** \brief Estimate GASD color descriptor
+ *
+ * \param[out] output the resultant point cloud model dataset containing the GASD color feature
+ */
+ void
+ computeFeature (PointCloudOut &output);
+ };
+} // namespace pcl
+
+#ifdef PCL_NO_PRECOMPILE
+#include <pcl/features/impl/gasd.hpp>
+#endif
+
+#endif //#ifndef PCL_FEATURES_GASD_H_
PCL_ERROR ("Shape Context Error INF!\n");
if (w != w)
PCL_ERROR ("Shape Context Error IND!\n");
- /// Accumulate w into correspondant Bin(j,k,l)
+ /// Accumulate w into correspondent Bin(j,k,l)
desc[(l*elevation_bins_*radius_bins_) + (k*radius_bins_) + j] += w;
assert (desc[(l*elevation_bins_*radius_bins_) + (k*radius_bins_) + j] >= 0);
--- /dev/null
+/*
+* Software License Agreement (BSD License)
+*
+* Point Cloud Library (PCL) - www.pointclouds.org
+* Copyright (c) 2016-, Open Perception, Inc.
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+* * Neither the name of the copyright holder(s) nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+*
+*/
+
+#ifndef PCL_FEATURES_IMPL_FLARE_H_
+#define PCL_FEATURES_IMPL_FLARE_H_
+
+#include <pcl/features/flare.h>
+#include <pcl/common/geometry.h>
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointNT, typename PointOutT, typename SignedDistanceT> bool
+ pcl::FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT, SignedDistanceT>::initCompute ()
+{
+ if (!FeatureFromNormals<PointInT, PointNT, PointOutT>::initCompute ())
+ {
+ PCL_ERROR ("[pcl::%s::initCompute] Init failed.\n", getClassName ().c_str ());
+ return (false);
+ }
+
+ if (tangent_radius_ == 0.0f)
+ {
+ PCL_ERROR ("[pcl::%s::initCompute] tangent_radius_ not set.\n", getClassName ().c_str ());
+ return (false);
+ }
+
+ // If no search sampled_surface_ has been defined, use the surface_ dataset as the search sampled_surface_ itself
+ if (!sampled_surface_)
+ {
+ fake_sampled_surface_ = true;
+ sampled_surface_ = surface_;
+
+ if (sampled_tree_)
+ {
+ PCL_WARN ("[pcl::%s::initCompute] sampled_surface_ is not set even if sampled_tree_ is already set.", getClassName ().c_str ());
+ PCL_WARN ("sampled_tree_ will be rebuilt from surface_. Use sampled_surface_.\n");
+ }
+ }
+
+ // Check if a space search locator was given for sampled_surface_
+ if (!sampled_tree_)
+ {
+ if (sampled_surface_->isOrganized () && surface_->isOrganized () && input_->isOrganized ())
+ sampled_tree_.reset (new pcl::search::OrganizedNeighbor<PointInT> ());
+ else
+ sampled_tree_.reset (new pcl::search::KdTree<PointInT> (false));
+ }
+
+ if (sampled_tree_->getInputCloud () != sampled_surface_) // Make sure the tree searches the sampled surface
+ sampled_tree_->setInputCloud (sampled_surface_);
+
+ return (true);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointNT, typename PointOutT, typename SignedDistanceT> bool
+ pcl::FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT, SignedDistanceT>::deinitCompute ()
+{
+ // Reset the surface
+ if (fake_surface_)
+ {
+ surface_.reset ();
+ fake_surface_ = false;
+ }
+ // Reset the sampled surface
+ if (fake_sampled_surface_)
+ {
+ sampled_surface_.reset ();
+ fake_sampled_surface_ = false;
+ }
+ return (true);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointNT, typename PointOutT, typename SignedDistanceT> SignedDistanceT
+ pcl::FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT, SignedDistanceT>::computePointLRF (const int index,
+ Eigen::Matrix3f &lrf)
+{
+ Eigen::Vector3f x_axis, y_axis;
+ Eigen::Vector3f fitted_normal; //z_axis
+
+ //find Z axis
+
+ //extract support points for the computation of Z axis
+ std::vector<int> neighbours_indices;
+ std::vector<float> neighbours_distances;
+ int n_neighbours = this->searchForNeighbors (index, search_parameter_, neighbours_indices, neighbours_distances);
+
+ if (n_neighbours < min_neighbors_for_normal_axis_)
+ {
+ if (!pcl::isFinite ((*normals_)[index]))
+ {
+ //normal is invalid
+ //setting lrf to NaN
+ lrf.setConstant (std::numeric_limits<float>::quiet_NaN ());
+ return (std::numeric_limits<SignedDistanceT>::max ());
+ }
+
+ //set z_axis as the normal of index point
+ fitted_normal = (*normals_)[index].getNormalVector3fMap ();
+ }
+ else
+ {
+ float plane_curvature;
+ normal_estimation_.computePointNormal (*surface_, neighbours_indices, fitted_normal (0), fitted_normal (1), fitted_normal (2), plane_curvature);
+
+ //disambiguate Z axis with normal mean
+ if (!pcl::flipNormalTowardsNormalsMean<PointNT> (*normals_, neighbours_indices, fitted_normal))
+ {
+ //all normals in the neighbourood are invalid
+ //setting lrf to NaN
+ lrf.setConstant (std::numeric_limits<float>::quiet_NaN ());
+ return (std::numeric_limits<SignedDistanceT>::max ());
+ }
+ }
+
+ //setting LRF Z axis
+ lrf.row (2).matrix () = fitted_normal;
+
+ //find X axis
+
+ //extract support points for Rx radius
+ n_neighbours = sampled_tree_->radiusSearch ((*input_)[index], tangent_radius_, neighbours_indices, neighbours_distances);
+
+ if (n_neighbours < min_neighbors_for_tangent_axis_)
+ {
+ //set X axis as a random axis
+ x_axis = pcl::geometry::randomOrthogonalAxis (fitted_normal);
+ y_axis = fitted_normal.cross (x_axis);
+
+ lrf.row (0).matrix () = x_axis;
+ lrf.row (1).matrix () = y_axis;
+
+ return (std::numeric_limits<SignedDistanceT>::max ());
+ }
+
+ //find point with the largest signed distance from tangent plane
+
+ SignedDistanceT shape_score;
+ SignedDistanceT best_shape_score = -std::numeric_limits<SignedDistanceT>::max ();
+ int best_shape_index = -1;
+
+ Eigen::Vector3f best_margin_point;
+
+ const float radius2 = tangent_radius_ * tangent_radius_;
+ const float margin_distance2 = margin_thresh_ * margin_thresh_ * radius2;
+
+ Vector3fMapConst feature_point = (*input_)[index].getVector3fMap ();
+
+ for (int curr_neigh = 0; curr_neigh < n_neighbours; ++curr_neigh)
+ {
+ const int& curr_neigh_idx = neighbours_indices[curr_neigh];
+ const float& neigh_distance_sqr = neighbours_distances[curr_neigh];
+
+ if (neigh_distance_sqr <= margin_distance2)
+ {
+ continue;
+ }
+
+ //point curr_neigh_idx is inside the ring between marginThresh and Radius
+
+ shape_score = fitted_normal.dot ((*sampled_surface_)[curr_neigh_idx].getVector3fMap ());
+
+ if (shape_score > best_shape_score)
+ {
+ best_shape_index = curr_neigh_idx;
+ best_shape_score = shape_score;
+ }
+ } //for each neighbor
+
+ if (best_shape_index == -1)
+ {
+ x_axis = pcl::geometry::randomOrthogonalAxis (fitted_normal);
+ y_axis = fitted_normal.cross (x_axis);
+
+ lrf.row (0).matrix () = x_axis;
+ lrf.row (1).matrix () = y_axis;
+
+ return (std::numeric_limits<SignedDistanceT>::max ());
+ }
+
+ //find orthogonal axis directed to best_shape_index point projection on plane with fittedNormal as axis
+ x_axis = pcl::geometry::projectedAsUnitVector (sampled_surface_->at (best_shape_index).getVector3fMap (), feature_point, fitted_normal);
+
+ y_axis = fitted_normal.cross (x_axis);
+
+ lrf.row (0).matrix () = x_axis;
+ lrf.row (1).matrix () = y_axis;
+ //z axis already set
+
+ best_shape_score -= fitted_normal.dot (feature_point);
+ return (best_shape_score);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointNT, typename PointOutT, typename SignedDistanceT> void
+ pcl::FLARELocalReferenceFrameEstimation<PointInT, PointNT, PointOutT, SignedDistanceT>::computeFeature (PointCloudOut &output)
+{
+ //check whether used with search radius or search k-neighbors
+ if (this->getKSearch () != 0)
+ {
+ PCL_ERROR (
+ "[pcl::%s::computeFeature] Error! Search method set to k-neighborhood. Call setKSearch (0) and setRadiusSearch (radius) to use this class.\n",
+ getClassName ().c_str ());
+ return;
+ }
+
+ signed_distances_from_highest_points_.resize (indices_->size ());
+
+ for (size_t point_idx = 0; point_idx < indices_->size (); ++point_idx)
+ {
+ Eigen::Matrix3f currentLrf;
+ PointOutT &rf = output[point_idx];
+
+ signed_distances_from_highest_points_[point_idx] = computePointLRF ((*indices_)[point_idx], currentLrf);
+ if (signed_distances_from_highest_points_[point_idx] == std::numeric_limits<SignedDistanceT>::max ())
+ {
+ output.is_dense = false;
+ }
+
+ rf.getXAxisVector3fMap () = currentLrf.row (0);
+ rf.getYAxisVector3fMap () = currentLrf.row (1);
+ rf.getZAxisVector3fMap () = currentLrf.row (2);
+ }
+}
+
+#define PCL_INSTANTIATE_FLARELocalReferenceFrameEstimation(T,NT,OutT,SdT) template class PCL_EXPORTS pcl::FLARELocalReferenceFrameEstimation<T,NT,OutT,SdT>;
+
+#endif // PCL_FEATURES_IMPL_FLARE_H_
#include <pcl/features/fpfh_omp.h>
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointNT, typename PointOutT> void
+pcl::FPFHEstimationOMP<PointInT, PointNT, PointOutT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::FPFHEstimationOMP<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
for (size_t idx = 0; idx < indices_->size (); ++idx)
{
int p_idx = (*indices_)[idx];
- if (this->searchForNeighbors (p_idx, search_parameter_, nn_indices, nn_dists) == 0)
+ if (!isFinite ((*input_)[p_idx]) ||
+ this->searchForNeighbors (p_idx, search_parameter_, nn_indices, nn_dists) == 0)
continue;
spfh_indices_set.insert (nn_indices.begin (), nn_indices.end ());
int p_idx = spfh_indices_vec[i];
// Find the neighborhood around p_idx
- if (this->searchForNeighbors (*surface_, p_idx, search_parameter_, nn_indices, nn_dists) == 0)
+ if (!isFinite ((*input_)[p_idx]) ||
+ this->searchForNeighbors (*surface_, p_idx, search_parameter_, nn_indices, nn_dists) == 0)
continue;
// Estimate the SPFH signature around p_idx
spfh_hist_lookup[p_idx] = i;
}
- // Intialize the array that will store the FPFH signature
+ // Initialize the array that will store the FPFH signature
int nr_bins = nr_bins_f1_ + nr_bins_f2_ + nr_bins_f3_;
nn_indices.clear();
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2016-, Open Perception, Inc.
+ * Copyright (c) 2016, Voxar Labs, CIn-UFPE / DEINFO-UFRPE
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef PCL_FEATURES_IMPL_GASD_H_
+#define PCL_FEATURES_IMPL_GASD_H_
+
+#include <pcl/features/gasd.h>
+#include <pcl/common/transforms.h>
+#include <pcl/point_types_conversion.h>
+
+#include <vector>
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDEstimation<PointInT, PointOutT>::compute (PointCloudOut &output)
+{
+ if (!Feature<PointInT, PointOutT>::initCompute ())
+ {
+ output.width = output.height = 0;
+ output.points.clear ();
+ return;
+ }
+
+ // Resize the output dataset
+ output.resize (1);
+
+ // Copy header and is_dense flag from input
+ output.header = surface_->header;
+ output.is_dense = surface_->is_dense;
+
+ // Perform the actual feature computation
+ computeFeature (output);
+
+ Feature<PointInT, PointOutT>::deinitCompute ();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDEstimation<PointInT, PointOutT>::computeAlignmentTransform ()
+{
+ Eigen::Vector4f centroid;
+ Eigen::Matrix3f covariance_matrix;
+
+ // compute centroid of the object's partial view
+ pcl::compute3DCentroid (*surface_, *indices_, centroid);
+
+ // compute covariance matrix from points and centroid of the object's partial view
+ pcl::computeCovarianceMatrix (*surface_, *indices_, centroid, covariance_matrix);
+
+ Eigen::Matrix3f eigenvectors;
+ Eigen::Vector3f eigenvalues;
+
+ // compute eigenvalues and eigenvectors of the covariance matrix
+ pcl::eigen33 (covariance_matrix, eigenvectors, eigenvalues);
+
+ // z axis of the reference frame is the eigenvector associated with the minimal eigenvalue
+ Eigen::Vector3f z_axis = eigenvectors.col (0);
+
+ // if angle between z axis and viewing direction is in the [-90 deg, 90 deg] range, then z axis is negated
+ if (z_axis.dot (view_direction_) > 0)
+ {
+ z_axis = -z_axis;
+ }
+
+ // x axis of the reference frame is the eigenvector associated with the maximal eigenvalue
+ const Eigen::Vector3f x_axis = eigenvectors.col (2);
+
+ // y axis is the cross product of z axis and x axis
+ const Eigen::Vector3f y_axis = z_axis.cross (x_axis);
+
+ const Eigen::Vector3f centroid_xyz = centroid.head<3> ();
+
+ // compute alignment transform from axes and centroid
+ transform_ << x_axis.transpose (), -x_axis.dot (centroid_xyz),
+ y_axis.transpose (), -y_axis.dot (centroid_xyz),
+ z_axis.transpose (), -z_axis.dot (centroid_xyz),
+ 0.0f, 0.0f, 0.0f, 1.0f;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDEstimation<PointInT, PointOutT>::addSampleToHistograms (const Eigen::Vector4f &p,
+ const float max_coord,
+ const size_t half_grid_size,
+ const HistogramInterpolationMethod interp,
+ const float hbin,
+ const float hist_incr,
+ std::vector<Eigen::VectorXf> &hists)
+{
+ const size_t grid_size = half_grid_size * 2;
+
+ // compute normalized coordinates with respect to axis-aligned bounding cube centered on the origin
+ const Eigen::Vector3f scaled ( (p[0] / max_coord) * half_grid_size, (p[1] / max_coord) * half_grid_size, (p[2] / max_coord) * half_grid_size);
+
+ // compute histograms array coords
+ Eigen::Vector4f coords (scaled[0] + half_grid_size, scaled[1] + half_grid_size, scaled[2] + half_grid_size, hbin);
+
+ // if using histogram interpolation, subtract 0.5 so samples with the central value of the bin have full weight in it
+ if (interp != INTERP_NONE)
+ {
+ coords -= Eigen::Vector4f (0.5f, 0.5f, 0.5f, 0.5f);
+ }
+
+ // compute histograms bins indices
+ const Eigen::Vector4f bins (std::floor (coords[0]), std::floor (coords[1]), std::floor (coords[2]), std::floor (coords[3]));
+
+ // compute indices of the bin where the sample falls into
+ const size_t grid_idx = ( (bins[0] + 1) * (grid_size + 2) + bins[1] + 1) * (grid_size + 2) + bins[2] + 1;
+ const size_t h_idx = bins[3] + 1;
+
+ if (interp == INTERP_NONE)
+ {
+ // no interpolation
+ hists[grid_idx][h_idx] += hist_incr;
+ }
+ else
+ {
+ // if using histogram interpolation, compute trilinear interpolation
+ coords -= Eigen::Vector4f (bins[0], bins[1], bins[2], 0.0f);
+
+ const float v_x1 = hist_incr * coords[0];
+ const float v_x0 = hist_incr - v_x1;
+
+ const float v_xy11 = v_x1 * coords[1];
+ const float v_xy10 = v_x1 - v_xy11;
+ const float v_xy01 = v_x0 * coords[1];
+ const float v_xy00 = v_x0 - v_xy01;
+
+ const float v_xyz111 = v_xy11 * coords[2];
+ const float v_xyz110 = v_xy11 - v_xyz111;
+ const float v_xyz101 = v_xy10 * coords[2];
+ const float v_xyz100 = v_xy10 - v_xyz101;
+ const float v_xyz011 = v_xy01 * coords[2];
+ const float v_xyz010 = v_xy01 - v_xyz011;
+ const float v_xyz001 = v_xy00 * coords[2];
+ const float v_xyz000 = v_xy00 - v_xyz001;
+
+ if (interp == INTERP_TRILINEAR)
+ {
+ // trilinear interpolation
+ hists[grid_idx][h_idx] += v_xyz000;
+ hists[grid_idx + 1][h_idx] += v_xyz001;
+ hists[grid_idx + (grid_size + 2)][h_idx] += v_xyz010;
+ hists[grid_idx + (grid_size + 3)][h_idx] += v_xyz011;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2)][h_idx] += v_xyz100;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2) + 1][h_idx] += v_xyz101;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2)][h_idx] += v_xyz110;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2) + 1][h_idx] += v_xyz111;
+ }
+ else
+ {
+ // quadrilinear interpolation
+ coords[3] -= bins[3];
+
+ const float v_xyzh1111 = v_xyz111 * coords[3];
+ const float v_xyzh1110 = v_xyz111 - v_xyzh1111;
+ const float v_xyzh1101 = v_xyz110 * coords[3];
+ const float v_xyzh1100 = v_xyz110 - v_xyzh1101;
+ const float v_xyzh1011 = v_xyz101 * coords[3];
+ const float v_xyzh1010 = v_xyz101 - v_xyzh1011;
+ const float v_xyzh1001 = v_xyz100 * coords[3];
+ const float v_xyzh1000 = v_xyz100 - v_xyzh1001;
+ const float v_xyzh0111 = v_xyz011 * coords[3];
+ const float v_xyzh0110 = v_xyz011 - v_xyzh0111;
+ const float v_xyzh0101 = v_xyz010 * coords[3];
+ const float v_xyzh0100 = v_xyz010 - v_xyzh0101;
+ const float v_xyzh0011 = v_xyz001 * coords[3];
+ const float v_xyzh0010 = v_xyz001 - v_xyzh0011;
+ const float v_xyzh0001 = v_xyz000 * coords[3];
+ const float v_xyzh0000 = v_xyz000 - v_xyzh0001;
+
+ hists[grid_idx][h_idx] += v_xyzh0000;
+ hists[grid_idx][h_idx + 1] += v_xyzh0001;
+ hists[grid_idx + 1][h_idx] += v_xyzh0010;
+ hists[grid_idx + 1][h_idx + 1] += v_xyzh0011;
+ hists[grid_idx + (grid_size + 2)][h_idx] += v_xyzh0100;
+ hists[grid_idx + (grid_size + 2)][h_idx + 1] += v_xyzh0101;
+ hists[grid_idx + (grid_size + 3)][h_idx] += v_xyzh0110;
+ hists[grid_idx + (grid_size + 3)][h_idx + 1] += v_xyzh0111;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2)][h_idx] += v_xyzh1000;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2)][h_idx + 1] += v_xyzh1001;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2) + 1][h_idx] += v_xyzh1010;
+ hists[grid_idx + (grid_size + 2) * (grid_size + 2) + 1][h_idx + 1] += v_xyzh1011;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2)][h_idx] += v_xyzh1100;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2)][h_idx + 1] += v_xyzh1101;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2) + 1][h_idx] += v_xyzh1110;
+ hists[grid_idx + (grid_size + 3) * (grid_size + 2) + 1][h_idx + 1] += v_xyzh1111;
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDEstimation<PointInT, PointOutT>::copyShapeHistogramsToOutput (const size_t grid_size,
+ const size_t hists_size,
+ const std::vector<Eigen::VectorXf> &hists,
+ PointCloudOut &output,
+ size_t &pos)
+{
+ for (size_t i = 0; i < grid_size; ++i)
+ {
+ for (size_t j = 0; j < grid_size; ++j)
+ {
+ for (size_t k = 0; k < grid_size; ++k)
+ {
+ const size_t idx = ( (i + 1) * (grid_size + 2) + (j + 1)) * (grid_size + 2) + (k + 1);
+
+ std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output.points[0].histogram + pos);
+ pos += hists_size;
+ }
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDEstimation<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
+{
+ // compute alignment transform using reference frame
+ computeAlignmentTransform ();
+
+ // align point cloud
+ pcl::transformPointCloud (*surface_, *indices_, shape_samples_, transform_);
+
+ const size_t shape_grid_size = shape_half_grid_size_ * 2;
+
+ // each histogram dimension has 2 additional bins, 1 in each boundary, for performing interpolation
+ std::vector<Eigen::VectorXf> shape_hists ((shape_grid_size + 2) * (shape_grid_size + 2) * (shape_grid_size + 2),
+ Eigen::VectorXf::Zero (shape_hists_size_ + 2));
+
+ Eigen::Vector4f centroid_p = Eigen::Vector4f::Zero ();
+
+ // compute normalization factor for distances between samples and centroid
+ Eigen::Vector4f far_pt;
+ pcl::getMaxDistance (shape_samples_, centroid_p, far_pt);
+ far_pt[3] = 0;
+ const float distance_normalization_factor = (centroid_p - far_pt).norm ();
+
+ // compute normalization factor with respect to axis-aligned bounding cube centered on the origin
+ Eigen::Vector4f min_pt, max_pt;
+ pcl::getMinMax3D (shape_samples_, min_pt, max_pt);
+
+ max_coord_ = std::max (min_pt.head<3> ().cwiseAbs ().maxCoeff (), max_pt.head<3> ().cwiseAbs ().maxCoeff ());
+
+ // normalize sample contribution with respect to the total number of points in the cloud
+ hist_incr_ = 100.0f / static_cast<float> (shape_samples_.size () - 1);
+
+ // for each sample
+ for (size_t i = 0; i < shape_samples_.size (); ++i)
+ {
+ // compute shape histogram array coord based on distance between sample and centroid
+ const Eigen::Vector4f p (shape_samples_[i].x, shape_samples_[i].y, shape_samples_[i].z, 0.0f);
+ const float d = p.norm ();
+
+ const float shape_grid_step = distance_normalization_factor / shape_half_grid_size_;
+
+ const float dist_hist_start = ((int) (d / shape_grid_step)) * shape_grid_step;
+
+ const float dist_hist_val = (d - dist_hist_start) / shape_grid_step;
+
+ const float dbin = dist_hist_val * shape_hists_size_;
+
+ // add sample to shape histograms, optionally performing interpolation
+ addSampleToHistograms (p, max_coord_, shape_half_grid_size_, shape_interp_, dbin, hist_incr_, shape_hists);
+ }
+
+ pos_ = 0;
+
+ // copy shape histograms to output
+ copyShapeHistogramsToOutput (shape_grid_size, shape_hists_size_, shape_hists, output, pos_);
+
+ // set remaining values of the descriptor to zero (if any)
+ std::fill (output.points[0].histogram + pos_, output.points[0].histogram + output.points[0].descriptorSize (), 0.0f);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDColorEstimation<PointInT, PointOutT>::copyColorHistogramsToOutput (const size_t grid_size,
+ const size_t hists_size,
+ std::vector<Eigen::VectorXf> &hists,
+ PointCloudOut &output,
+ size_t &pos)
+{
+ for (size_t i = 0; i < grid_size; ++i)
+ {
+ for (size_t j = 0; j < grid_size; ++j)
+ {
+ for (size_t k = 0; k < grid_size; ++k)
+ {
+ const size_t idx = ( (i + 1) * (grid_size + 2) + (j + 1)) * (grid_size + 2) + (k + 1);
+
+ hists[idx][1] += hists[idx][hists_size + 1];
+ hists[idx][hists_size] += hists[idx][0];
+
+ std::copy (hists[idx].data () + 1, hists[idx].data () + hists_size + 1, output.points[0].histogram + pos);
+ pos += hists_size;
+ }
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::GASDColorEstimation<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
+{
+ // call shape feature computation
+ GASDEstimation<PointInT, PointOutT>::computeFeature (output);
+
+ const size_t color_grid_size = color_half_grid_size_ * 2;
+
+ // each histogram dimension has 2 additional bins, 1 in each boundary, for performing interpolation
+ std::vector<Eigen::VectorXf> color_hists ((color_grid_size + 2) * (color_grid_size + 2) * (color_grid_size + 2),
+ Eigen::VectorXf::Zero (color_hists_size_ + 2));
+
+ // for each sample
+ for (size_t i = 0; i < shape_samples_.size (); ++i)
+ {
+ // compute shape histogram array coord based on distance between sample and centroid
+ const Eigen::Vector4f p (shape_samples_[i].x, shape_samples_[i].y, shape_samples_[i].z, 0.0f);
+
+ // compute hue value
+ float hue = 0.f;
+
+ const unsigned char max = std::max (shape_samples_[i].r, std::max (shape_samples_[i].g, shape_samples_[i].b));
+ const unsigned char min = std::min (shape_samples_[i].r, std::min (shape_samples_[i].g, shape_samples_[i].b));
+
+ const float diff_inv = 1.f / static_cast <float> (max - min);
+
+ if (pcl_isfinite (diff_inv))
+ {
+ if (max == shape_samples_[i].r)
+ {
+ hue = 60.f * (static_cast <float> (shape_samples_[i].g - shape_samples_[i].b) * diff_inv);
+ }
+ else if (max == shape_samples_[i].g)
+ {
+ hue = 60.f * (2.f + static_cast <float> (shape_samples_[i].b - shape_samples_[i].r) * diff_inv);
+ }
+ else
+ {
+ hue = 60.f * (4.f + static_cast <float> (shape_samples_[i].r - shape_samples_[i].g) * diff_inv); // max == b
+ }
+
+ if (hue < 0.f)
+ {
+ hue += 360.f;
+ }
+ }
+
+ // compute color histogram array coord based on hue value
+ const float hbin = (hue / 360) * color_hists_size_;
+
+ // add sample to color histograms, optionally performing interpolation
+ GASDEstimation<PointInT, PointOutT>::addSampleToHistograms (p, max_coord_, color_half_grid_size_, color_interp_, hbin, hist_incr_, color_hists);
+ }
+
+ // copy color histograms to output
+ copyColorHistogramsToOutput (color_grid_size, color_hists_size_, color_hists, output, pos_);
+
+ // set remaining values of the descriptor to zero (if any)
+ std::fill (output.points[0].histogram + pos_, output.points[0].histogram + output.points[0].descriptorSize (), 0.0f);
+}
+
+#define PCL_INSTANTIATE_GASDEstimation(InT, OutT) template class PCL_EXPORTS pcl::GASDEstimation<InT, OutT>;
+#define PCL_INSTANTIATE_GASDColorEstimation(InT, OutT) template class PCL_EXPORTS pcl::GASDColorEstimation<InT, OutT>;
+
+#endif // PCL_FEATURES_IMPL_GASD_H_
unsigned count = integral_image_XYZ_.getFiniteElementsCount (pos_x - (rect_width_2_), pos_y - (rect_height_2_), rect_width_, rect_height_);
- // no valid points within the rectangular reagion?
+ // no valid points within the rectangular region?
if (count == 0)
{
normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
unsigned count = 0;
sumArea<unsigned>(start_x, start_y, end_x, end_y, width, height, boost::bind(&IntegralImage2D<float, 3>::getFiniteElementsCountSE, &integral_image_XYZ_, _1, _2, _3, _4), count);
- // no valid points within the rectangular reagion?
+ // no valid points within the rectangular region?
if (count == 0)
{
normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
// Estimate the XYZ centroid
compute3DCentroid (cloud, indices, xyz_centroid_);
- // Initalize the centralized moments
+ // Initialize the centralized moments
float mu200 = 0, mu020 = 0, mu002 = 0, mu110 = 0, mu101 = 0, mu011 = 0;
// Iterate over the nearest neighbors set
// Estimate the XYZ centroid
compute3DCentroid (cloud, xyz_centroid_);
- // Initalize the centralized moments
+ // Initialize the centralized moments
float mu200 = 0, mu020 = 0, mu002 = 0, mu110 = 0, mu101 = 0, mu011 = 0;
// Iterate over the nearest neighbors set
#include <pcl/features/normal_3d_omp.h>
+///////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointOutT> void
+pcl::NormalEstimationOMP<PointInT, PointOutT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
pcl::NormalEstimationOMP<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
std::vector<float> nn_dists (k_);
output.is_dense = true;
-
// Save a few cycles by not checking every point for NaN/Inf values if the cloud is set to dense
if (input_->is_dense)
{
// Iterating over the entire index vector
for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
{
- if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
+ Eigen::Vector4f n;
+ if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
+ !computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
{
output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
continue;
}
- Eigen::Vector4f n;
- pcl::computePointNormal<PointInT> (*surface_, nn_indices,
- n,
- output.points[idx].curvature);
-
output.points[idx].normal_x = n[0];
output.points[idx].normal_y = n[1];
output.points[idx].normal_z = n[2];
-
+
flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
- output.points[idx].normal[0],
- output.points[idx].normal[1],
- output.points[idx].normal[2]);
+ output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+
}
}
else
#ifdef _OPENMP
#pragma omp parallel for shared (output) private (nn_indices, nn_dists) num_threads(threads_)
#endif
- // Iterating over the entire index vector
+ // Iterating over the entire index vector
for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
{
+ Eigen::Vector4f n;
if (!isFinite ((*input_)[(*indices_)[idx]]) ||
- this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
+ this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
+ !computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
{
output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
continue;
}
- Eigen::Vector4f n;
- pcl::computePointNormal<PointInT> (*surface_, nn_indices,
- n,
- output.points[idx].curvature);
-
output.points[idx].normal_x = n[0];
output.points[idx].normal_y = n[1];
output.points[idx].normal_z = n[2];
flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
+
}
- }
+ }
}
#define PCL_INSTANTIATE_NormalEstimationOMP(T,NT) template class PCL_EXPORTS pcl::NormalEstimationOMP<T,NT>;
#include <pcl/features/shot_lrf_omp.h>
#include <pcl/features/shot_lrf.h>
-template<typename PointInT, typename PointOutT>
-void
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointOutT> void
+pcl::SHOTLocalReferenceFrameEstimationOMP<PointInT, PointOutT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointOutT> void
pcl::SHOTLocalReferenceFrameEstimationOMP<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
{
//check whether used with search radius or search k-neighbors
return (true);
}
+//////////////////////////////////////////////////////////////////////////////////////////////
+template<typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
+pcl::SHOTEstimationOMP<PointInT, PointNT, PointOutT, PointRFT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTEstimationOMP<PointInT, PointNT, PointOutT, PointRFT>::computeFeature (PointCloudOut &output)
}
}
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
+pcl::SHOTColorEstimationOMP<PointInT, PointNT, PointOutT, PointRFT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointNT, typename PointOutT, typename PointRFT> void
pcl::SHOTColorEstimationOMP<PointInT, PointNT, PointOutT, PointRFT>::computeFeature (PointCloudOut &output)
PCL_ERROR ("Shape Context Error INF!\n");
if (w != w)
PCL_ERROR ("Shape Context Error IND!\n");
- /// Accumulate w into correspondant Bin(j,k,l)
+ /// Accumulate w into correspondent Bin(j,k,l)
desc[(l*elevation_bins_*radius_bins_) + (k*radius_bins_) + j] += w;
assert (desc[(l*elevation_bins_*radius_bins_) + (k*radius_bins_) + j] >= 0);
pcl::VFHEstimation<PointInT, PointNT, PointOutT>::computeFeature (PointCloudOut &output)
{
// ---[ Step 1a : compute the centroid in XYZ space
- Eigen::Vector4f xyz_centroid;
+ Eigen::Vector4f xyz_centroid (0, 0, 0, 0);
if (use_given_centroid_)
xyz_centroid = centroid_to_use_;
*/
void
computeFeature (PointCloudOut &output);
-
private:
/** \brief 16-bytes aligned placeholder for the XYZ centroid of a surface patch. */
Eigen::Vector4f xyz_centroid_;
}
}
+ /** \brief Flip (in place) normal to get the same sign of the mean of the normals specified by normal_indices.
+ *
+ * The method is described in:
+ * A. Petrelli, L. Di Stefano, "A repeatable and efficient canonical reference for surface matching", 3DimPVT, 2012
+ * A. Petrelli, L. Di Stefano, "On the repeatability of the local reference frame for partial shape matching", 13th International Conference on Computer Vision (ICCV), 2011
+ *
+ * Normals should be unit vectors. Otherwise the resulting mean would be weighted by the normal norms.
+ * \param[in] normal_cloud Cloud of normals used to compute the mean
+ * \param[in] normal_indices Indices of normals used to compute the mean
+ * \param[in] normal input Normal to flip. Normal is modified by the function.
+ * \return false if normal_indices does not contain any valid normal.
+ * \ingroup features
+ */
+ template<typename PointNT> inline bool
+ flipNormalTowardsNormalsMean ( pcl::PointCloud<PointNT> const &normal_cloud,
+ std::vector<int> const &normal_indices,
+ Eigen::Vector3f &normal)
+ {
+ Eigen::Vector3f normal_mean = Eigen::Vector3f::Zero ();
+
+ for (size_t i = 0; i < normal_indices.size (); ++i)
+ {
+ const PointNT& cur_pt = normal_cloud[normal_indices[i]];
+
+ if (pcl::isFinite (cur_pt))
+ {
+ normal_mean += cur_pt.getNormalVector3fMap ();
+ }
+ }
+
+ if (normal_mean.isZero ())
+ return false;
+
+ normal_mean.normalize ();
+
+ if (normal.dot (normal_mean) < 0)
+ {
+ normal = -normal;
+ }
+
+ return true;
+ }
+
/** \brief NormalEstimation estimates local surface properties (surface normals and curvatures)at each
* 3D point. If PointOutT is specified as pcl::Normal, the normal is stored in the first 3 components (0-2),
* and the curvature is stored in component 3.
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- NormalEstimationOMP (unsigned int nr_threads = 0) : threads_ (nr_threads)
+ NormalEstimationOMP (unsigned int nr_threads = 0)
{
feature_name_ = "NormalEstimationOMP";
+
+ setNumberOfThreads(nr_threads);
}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
protected:
/** \brief The number of threads the scheduler should use. */
* setSearchSurface () and the spatial locator in setSearchMethod ()
* \param output the resultant point cloud model dataset that contains surface normals and curvatures
*/
- void
+ void
computeFeature (PointCloudOut &output);
};
}
min_points_ = min;
}
- /** \brief Sets wether if the signatures should be normalized or not
+ /** \brief Sets whether the signatures should be normalized or not
* \param[in] normalize true if normalization is required, false otherwise
*/
inline void
*/
float leaf_size_;
- /** \brief Wether to normalize the signatures or not. Default: false. */
+ /** \brief Whether to normalize the signatures or not. Default: false. */
bool normalize_bins_;
/** \brief Curvature threshold for removing normals. */
void
setNumberOfPartitionBins (unsigned int number_of_bins);
- /** \brief Returns the nmber of partition bins. */
+ /** \brief Returns the number of partition bins. */
unsigned int
getNumberOfPartitionBins () const;
setTriangles (const std::vector <pcl::Vertices>& triangles);
/** \brief Returns the triangles of the mesh.
- * \param[out] triangles triangles of tthe mesh
+ * \param[out] triangles triangles of the mesh
*/
void
getTriangles (std::vector <pcl::Vertices>& triangles) const;
/** \brief This method simply builds the list of triangles for every point.
* The list of triangles for each point consists of indices of triangles it belongs to.
- * The only purpose of this method is to improve perfomance of the algorithm.
+ * The only purpose of this method is to improve performance of the algorithm.
*/
void
buildListOfPointsTriangles ();
/** \brief This method crops all the triangles within the given radius of the given point.
* \param[in] point point for which the local surface is computed
- * \param[out] local_triangles strores the indices of the triangles that belong to the local surface
+ * \param[out] local_triangles stores the indices of the triangles that belong to the local surface
* \param[out] local_points stores the indices of the points that belong to the local surface
*/
void
/** \brief This method computes LRF (Local Reference Frame) matrix for the given point.
* \param[in] point point for which the LRF is computed
* \param[in] local_triangles list of triangles that represents the local surface of the point
- * \paran[out] lrf_matrix strores computed LRF matrix for the given point
+ * \paran[out] lrf_matrix stores computed LRF matrix for the given point
*/
void
computeLRF (const PointInT& point, const std::set <unsigned int>& local_triangles, Eigen::Matrix3f& lrf_matrix) const;
/** \brief Stores the angle step. Step is calculated with respect to number of rotations. */
float step_;
- /** \brief Stores the set of triangles reprsenting the mesh. */
+ /** \brief Stores the set of triangles representing the mesh. */
std::vector <pcl::Vertices> triangles_;
- /** \brief Stores the set of triangles for each point. Its purpose is to improve perfomance. */
+ /** \brief Stores the set of triangles for each point. Its purpose is to improve performance. */
std::vector <std::vector <unsigned int> > triangles_of_the_point_;
public:
{
/** \brief Transform a list of 2D matrices into a point cloud containing the values in a vector (Histogram<N>).
* Can be used to transform the 2D histograms obtained in \ref RSDEstimation into a point cloud.
- * @note The template paramter N should be (greater or) equal to the product of the number of rows and columns.
+ * @note The template parameter N should be (greater or) equal to the product of the number of rows and columns.
* \param[in] histograms2D the list of neighborhood 2D histograms
* \param[out] histogramsPC the dataset containing the linearized matrices
* \ingroup features
typedef boost::shared_ptr<SHOTLocalReferenceFrameEstimationOMP<PointInT, PointOutT> > Ptr;
typedef boost::shared_ptr<const SHOTLocalReferenceFrameEstimationOMP<PointInT, PointOutT> > ConstPtr;
/** \brief Constructor */
- SHOTLocalReferenceFrameEstimationOMP () : threads_ (0)
+ SHOTLocalReferenceFrameEstimationOMP ()
{
feature_name_ = "SHOTLocalReferenceFrameEstimationOMP";
+
+ setNumberOfThreads(0);
}
-
+
/** \brief Empty destructor */
virtual ~SHOTLocalReferenceFrameEstimationOMP () {}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
protected:
using Feature<PointInT, PointOutT>::feature_name_;
typedef typename Feature<PointInT, PointOutT>::PointCloudIn PointCloudIn;
/** \brief Empty constructor. */
- SHOTEstimationOMP (unsigned int nr_threads = 0) : SHOTEstimation<PointInT, PointNT, PointOutT, PointRFT> (), threads_ (nr_threads)
- { };
+ SHOTEstimationOMP (unsigned int nr_threads = 0) : SHOTEstimation<PointInT, PointNT, PointOutT, PointRFT> ()
+ {
+ setNumberOfThreads(nr_threads);
+ };
+
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
protected:
SHOTColorEstimationOMP (bool describe_shape = true,
bool describe_color = true,
unsigned int nr_threads = 0)
- : SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT> (describe_shape, describe_color), threads_ (nr_threads)
+ : SHOTColorEstimation<PointInT, PointNT, PointOutT, PointRFT> (describe_shape, describe_color)
{
+ setNumberOfThreads(nr_threads);
}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
protected:
* different than feature estimation methods that extend \ref
* FeatureFromNormals, which match the normals with the search surface.
*
- * With the default paramters, pcl::Histogram<153> is a good choice for PointOutT.
+ * With the default parameters, pcl::Histogram<153> is a good choice for PointOutT.
* Of course the dimension of this descriptor must change to match the number
* of bins set by the parameters.
*
* - F. Tombari, S. Salti, L. Di Stefano,
* "Unique Shape Context for 3D data description",
* International Workshop on 3D Object Retrieval (3DOR 10) -
- * in conjuction with ACM Multimedia 2010
+ * in conjunction with ACM Multimedia 2010
*
* The suggested PointOutT is pcl::UniqueShapeContext1960
*
--- /dev/null
+/*
+* Software License Agreement (BSD License)
+*
+* Point Cloud Library (PCL) - www.pointclouds.org
+* Copyright (c) 2016-, Open Perception, Inc.
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+* * Neither the name of the copyright holder(s) nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#include <pcl/features/impl/flare.hpp>
+
+
+#ifndef PCL_NO_PRECOMPILE
+#include <pcl/point_types.h>
+#include <pcl/impl/instantiate.hpp>
+// Instantiations of specific point types
+#ifdef PCL_ONLY_CORE_POINT_TYPES
+PCL_INSTANTIATE_PRODUCT(FLARELocalReferenceFrameEstimation, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA))((pcl::Normal))((pcl::ReferenceFrame))((float)))
+#else
+PCL_INSTANTIATE_PRODUCT(FLARELocalReferenceFrameEstimation, (PCL_XYZ_POINT_TYPES)(PCL_NORMAL_POINT_TYPES)((pcl::ReferenceFrame))((float)))
+#endif
+#endif // PCL_NO_PRECOMPILE
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2016-, Open Perception, Inc.
+ * Copyright (c) 2016, Voxar Labs, CIn-UFPE / DEINFO-UFRPE
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pcl/features/impl/gasd.hpp>
+
+#ifndef PCL_NO_PRECOMPILE
+#include <pcl/point_types.h>
+#include <pcl/impl/instantiate.hpp>
+// Instantiations of specific point types
+PCL_INSTANTIATE_PRODUCT(GASDEstimation, ((pcl::PointXYZ)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)) ((pcl::GASDSignature512)(pcl::GASDSignature984)(pcl::GASDSignature7992)))
+PCL_INSTANTIATE_PRODUCT(GASDColorEstimation, ((pcl::PointXYZRGB)(pcl::PointXYZRGBA)) ((pcl::GASDSignature512)(pcl::GASDSignature984)(pcl::GASDSignature7992)))
+#endif // PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
// Instantiations of specific point types
#ifdef PCL_ONLY_CORE_POINT_TYPES
- PCL_INSTANTIATE_PRODUCT(MomentOfInertiaEstimation, ((pcl::PointXYZ)))
- PCL_INSTANTIATE_PRODUCT(MomentOfInertiaEstimation, ((pcl::PointXYZI)))
- PCL_INSTANTIATE_PRODUCT(MomentOfInertiaEstimation, ((pcl::PointXYZRGBA)))
- PCL_INSTANTIATE_PRODUCT(MomentOfInertiaEstimation, ((pcl::PointNormal)))
+ PCL_INSTANTIATE(MomentOfInertiaEstimation, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGBA)(pcl::PointNormal))
#else
- PCL_INSTANTIATE_PRODUCT(MomentOfInertiaEstimation, (PCL_XYZ_POINT_TYPES))
+ PCL_INSTANTIATE(MomentOfInertiaEstimation, PCL_XYZ_POINT_TYPES)
#endif
#endif // PCL_NO_PRECOMPILE
if (neighbor_border_direction==NULL || index2==index)
continue;
- // Oposite directions?
+ // Opposite directions?
float cos_angle = neighbor_border_direction->dot(*border_direction);
if (cos_angle<min_cos_angle)
{
*/
virtual Clipper3D<PointT>*
clone () const = 0;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
}
* \param op the operator to use when making the comparison
* \param compare_val the constant value to compare the field value too
*/
- FieldComparison (std::string field_name, ComparisonOps::CompareOp op, double compare_val);
+ FieldComparison (const std::string &field_name, ComparisonOps::CompareOp op, double compare_val);
/** \brief Copy constructor.
* \param[in] src the field comparison object to copy into this
* \param op the operator to use when making the comparison
* \param compare_val the constant value to compare the component value too
*/
- PackedRGBComparison (std::string component_name, ComparisonOps::CompareOp op, double compare_val);
+ PackedRGBComparison (const std::string &component_name, ComparisonOps::CompareOp op, double compare_val);
/** \brief Destructor. */
virtual ~PackedRGBComparison () {}
* \param op the operator to use when making the comparison
* \param compare_val the constant value to compare the component value too
*/
- PackedHSIComparison (std::string component_name, ComparisonOps::CompareOp op, double compare_val);
+ PackedHSIComparison (const std::string &component_name, ComparisonOps::CompareOp op, double compare_val);
/** \brief Destructor. */
virtual ~PackedHSIComparison () {}
filter_name_ = "ConditionalRemoval";
}
- /** \brief a constructor that includes the condition.
- * \param condition the condition that each point must satisfy to avoid
- * being removed by the filter
- * \param extract_removed_indices extract filtered indices from indices vector
- */
- PCL_DEPRECATED ("ConditionalRemoval(ConditionBasePtr condition, bool extract_removed_indices = false) is deprecated, "
- "please use the setCondition (ConditionBasePtr condition) function instead.")
- ConditionalRemoval (ConditionBasePtr condition, bool extract_removed_indices = false) :
- Filter<PointT>::Filter (extract_removed_indices), capable_ (false), keep_organized_ (false), condition_ (),
- user_filter_value_ (std::numeric_limits<float>::quiet_NaN ())
- {
- filter_name_ = "ConditionalRemoval";
- setCondition (condition);
- }
-
/** \brief Set whether the filtered points should be kept and set to the
* value given through \a setUserFilterValue (default: NaN), or removed
* from the PointCloud, thus potentially breaking its organized
* policies:
* - Ignoring: elements at special locations are filled with zero
* (default behaviour)
- * - Mirroring: the missing rows or columns are obtained throug mirroring
- * - Duplicating: the missing rows or columns are obtained throug
+ * - Mirroring: the missing rows or columns are obtained through mirroring
+ * - Duplicating: the missing rows or columns are obtained through
* duplicating
*
* \author Nizar Sallem
virtual PointOutT
operator() (const std::vector<int>& indices, const std::vector<float>& distances) = 0;
- /** \brief Must call this methode before doing any computation
+ /** \brief Must call this method before doing any computation
* \note make sure to override this with at least
* \code
* bool initCompute ()
inline void
setThreshold (float threshold) { threshold_ = threshold; }
- /** Must call this methode before doing any computation */
+ /** Must call this method before doing any computation */
bool initCompute ();
virtual PointOutT
/** \brief Compute the condition number of the input point cloud. The condition number is the ratio between the
* largest and smallest eigenvalues of the 6x6 covariance matrix of the cloud. The closer this number is to 1.0,
* the more stable the cloud is for ICP registration.
- * \param[in] covariance_matrix user given covariance matrix
+ * \param[in] covariance_matrix user given covariance matrix. Assumed to be self adjoint/symmetric.
* \return the condition number
*/
static double
*/
void
applyFilter (std::vector<int> &indices);
-
private:
/** \brief The minimum point of the box. */
Eigen::Vector4f min_pt_;
typedef typename Filter<PointT>::PointCloud PointCloud;
public:
-
+
typedef boost::shared_ptr< FastBilateralFilterOMP<PointT> > Ptr;
typedef boost::shared_ptr< const FastBilateralFilterOMP<PointT> > ConstPtr;
/** \brief Empty constructor. */
FastBilateralFilterOMP (unsigned int nr_threads = 0)
- : threads_ (nr_threads)
- { }
+ {
+ setNumberOfThreads(nr_threads);
+ }
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
/** \brief Filter the input data and store the results into output.
* \param[out] output the resultant point cloud
*/
void
applyFilter (PointCloud &output);
-
+
protected:
/** \brief The number of threads the scheduler should use. */
unsigned int threads_;
{
/** \brief Removes points with x, y, or z equal to NaN
* \param[in] cloud_in the input point cloud
- * \param[out] cloud_out the input point cloud
+ * \param[out] cloud_out the output point cloud
* \param[out] index the mapping (ordered): cloud_out.points[i] = cloud_in.points[index[i]]
* \note The density of the point cloud is lost.
* \note Can be called with cloud_in == cloud_out
* \ingroup filters
*/
template<typename PointT> void
- removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- pcl::PointCloud<PointT> &cloud_out,
+ removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
+ pcl::PointCloud<PointT> &cloud_out,
std::vector<int> &index);
/** \brief Removes points that have their normals invalid (i.e., equal to NaN)
* \param[in] cloud_in the input point cloud
- * \param[out] cloud_out the input point cloud
+ * \param[out] cloud_out the output point cloud
* \param[out] index the mapping (ordered): cloud_out.points[i] = cloud_in.points[index[i]]
* \note The density of the point cloud is lost.
* \note Can be called with cloud_in == cloud_out
* \ingroup filters
*/
template<typename PointT> void
- removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
- pcl::PointCloud<PointT> &cloud_out,
+ removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
+ pcl::PointCloud<PointT> &cloud_out,
std::vector<int> &index);
////////////////////////////////////////////////////////////////////////////////////////////
typedef typename PointCloud::ConstPtr PointCloudConstPtr;
/** \brief Empty constructor.
- * \param[in] extract_removed_indices set to true if the filtered data indices should be saved in a
+ * \param[in] extract_removed_indices set to true if the filtered data indices should be saved in a
* separate list. Default: false.
*/
- Filter (bool extract_removed_indices = false) :
+ Filter (bool extract_removed_indices = false) :
removed_indices_ (new std::vector<int>),
filter_name_ (),
extract_removed_indices_ (extract_removed_indices)
/** \brief Get the point indices being removed */
inline IndicesConstPtr const
- getRemovedIndices ()
+ getRemovedIndices () const
{
return (removed_indices_);
}
- /** \brief Get the point indices being removed
+ /** \brief Get the point indices being removed
* \param[out] pi the resultant point indices that have been removed
*/
inline void
/** \brief Set to true if we want to return the indices of the removed points. */
bool extract_removed_indices_;
- /** \brief Abstract filter method.
+ /** \brief Abstract filter method.
*
* The implementation needs to set output.{points, width, height, is_dense}.
*
typedef PCLPointCloud2::Ptr PCLPointCloud2Ptr;
typedef PCLPointCloud2::ConstPtr PCLPointCloud2ConstPtr;
- /** \brief Empty constructor.
- * \param[in] extract_removed_indices set to true if the filtered data indices should be saved in a
+ /** \brief Empty constructor.
+ * \param[in] extract_removed_indices set to true if the filtered data indices should be saved in a
* separate list. Default: false.
*/
- Filter (bool extract_removed_indices = false) :
+ Filter (bool extract_removed_indices = false) :
removed_indices_ (new std::vector<int>),
extract_removed_indices_ (extract_removed_indices),
filter_name_ ()
{
}
-
+
/** \brief Empty destructor */
virtual ~Filter () {}
/** \brief Get the point indices being removed */
inline IndicesConstPtr const
- getRemovedIndices ()
+ getRemovedIndices () const
{
return (removed_indices_);
}
- /** \brief Get the point indices being removed
+ /** \brief Get the point indices being removed
* \param[out] pi the resultant point indices that have been removed
*/
inline void
* \return The value of the internal \a negative_ parameter; false = normal filter behavior (default), true = inverted behavior.
*/
inline bool
- getNegative ()
+ getNegative () const
{
return (negative_);
}
* \return The value of the internal \a keep_organized_ parameter; false = remove points (default), true = redefine points, keep structure.
*/
inline bool
- getKeepOrganized ()
+ getKeepOrganized () const
{
return (keep_organized_);
}
* \return The value of the internal \a negative_ parameter; false = normal filter behavior (default), true = inverted behavior.
*/
inline bool
- getNegative ()
+ getNegative () const
{
return (negative_);
}
* \return The value of the internal \a keep_organized_ parameter; false = remove points (default), true = redefine points, keep structure.
*/
inline bool
- getKeepOrganized ()
+ getKeepOrganized () const
{
return (keep_organized_);
}
#define PCL_INSTANTIATE_BilateralFilter(T) template class PCL_EXPORTS pcl::BilateralFilter<T>;
-#endif // PCL_FILTERS_BILATERAL_H_
+#endif // PCL_FILTERS_BILATERAL_IMPL_H_
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-// ToDo use product on point.getVector3fMap () and transformatio_.col (i) to use the SSE advantages of eigen
template<typename PointT> bool
pcl::BoxClipper3D<PointT>::clipPoint3D (const PointT& point) const
{
- return (fabs(transformation_.data () [ 0] * point.x +
- transformation_.data () [ 3] * point.y +
- transformation_.data () [ 6] * point.z +
- transformation_.data () [ 9]) <= 1 &&
- fabs(transformation_.data () [ 1] * point.x +
- transformation_.data () [ 4] * point.y +
- transformation_.data () [ 7] * point.z +
- transformation_.data () [10]) <= 1 &&
- fabs(transformation_.data () [ 2] * point.x +
- transformation_.data () [ 5] * point.y +
- transformation_.data () [ 8] * point.z +
- transformation_.data () [11]) <= 1 );
+ Eigen::Vector4f point_coordinates (transformation_.matrix ()
+ * point.getVector4fMap ());
+ return (point_coordinates.array ().abs () <= 1).all ();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
* @attention untested code
*/
template<typename PointT> bool
-pcl::BoxClipper3D<PointT>::clipLineSegment3D (PointT& point1, PointT& point2) const
+pcl::BoxClipper3D<PointT>::clipLineSegment3D (PointT&, PointT&) const
{
/*
PointT pt1, pt2;
return true;
}
*/
+ throw std::logic_error ("Not implemented");
return false;
}
* @attention untested code
*/
template<typename PointT> void
-pcl::BoxClipper3D<PointT>::clipPlanarPolygon3D (const std::vector<PointT, Eigen::aligned_allocator<PointT> >& polygon, std::vector<PointT, Eigen::aligned_allocator<PointT> >& clipped_polygon) const
+pcl::BoxClipper3D<PointT>::clipPlanarPolygon3D (const std::vector<PointT, Eigen::aligned_allocator<PointT> >&, std::vector<PointT, Eigen::aligned_allocator<PointT> >& clipped_polygon) const
{
// not implemented -> clip everything
clipped_polygon.clear ();
+ throw std::logic_error ("Not implemented");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
{
// not implemented -> clip everything
polygon.clear ();
+ throw std::logic_error ("Not implemented");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT> void
pcl::BoxClipper3D<PointT>::clipPointCloud3D (const pcl::PointCloud<PointT>& cloud_in, std::vector<int>& clipped, const std::vector<int>& indices) const
{
+ clipped.clear ();
if (indices.empty ())
{
clipped.reserve (cloud_in.size ());
clipped.push_back (*iIt);
}
}
-#endif //PCL_FILTERS_IMPL_BOX_CLIPPER3D_HPP
\ No newline at end of file
+#endif //PCL_FILTERS_IMPL_BOX_CLIPPER3D_HPP
//////////////////////////////////////////////////////////////////////////
template <typename PointT>
pcl::FieldComparison<PointT>::FieldComparison (
- std::string field_name, ComparisonOps::CompareOp op, double compare_val)
+ const std::string &field_name, ComparisonOps::CompareOp op, double compare_val)
: ComparisonBase<PointT> ()
, compare_val_ (compare_val), point_data_ (NULL)
{
{
if (!this->capable_)
{
- PCL_WARN ("[pcl::FieldComparison::evaluate] invalid compariosn!\n");
+ PCL_WARN ("[pcl::FieldComparison::evaluate] invalid comparison!\n");
return (false);
}
//////////////////////////////////////////////////////////////////////////
template <typename PointT>
pcl::PackedRGBComparison<PointT>::PackedRGBComparison (
- std::string component_name, ComparisonOps::CompareOp op, double compare_val) :
+ const std::string &component_name, ComparisonOps::CompareOp op, double compare_val) :
component_name_ (component_name), component_offset_ (), compare_val_ (compare_val)
{
// get all the fields
//////////////////////////////////////////////////////////////////////////
template <typename PointT>
pcl::PackedHSIComparison<PointT>::PackedHSIComparison (
- std::string component_name, ComparisonOps::CompareOp op, double compare_val) :
+ const std::string &component_name, ComparisonOps::CompareOp op, double compare_val) :
component_name_ (component_name), component_id_ (), compare_val_ (compare_val), rgb_offset_ ()
{
// Get all the fields
}
else
{
- PCL_WARN ("[pcl::PackedRGBComparison::PackedRGBComparison] unrecognized component name!\n");
+ PCL_WARN ("[pcl::PackedHSIComparison::PackedHSIComparison] unrecognized component name!\n");
capable_ = false;
return;
}
case pcl::ComparisonOps::EQ:
return (myVal == 0);
default:
- PCL_WARN ("[pcl::transformableQuadricXYZComparison::evaluate] unrecognized op_!\n");
+ PCL_WARN ("[pcl::TfQuadraticXYZComparison::evaluate] unrecognized op_!\n");
return (false);
}
}
return (pt_val > val) - (pt_val < val);
}
default :
- PCL_WARN ("[pcl::pcl::PointDataAtOffset::compare] unknown data_type!\n");
+ PCL_WARN ("[pcl::PointDataAtOffset::compare] unknown data_type!\n");
return (0);
}
}
if (!computeCovarianceMatrix (covariance_matrix))
return (-1.);
- Eigen::EigenSolver<Eigen::Matrix<double, 6, 6> > eigen_solver;
- eigen_solver.compute (covariance_matrix, true);
-
- Eigen::MatrixXcd complex_eigenvalues = eigen_solver.eigenvalues ();
-
- double max_ev = -std::numeric_limits<double>::max (),
- min_ev = std::numeric_limits<double>::max ();
- for (size_t i = 0; i < 6; ++i)
- {
- if (real (complex_eigenvalues (i, 0)) > max_ev)
- max_ev = real (complex_eigenvalues (i, 0));
-
- if (real (complex_eigenvalues (i, 0)) < min_ev)
- min_ev = real (complex_eigenvalues (i, 0));
- }
-
- return (max_ev / min_ev);
+ return computeConditionNumber (covariance_matrix);
}
template<typename PointT, typename PointNT> double
pcl::CovarianceSampling<PointT, PointNT>::computeConditionNumber (const Eigen::Matrix<double, 6, 6> &covariance_matrix)
{
- Eigen::EigenSolver<Eigen::Matrix<double, 6, 6> > eigen_solver;
- eigen_solver.compute (covariance_matrix, true);
-
- Eigen::MatrixXcd complex_eigenvalues = eigen_solver.eigenvalues ();
-
- double max_ev = -std::numeric_limits<double>::max (),
- min_ev = std::numeric_limits<double>::max ();
- for (size_t i = 0; i < 6; ++i)
- {
- if (real (complex_eigenvalues (i, 0)) > max_ev)
- max_ev = real (complex_eigenvalues (i, 0));
-
- if (real (complex_eigenvalues (i, 0)) < min_ev)
- min_ev = real (complex_eigenvalues (i, 0));
- }
-
+ const Eigen::SelfAdjointEigenSolver<Eigen::Matrix<double, 6, 6> > solver (covariance_matrix, Eigen::EigenvaluesOnly);
+ const double max_ev = solver.eigenvalues (). maxCoeff ();
+ const double min_ev = solver.eigenvalues (). minCoeff ();
return (max_ev / min_ev);
}
template<typename PointT, typename PointNT> void
pcl::CovarianceSampling<PointT, PointNT>::applyFilter (std::vector<int> &sampled_indices)
{
- if (!initCompute ())
+ Eigen::Matrix<double, 6, 6> c_mat;
+ // Invokes initCompute()
+ if (!computeCovarianceMatrix (c_mat))
return;
- //--- Part A from the paper
- // Set up matrix F
- Eigen::Matrix<double, 6, Eigen::Dynamic> f_mat = Eigen::Matrix<double, 6, Eigen::Dynamic> (6, indices_->size ());
- for (size_t p_i = 0; p_i < scaled_points_.size (); ++p_i)
- {
- f_mat.block<3, 1> (0, p_i) = scaled_points_[p_i].cross (
- (*input_normals_)[(*indices_)[p_i]].getNormalVector3fMap ()).template cast<double> ();
- f_mat.block<3, 1> (3, p_i) = (*input_normals_)[(*indices_)[p_i]].getNormalVector3fMap ().template cast<double> ();
- }
-
- // Compute the covariance matrix C and its 6 eigenvectors (initially complex, move them to a double matrix)
- Eigen::Matrix<double, 6, 6> c_mat (f_mat * f_mat.transpose ());
-
- Eigen::EigenSolver<Eigen::Matrix<double, 6, 6> > eigen_solver;
- eigen_solver.compute (c_mat, true);
- Eigen::MatrixXcd complex_eigenvectors = eigen_solver.eigenvectors ();
-
- Eigen::Matrix<double, 6, 6> x;
- for (size_t i = 0; i < 6; ++i)
- for (size_t j = 0; j < 6; ++j)
- x (i, j) = real (complex_eigenvectors (i, j));
-
+ const Eigen::SelfAdjointEigenSolver<Eigen::Matrix<double, 6, 6> > solver (c_mat);
+ const Eigen::Matrix<double, 6, 6> x = solver.eigenvectors ();
//--- Part B from the paper
/// TODO figure out how to fill the candidate_indices - see subsequent paper paragraphs
pcl::for_each_type<FieldList> (pcl::detail::FieldAdder<PointT> (fields));
for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
{
- int pt_index = (*removed_indices_)[rii];
+ size_t pt_index = (size_t) (*removed_indices_)[rii];
if (pt_index >= input_->points.size ())
{
PCL_ERROR ("[pcl::%s::filterDirectly] The index exceeds the size of the input. Do nothing.\n",
pcl::for_each_type<FieldList> (pcl::detail::FieldAdder<PointT> (fields));
for (int rii = 0; rii < static_cast<int> (removed_indices_->size ()); ++rii) // rii = removed indices iterator
{
- int pt_index = (*removed_indices_)[rii];
+ size_t pt_index = (size_t)(*removed_indices_)[rii];
if (pt_index >= input_->points.size ())
{
PCL_ERROR ("[pcl::%s::applyFilter] The index exceeds the size of the input. Do nothing.\n",
#include <pcl/console/time.h>
#include <assert.h>
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointT> void
+pcl::FastBilateralFilterOMP<PointT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::FastBilateralFilterOMP<PointT>::applyFilter (PointCloud &output)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
+pcl::removeNaNFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
std::vector<int> &index)
{
{
cloud_out.header = cloud_in.header;
cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
+ cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
// Reserve enough space for the indices
index.resize (cloud_in.points.size ());
{
for (size_t i = 0; i < cloud_in.points.size (); ++i)
{
- if (!pcl_isfinite (cloud_in.points[i].x) ||
- !pcl_isfinite (cloud_in.points[i].y) ||
+ if (!pcl_isfinite (cloud_in.points[i].x) ||
+ !pcl_isfinite (cloud_in.points[i].y) ||
!pcl_isfinite (cloud_in.points[i].z))
continue;
cloud_out.points[j] = cloud_in.points[i];
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
+pcl::removeNaNNormalsFromPointCloud (const pcl::PointCloud<PointT> &cloud_in,
pcl::PointCloud<PointT> &cloud_out,
std::vector<int> &index)
{
{
cloud_out.header = cloud_in.header;
cloud_out.points.resize (cloud_in.points.size ());
+ cloud_out.sensor_origin_ = cloud_in.sensor_origin_;
+ cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
}
// Reserve enough space for the indices
index.resize (cloud_in.points.size ());
for (size_t i = 0; i < cloud_in.points.size (); ++i)
{
- if (!pcl_isfinite (cloud_in.points[i].normal_x) ||
- !pcl_isfinite (cloud_in.points[i].normal_y) ||
+ if (!pcl_isfinite (cloud_in.points[i].normal_x) ||
+ !pcl_isfinite (cloud_in.points[i].normal_y) ||
!pcl_isfinite (cloud_in.points[i].normal_z))
continue;
cloud_out.points[j] = cloud_in.points[i];
Eigen::Vector4f pl_l; // left plane
Eigen::Vector3f view = camera_pose_.block (0, 0, 3, 1); // view vector for the camera - first column of the rotation matrix
- Eigen::Vector3f up = camera_pose_.block (0, 1, 3, 1); // up vector for the camera - second column of the rotation matix
+ Eigen::Vector3f up = camera_pose_.block (0, 1, 3, 1); // up vector for the camera - second column of the rotation matrix
Eigen::Vector3f right = camera_pose_.block (0, 2, 3, 1); // right vector for the camera - third column of the rotation matrix
Eigen::Vector3f T = camera_pose_.block (0, 3, 3, 1); // The (X, Y, Z) position of the camera w.r.t origin
// Initialize the Sample Consensus model and set its parameters
if (!initSACModel (model_type_))
{
- PCL_ERROR ("[pcl::%s::segment] Error initializing the SAC model!\n", getClassName ().c_str ());
+ PCL_ERROR ("[pcl::%s::applyFilter] Error initializing the SAC model!\n", getClassName ().c_str ());
output.width = output.height = 0;
output.points.clear ();
return;
void
pcl::RandomSample<PointT>::applyFilter (std::vector<int> &indices)
{
- unsigned N = static_cast<unsigned> (indices_->size ());
-
- unsigned int sample_size = negative_ ? N - sample_ : sample_;
+ size_t N = indices_->size ();
+ size_t sample_size = negative_ ? N - sample_ : sample_;
// If sample size is 0 or if the sample size is greater then input cloud size
// then return all indices
if (sample_size >= N)
else
{
// Resize output indices to sample size
- indices.resize (static_cast<size_t> (sample_size));
+ indices.resize (sample_size);
if (extract_removed_indices_)
- removed_indices_->resize (static_cast<size_t> (N - sample_size));
+ removed_indices_->resize (N - sample_size);
// Set random seed so derived indices are the same each time the filter runs
std::srand (seed_);
- // Algorithm A
- unsigned top = N - sample_size;
- unsigned i = 0;
- unsigned index = 0;
+ // Algorithm S
+ size_t i = 0;
+ size_t index = 0;
std::vector<bool> added;
if (extract_removed_indices_)
added.resize (indices_->size (), false);
- for (size_t n = sample_size; n >= 2; n--)
+ size_t n = sample_size;
+ while (n > 0)
{
- float V = unifRand ();
- unsigned S = 0;
- float quot = static_cast<float> (top) / static_cast<float> (N);
- while (quot > V)
+ // Step 1: [Generate U.] Generate a random variate U that is uniformly distributed between 0 and 1.
+ const float U = unifRand ();
+ // Step 2: [Test.] If N * U > n, go to Step 4.
+ if ((N * U) <= n)
{
- S++;
- top--;
- N--;
- quot = quot * static_cast<float> (top) / static_cast<float> (N);
+ // Step 3: [Select.] Select the next record in the file for the sample, and set n : = n - 1.
+ if (extract_removed_indices_)
+ added[index] = true;
+ indices[i++] = (*indices_)[index];
+ --n;
}
- index += S;
- if (extract_removed_indices_)
- added[index] = true;
- indices[i++] = (*indices_)[index++];
- N--;
+ // Step 4: [Don't select.] Skip over the next record (do not include it in the sample).
+ // Set N : = N - 1.
+ --N;
+ ++index;
+ // If n > 0, then return to Step 1; otherwise, the sample is complete and the algorithm terminates.
}
- index += N * static_cast<unsigned> (unifRand ());
- if (extract_removed_indices_)
- added[index] = true;
- indices[i++] = (*indices_)[index++];
-
// Now populate removed_indices_ appropriately
if (extract_removed_indices_)
{
- unsigned ri = 0;
+ size_t ri = 0;
for (size_t i = 0; i < added.size (); i++)
{
if (!added[i])
// Set up the division multiplier
divb_mul_ = Eigen::Vector4i (1, div_b_[0], div_b_[0] * div_b_[1], 0);
+ Filter<PointT>::removed_indices_->clear();
// First pass: build a set of leaves with the point index closest to the leaf center
for (size_t cp = 0; cp < indices_->size (); ++cp)
{
if (!input_->is_dense)
+ {
// Check if the point is invalid
if (!pcl_isfinite (input_->points[(*indices_)[cp]].x) ||
!pcl_isfinite (input_->points[(*indices_)[cp]].y) ||
!pcl_isfinite (input_->points[(*indices_)[cp]].z))
+ {
+ if (Filter<PointT>::extract_removed_indices_)
+ {
+ Filter<PointT>::removed_indices_->push_back ((*indices_)[cp]);
+ }
continue;
+ }
+ }
Eigen::Vector4i ijk = Eigen::Vector4i::Zero ();
ijk[0] = static_cast<int> (floor (input_->points[(*indices_)[cp]].x * inverse_leaf_size_[0]));
// If current point is closer, copy its index instead
if (diff_cur < diff_prev)
+ {
+ if (Filter<PointT>::extract_removed_indices_)
+ {
+ Filter<PointT>::removed_indices_->push_back (leaf.idx);
+ }
+
leaf.idx = (*indices_)[cp];
+ }
}
// Second pass: go over all leaves and copy data
std::sort (index_vector.begin (), index_vector.end (), std::less<cloud_point_index_idx> ());
// Third pass: count output cells
- // we need to skip all the same, adjacenent idx values
+ // we need to skip all the same, adjacent idx values
unsigned int total = 0;
unsigned int index = 0;
// first_and_last_indices_vector[i] represents the index in index_vector of the first point in
/** \brief The model used to calculate distances */
SampleConsensusModelPtr model_;
- /** \brief The threshold used to seperate outliers (removed_indices) from inliers (indices) */
+ /** \brief The threshold used to separate outliers (removed_indices) from inliers (indices) */
float thresh_;
/** \brief The model coefficients */
* \return The name of the field that will be used for filtering.
*/
inline std::string const
- getFilterFieldName ()
+ getFilterFieldName () const
{
return (filter_field_name_);
}
* \param[out] limit_max The maximum allowed field value (default = FLT_MAX).
*/
inline void
- getFilterLimits (float &limit_min, float &limit_max)
+ getFilterLimits (float &limit_min, float &limit_max) const
{
limit_min = filter_limit_min_;
limit_max = filter_limit_max_;
negative_ = limit_negative;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \warning This method will be removed in the future. Use getNegative() instead.
* \param[out] limit_negative true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline void
- getFilterLimitsNegative (bool &limit_negative)
+ getFilterLimitsNegative (bool &limit_negative) const
{
limit_negative = negative_;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \warning This method will be removed in the future. Use getNegative() instead.
* \return true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline bool
- getFilterLimitsNegative ()
+ getFilterLimitsNegative () const
{
return (negative_);
}
* structure. By default, points are removed.
*
* \param[in] val set to true whether the filtered points should be kept and
- * set to a given user value (default: NaN)
+ * set to a user given value (default: NaN)
*/
inline void
setKeepOrganized (bool val)
/** \brief Obtain the value of the internal \a keep_organized_ parameter. */
inline bool
- getKeepOrganized ()
+ getKeepOrganized () const
{
return (keep_organized_);
}
/** \brief Get the name of the field used for filtering. */
inline std::string const
- getFilterFieldName ()
+ getFilterFieldName () const
{
return (filter_field_name_);
}
filter_limit_max_ = limit_max;
}
- /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
+ /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
* \param[out] limit_min the minimum allowed field value
* \param[out] limit_max the maximum allowed field value
*/
inline void
- getFilterLimits (double &limit_min, double &limit_max)
+ getFilterLimits (double &limit_min, double &limit_max) const
{
limit_min = filter_limit_min_;
limit_max = filter_limit_max_;
filter_limit_negative_ = limit_negative;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \param[out] limit_negative true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline void
- getFilterLimitsNegative (bool &limit_negative)
+ getFilterLimitsNegative (bool &limit_negative) const
{
limit_negative = filter_limit_negative_;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \return true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline bool
- getFilterLimitsNegative ()
+ getFilterLimitsNegative () const
{
return (filter_limit_negative_);
}
private:
/** \brief Keep the structure of the data organized, by setting the
- * filtered points to the a user given value (NaN by default).
+ * filtered points to a user given value (NaN by default).
*/
bool keep_organized_;
/** \brief User given value to be set to any filtered point. Casted to
- * the correct field type.
+ * the correct field type.
*/
float user_filter_value_;
unifRand ()
{
return (static_cast<float> (rand () / double (RAND_MAX)));
- //return (((214013 * seed_ + 2531011) >> 16) & 0x7FFF);
}
};
}
private:
- /** \brief @b CompareDim is a comparator object for sorting across a specific dimenstion (i,.e X, Y or Z)
+ /** \brief @b CompareDim is a comparator object for sorting across a specific dimension (i,.e X, Y or Z)
*/
struct CompareDim
{
* \ingroup filters
*/
template<>
- class PCL_EXPORTS StatisticalOutlierRemoval<pcl::PCLPointCloud2> : public Filter<pcl::PCLPointCloud2>
+ class PCL_EXPORTS StatisticalOutlierRemoval<pcl::PCLPointCloud2> : public FilterIndices<pcl::PCLPointCloud2>
{
- using Filter<pcl::PCLPointCloud2>::filter_name_;
- using Filter<pcl::PCLPointCloud2>::getClassName;
+ using FilterIndices<pcl::PCLPointCloud2>::filter_name_;
+ using FilterIndices<pcl::PCLPointCloud2>::getClassName;
- using Filter<pcl::PCLPointCloud2>::removed_indices_;
- using Filter<pcl::PCLPointCloud2>::extract_removed_indices_;
+ using FilterIndices<pcl::PCLPointCloud2>::removed_indices_;
+ using FilterIndices<pcl::PCLPointCloud2>::extract_removed_indices_;
typedef pcl::search::Search<pcl::PointXYZ> KdTree;
typedef pcl::search::Search<pcl::PointXYZ>::Ptr KdTreePtr;
public:
/** \brief Empty constructor. */
StatisticalOutlierRemoval (bool extract_removed_indices = false) :
- Filter<pcl::PCLPointCloud2>::Filter (extract_removed_indices), mean_k_ (2),
- std_mul_ (0.0), tree_ (), negative_ (false)
+ FilterIndices<pcl::PCLPointCloud2>::FilterIndices (extract_removed_indices), mean_k_ (2),
+ std_mul_ (0.0), tree_ ()
{
filter_name_ = "StatisticalOutlierRemoval";
}
return (std_mul_);
}
- /** \brief Set whether the indices should be returned, or all points \e except the indices.
- * \param negative true if all points \e except the input indices will be returned, false otherwise
- */
- inline void
- setNegative (bool negative)
- {
- negative_ = negative;
- }
-
- /** \brief Get the value of the internal #negative_ parameter. If
- * true, all points \e except the input indices will be returned.
- * \return The value of the "negative" flag
- */
- inline bool
- getNegative ()
- {
- return (negative_);
- }
-
protected:
/** \brief The number of points to use for mean distance estimation. */
int mean_k_;
/** \brief A pointer to the spatial search object. */
KdTreePtr tree_;
- /** \brief If true, the outliers will be returned instead of the inliers (default: false). */
- bool negative_;
+ virtual void
+ applyFilter (std::vector<int> &indices);
- void
+ virtual void
applyFilter (PCLPointCloud2 &output);
+
+ /**
+ * \brief Compute the statistical values used in both applyFilter methods.
+ *
+ * This method tries to avoid duplicate code.
+ */
+ virtual void
+ generateStatistics (double& mean, double& variance, double& stddev, std::vector<float>& distances);
};
}
typedef boost::shared_ptr<const UniformSampling<PointT> > ConstPtr;
/** \brief Empty constructor. */
- UniformSampling () :
+ UniformSampling (bool extract_removed_indices = false) :
+ Filter<PointT>(extract_removed_indices),
leaves_ (),
leaf_size_ (Eigen::Vector4f::Zero ()),
inverse_leaf_size_ (Eigen::Vector4f::Zero ()),
* \param[in] x_idx the index of the X channel
* \param[in] y_idx the index of the Y channel
* \param[in] z_idx the index of the Z channel
- * \param[out] min_pt the minimum data point
+ * \param[out] min_pt the minimum data point
* \param[out] max_pt the maximum data point
*/
- PCL_EXPORTS void
+ PCL_EXPORTS void
getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt);
- /** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
+ /** \brief Obtain the maximum and minimum points in 3D from a given point cloud.
* \note Performs internal data filtering as well.
* \param[in] cloud the pointer to a pcl::PCLPointCloud2 dataset
* \param[in] x_idx the index of the X channel
* \param[in] distance_field_name the name of the dimension to filter data along to
* \param[in] min_distance the minimum acceptable value in \a distance_field_name data
* \param[in] max_distance the maximum acceptable value in \a distance_field_name data
- * \param[out] min_pt the minimum data point
+ * \param[out] min_pt the minimum data point
* \param[out] max_pt the maximum data point
* \param[in] limit_negative \b false if data \b inside of the [min_distance; max_distance] interval should be
* considered, \b true otherwise.
*/
- PCL_EXPORTS void
+ PCL_EXPORTS void
getMinMax3D (const pcl::PCLPointCloud2ConstPtr &cloud, int x_idx, int y_idx, int z_idx,
- const std::string &distance_field_name, float min_distance, float max_distance,
+ const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
/** \brief Get the relative cell indices of the "upper half" 13 neighbors.
int idx = 0;
// 0 - 8
- for (int i = -1; i < 2; i++)
+ for (int i = -1; i < 2; i++)
{
- for (int j = -1; j < 2; j++)
+ for (int j = -1; j < 2; j++)
{
relative_coordinates (0, idx) = i;
relative_coordinates (1, idx) = j;
}
}
// 9 - 11
- for (int i = -1; i < 2; i++)
+ for (int i = -1; i < 2; i++)
{
relative_coordinates (0, idx) = i;
relative_coordinates (1, idx) = -1;
* \param[in] limit_negative if set to true, then all points outside of the interval (min_distance;max_distace) are considered
* \ingroup filters
*/
- template <typename PointT> void
- getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
+ template <typename PointT> void
+ getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
* \param[in] limit_negative if set to true, then all points outside of the interval (min_distance;max_distace) are considered
* \ingroup filters
*/
- template <typename PointT> void
- getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
+ template <typename PointT> void
+ getMinMax3D (const typename pcl::PointCloud<PointT>::ConstPtr &cloud,
const std::vector<int> &indices,
const std::string &distance_field_name, float min_distance, float max_distance,
Eigen::Vector4f &min_pt, Eigen::Vector4f &max_pt, bool limit_negative = false);
typedef typename Filter<PointT>::PointCloud PointCloud;
typedef typename PointCloud::Ptr PointCloudPtr;
typedef typename PointCloud::ConstPtr PointCloudConstPtr;
+
+ public:
+
typedef boost::shared_ptr< VoxelGrid<PointT> > Ptr;
typedef boost::shared_ptr< const VoxelGrid<PointT> > ConstPtr;
-
- public:
/** \brief Empty constructor. */
- VoxelGrid () :
+ VoxelGrid () :
leaf_size_ (Eigen::Vector4f::Zero ()),
inverse_leaf_size_ (Eigen::Array4f::Zero ()),
- downsample_all_data_ (true),
+ downsample_all_data_ (true),
save_leaf_layout_ (false),
leaf_layout_ (),
min_b_ (Eigen::Vector4i::Zero ()),
max_b_ (Eigen::Vector4i::Zero ()),
div_b_ (Eigen::Vector4i::Zero ()),
divb_mul_ (Eigen::Vector4i::Zero ()),
- filter_field_name_ (""),
- filter_limit_min_ (-FLT_MAX),
+ filter_field_name_ (""),
+ filter_limit_min_ (-FLT_MAX),
filter_limit_max_ (FLT_MAX),
filter_limit_negative_ (false),
min_points_per_voxel_ (0)
/** \brief Set the voxel grid leaf size.
* \param[in] leaf_size the voxel grid leaf size
*/
- inline void
- setLeafSize (const Eigen::Vector4f &leaf_size)
- {
- leaf_size_ = leaf_size;
+ inline void
+ setLeafSize (const Eigen::Vector4f &leaf_size)
+ {
+ leaf_size_ = leaf_size;
// Avoid division errors
if (leaf_size_[3] == 0)
leaf_size_[3] = 1;
}
/** \brief Get the voxel grid leaf size. */
- inline Eigen::Vector3f
- getLeafSize () { return (leaf_size_.head<3> ()); }
+ inline Eigen::Vector3f
+ getLeafSize () const { return (leaf_size_.head<3> ()); }
/** \brief Set to true if all fields need to be downsampled, or false if just XYZ.
* \param[in] downsample the new value (true/false)
*/
- inline void
+ inline void
setDownsampleAllData (bool downsample) { downsample_all_data_ = downsample; }
/** \brief Get the state of the internal downsampling parameter (true if
- * all fields need to be downsampled, false if just XYZ).
+ * all fields need to be downsampled, false if just XYZ).
*/
- inline bool
- getDownsampleAllData () { return (downsample_all_data_); }
+ inline bool
+ getDownsampleAllData () const { return (downsample_all_data_); }
/** \brief Set the minimum number of points required for a voxel to be used.
* \param[in] min_points_per_voxel the minimum number of points for required for a voxel to be used
*/
- inline void
+ inline void
setMinimumPointsNumberPerVoxel (unsigned int min_points_per_voxel) { min_points_per_voxel_ = min_points_per_voxel; }
/** \brief Return the minimum number of points required for a voxel to be used.
*/
inline unsigned int
- getMinimumPointsNumberPerVoxel () { return min_points_per_voxel_; }
+ getMinimumPointsNumberPerVoxel () const { return min_points_per_voxel_; }
/** \brief Set to true if leaf layout information needs to be saved for later access.
* \param[in] save_leaf_layout the new value (true/false)
*/
- inline void
+ inline void
setSaveLeafLayout (bool save_leaf_layout) { save_leaf_layout_ = save_leaf_layout; }
/** \brief Returns true if leaf layout information will to be saved for later access. */
- inline bool
- getSaveLeafLayout () { return (save_leaf_layout_); }
+ inline bool
+ getSaveLeafLayout () const { return (save_leaf_layout_); }
/** \brief Get the minimum coordinates of the bounding box (after
- * filtering is performed).
+ * filtering is performed).
*/
- inline Eigen::Vector3i
- getMinBoxCoordinates () { return (min_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getMinBoxCoordinates () const { return (min_b_.head<3> ()); }
/** \brief Get the minimum coordinates of the bounding box (after
- * filtering is performed).
+ * filtering is performed).
*/
- inline Eigen::Vector3i
- getMaxBoxCoordinates () { return (max_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getMaxBoxCoordinates () const { return (max_b_.head<3> ()); }
/** \brief Get the number of divisions along all 3 axes (after filtering
- * is performed).
+ * is performed).
*/
- inline Eigen::Vector3i
- getNrDivisions () { return (div_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getNrDivisions () const { return (div_b_.head<3> ()); }
/** \brief Get the multipliers to be applied to the grid coordinates in
- * order to find the centroid index (after filtering is performed).
+ * order to find the centroid index (after filtering is performed).
*/
- inline Eigen::Vector3i
- getDivisionMultiplier () { return (divb_mul_.head<3> ()); }
+ inline Eigen::Vector3i
+ getDivisionMultiplier () const { return (divb_mul_.head<3> ()); }
/** \brief Returns the index in the resulting downsampled cloud of the specified point.
*
- * \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering
+ * \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering
* performed, and that the point is inside the grid, to avoid invalid access (or use
* getGridCoordinates+getCentroidIndexAt)
*
* \param[in] p the point to get the index at
*/
- inline int
- getCentroidIndex (const PointT &p)
+ inline int
+ getCentroidIndex (const PointT &p) const
{
- return (leaf_layout_.at ((Eigen::Vector4i (static_cast<int> (floor (p.x * inverse_leaf_size_[0])),
- static_cast<int> (floor (p.y * inverse_leaf_size_[1])),
+ return (leaf_layout_.at ((Eigen::Vector4i (static_cast<int> (floor (p.x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (p.y * inverse_leaf_size_[1])),
static_cast<int> (floor (p.z * inverse_leaf_size_[2])), 0) - min_b_).dot (divb_mul_)));
}
* \param[in] relative_coordinates matrix with the columns being the coordinates of the requested cells, relative to the reference point's cell
* \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering performed
*/
- inline std::vector<int>
- getNeighborCentroidIndices (const PointT &reference_point, const Eigen::MatrixXi &relative_coordinates)
+ inline std::vector<int>
+ getNeighborCentroidIndices (const PointT &reference_point, const Eigen::MatrixXi &relative_coordinates) const
{
- Eigen::Vector4i ijk (static_cast<int> (floor (reference_point.x * inverse_leaf_size_[0])),
- static_cast<int> (floor (reference_point.y * inverse_leaf_size_[1])),
+ Eigen::Vector4i ijk (static_cast<int> (floor (reference_point.x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (reference_point.y * inverse_leaf_size_[1])),
static_cast<int> (floor (reference_point.z * inverse_leaf_size_[2])), 0);
Eigen::Array4i diff2min = min_b_ - ijk;
Eigen::Array4i diff2max = max_b_ - ijk;
/** \brief Returns the layout of the leafs for fast access to cells relative to current position.
* \note position at (i-min_x) + (j-min_y)*div_x + (k-min_z)*div_x*div_y holds the index of the element at coordinates (i,j,k) in the grid (-1 if empty)
*/
- inline std::vector<int>
- getLeafLayout () { return (leaf_layout_); }
+ inline std::vector<int>
+ getLeafLayout () const { return (leaf_layout_); }
- /** \brief Returns the corresponding (i,j,k) coordinates in the grid of point (x,y,z).
+ /** \brief Returns the corresponding (i,j,k) coordinates in the grid of point (x,y,z).
* \param[in] x the X point coordinate to get the (i, j, k) index at
* \param[in] y the Y point coordinate to get the (i, j, k) index at
* \param[in] z the Z point coordinate to get the (i, j, k) index at
*/
- inline Eigen::Vector3i
- getGridCoordinates (float x, float y, float z)
- {
- return (Eigen::Vector3i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
- static_cast<int> (floor (y * inverse_leaf_size_[1])),
- static_cast<int> (floor (z * inverse_leaf_size_[2]))));
+ inline Eigen::Vector3i
+ getGridCoordinates (float x, float y, float z) const
+ {
+ return (Eigen::Vector3i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (y * inverse_leaf_size_[1])),
+ static_cast<int> (floor (z * inverse_leaf_size_[2]))));
}
/** \brief Returns the index in the downsampled cloud corresponding to a given set of coordinates.
* \param[in] ijk the coordinates (i,j,k) in the grid (-1 if empty)
*/
- inline int
- getCentroidIndexAt (const Eigen::Vector3i &ijk)
+ inline int
+ getCentroidIndexAt (const Eigen::Vector3i &ijk) const
{
int idx = ((Eigen::Vector4i() << ijk, 0).finished() - min_b_).dot (divb_mul_);
if (idx < 0 || idx >= static_cast<int> (leaf_layout_.size ())) // this checks also if leaf_layout_.size () == 0 i.e. everything was computed as needed
/** \brief Get the name of the field used for filtering. */
inline std::string const
- getFilterFieldName ()
+ getFilterFieldName () const
{
return (filter_field_name_);
}
filter_limit_max_ = limit_max;
}
- /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
+ /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
* \param[out] limit_min the minimum allowed field value
* \param[out] limit_max the maximum allowed field value
*/
inline void
- getFilterLimits (double &limit_min, double &limit_max)
+ getFilterLimits (double &limit_min, double &limit_max) const
{
limit_min = filter_limit_min_;
limit_max = filter_limit_max_;
filter_limit_negative_ = limit_negative;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \param[out] limit_negative true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline void
- getFilterLimitsNegative (bool &limit_negative)
+ getFilterLimitsNegative (bool &limit_negative) const
{
limit_negative = filter_limit_negative_;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \return true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline bool
- getFilterLimitsNegative ()
+ getFilterLimitsNegative () const
{
return (filter_limit_negative_);
}
/** \brief The size of a leaf. */
Eigen::Vector4f leaf_size_;
- /** \brief Internal leaf sizes stored as 1/leaf_size_ for efficiency reasons. */
+ /** \brief Internal leaf sizes stored as 1/leaf_size_ for efficiency reasons. */
Eigen::Array4f inverse_leaf_size_;
/** \brief Set to true if all fields need to be downsampled, or false if just XYZ. */
/** \brief Downsample a Point Cloud using a voxelized grid approach
* \param[out] output the resultant point cloud message
*/
- void
+ void
applyFilter (PointCloud &output);
};
public:
/** \brief Empty constructor. */
- VoxelGrid () :
+ VoxelGrid () :
leaf_size_ (Eigen::Vector4f::Zero ()),
inverse_leaf_size_ (Eigen::Array4f::Zero ()),
- downsample_all_data_ (true),
+ downsample_all_data_ (true),
save_leaf_layout_ (false),
leaf_layout_ (),
min_b_ (Eigen::Vector4i::Zero ()),
max_b_ (Eigen::Vector4i::Zero ()),
div_b_ (Eigen::Vector4i::Zero ()),
divb_mul_ (Eigen::Vector4i::Zero ()),
- filter_field_name_ (""),
- filter_limit_min_ (-FLT_MAX),
+ filter_field_name_ (""),
+ filter_limit_min_ (-FLT_MAX),
filter_limit_max_ (FLT_MAX),
filter_limit_negative_ (false),
min_points_per_voxel_ (0)
/** \brief Set the voxel grid leaf size.
* \param[in] leaf_size the voxel grid leaf size
*/
- inline void
- setLeafSize (const Eigen::Vector4f &leaf_size)
- {
- leaf_size_ = leaf_size;
+ inline void
+ setLeafSize (const Eigen::Vector4f &leaf_size)
+ {
+ leaf_size_ = leaf_size;
// Avoid division errors
if (leaf_size_[3] == 0)
leaf_size_[3] = 1;
}
/** \brief Get the voxel grid leaf size. */
- inline Eigen::Vector3f
- getLeafSize () { return (leaf_size_.head<3> ()); }
+ inline Eigen::Vector3f
+ getLeafSize () const { return (leaf_size_.head<3> ()); }
/** \brief Set to true if all fields need to be downsampled, or false if just XYZ.
* \param[in] downsample the new value (true/false)
*/
- inline void
+ inline void
setDownsampleAllData (bool downsample) { downsample_all_data_ = downsample; }
/** \brief Get the state of the internal downsampling parameter (true if
- * all fields need to be downsampled, false if just XYZ).
+ * all fields need to be downsampled, false if just XYZ).
*/
- inline bool
- getDownsampleAllData () { return (downsample_all_data_); }
+ inline bool
+ getDownsampleAllData () const { return (downsample_all_data_); }
/** \brief Set the minimum number of points required for a voxel to be used.
* \param[in] min_points_per_voxel the minimum number of points for required for a voxel to be used
*/
- inline void
+ inline void
setMinimumPointsNumberPerVoxel (unsigned int min_points_per_voxel) { min_points_per_voxel_ = min_points_per_voxel; }
/** \brief Return the minimum number of points required for a voxel to be used.
*/
inline unsigned int
- getMinimumPointsNumberPerVoxel () { return min_points_per_voxel_; }
+ getMinimumPointsNumberPerVoxel () const { return min_points_per_voxel_; }
/** \brief Set to true if leaf layout information needs to be saved for later access.
* \param[in] save_leaf_layout the new value (true/false)
*/
- inline void
+ inline void
setSaveLeafLayout (bool save_leaf_layout) { save_leaf_layout_ = save_leaf_layout; }
/** \brief Returns true if leaf layout information will to be saved for later access. */
- inline bool
- getSaveLeafLayout () { return (save_leaf_layout_); }
+ inline bool
+ getSaveLeafLayout () const { return (save_leaf_layout_); }
/** \brief Get the minimum coordinates of the bounding box (after
- * filtering is performed).
+ * filtering is performed).
*/
- inline Eigen::Vector3i
- getMinBoxCoordinates () { return (min_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getMinBoxCoordinates () const { return (min_b_.head<3> ()); }
/** \brief Get the minimum coordinates of the bounding box (after
- * filtering is performed).
+ * filtering is performed).
*/
- inline Eigen::Vector3i
- getMaxBoxCoordinates () { return (max_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getMaxBoxCoordinates () const { return (max_b_.head<3> ()); }
/** \brief Get the number of divisions along all 3 axes (after filtering
- * is performed).
+ * is performed).
*/
- inline Eigen::Vector3i
- getNrDivisions () { return (div_b_.head<3> ()); }
+ inline Eigen::Vector3i
+ getNrDivisions () const { return (div_b_.head<3> ()); }
/** \brief Get the multipliers to be applied to the grid coordinates in
- * order to find the centroid index (after filtering is performed).
+ * order to find the centroid index (after filtering is performed).
*/
- inline Eigen::Vector3i
- getDivisionMultiplier () { return (divb_mul_.head<3> ()); }
+ inline Eigen::Vector3i
+ getDivisionMultiplier () const { return (divb_mul_.head<3> ()); }
/** \brief Returns the index in the resulting downsampled cloud of the specified point.
* \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering performed,
* \param[in] y the Y point coordinate to get the index at
* \param[in] z the Z point coordinate to get the index at
*/
- inline int
- getCentroidIndex (float x, float y, float z)
+ inline int
+ getCentroidIndex (float x, float y, float z) const
{
- return (leaf_layout_.at ((Eigen::Vector4i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
- static_cast<int> (floor (y * inverse_leaf_size_[1])),
- static_cast<int> (floor (z * inverse_leaf_size_[2])),
- 0)
+ return (leaf_layout_.at ((Eigen::Vector4i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (y * inverse_leaf_size_[1])),
+ static_cast<int> (floor (z * inverse_leaf_size_[2])),
+ 0)
- min_b_).dot (divb_mul_)));
}
* \param[out] relative_coordinates matrix with the columns being the coordinates of the requested cells, relative to the reference point's cell
* \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering performed
*/
- inline std::vector<int>
- getNeighborCentroidIndices (float x, float y, float z, const Eigen::MatrixXi &relative_coordinates)
+ inline std::vector<int>
+ getNeighborCentroidIndices (float x, float y, float z, const Eigen::MatrixXi &relative_coordinates) const
{
- Eigen::Vector4i ijk (static_cast<int> (floor (x * inverse_leaf_size_[0])),
- static_cast<int> (floor (y * inverse_leaf_size_[1])),
+ Eigen::Vector4i ijk (static_cast<int> (floor (x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (y * inverse_leaf_size_[1])),
static_cast<int> (floor (z * inverse_leaf_size_[2])), 0);
Eigen::Array4i diff2min = min_b_ - ijk;
Eigen::Array4i diff2max = max_b_ - ijk;
}
return (neighbors);
}
-
+
/** \brief Returns the indices in the resulting downsampled cloud of the points at the specified grid coordinates,
* relative to the grid coordinates of the specified point (or -1 if the cell was empty/out of bounds).
* \param[in] x the X coordinate of the reference point (corresponding cell is allowed to be empty/out of bounds)
* \param[out] relative_coordinates vector with the elements being the coordinates of the requested cells, relative to the reference point's cell
* \note for efficiency, user must make sure that the saving of the leaf layout is enabled and filtering performed
*/
- inline std::vector<int>
- getNeighborCentroidIndices (float x, float y, float z, const std::vector<Eigen::Vector3i, Eigen::aligned_allocator<Eigen::Vector3i> > &relative_coordinates)
+ inline std::vector<int>
+ getNeighborCentroidIndices (float x, float y, float z, const std::vector<Eigen::Vector3i, Eigen::aligned_allocator<Eigen::Vector3i> > &relative_coordinates) const
{
Eigen::Vector4i ijk (static_cast<int> (floorf (x * inverse_leaf_size_[0])), static_cast<int> (floorf (y * inverse_leaf_size_[1])), static_cast<int> (floorf (z * inverse_leaf_size_[2])), 0);
std::vector<int> neighbors;
/** \brief Returns the layout of the leafs for fast access to cells relative to current position.
* \note position at (i-min_x) + (j-min_y)*div_x + (k-min_z)*div_x*div_y holds the index of the element at coordinates (i,j,k) in the grid (-1 if empty)
*/
- inline std::vector<int>
- getLeafLayout () { return (leaf_layout_); }
+ inline std::vector<int>
+ getLeafLayout () const { return (leaf_layout_); }
/** \brief Returns the corresponding (i,j,k) coordinates in the grid of point (x,y,z).
* \param[in] x the X point coordinate to get the (i, j, k) index at
* \param[in] y the Y point coordinate to get the (i, j, k) index at
* \param[in] z the Z point coordinate to get the (i, j, k) index at
*/
- inline Eigen::Vector3i
- getGridCoordinates (float x, float y, float z)
- {
- return (Eigen::Vector3i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
- static_cast<int> (floor (y * inverse_leaf_size_[1])),
- static_cast<int> (floor (z * inverse_leaf_size_[2]))));
+ inline Eigen::Vector3i
+ getGridCoordinates (float x, float y, float z) const
+ {
+ return (Eigen::Vector3i (static_cast<int> (floor (x * inverse_leaf_size_[0])),
+ static_cast<int> (floor (y * inverse_leaf_size_[1])),
+ static_cast<int> (floor (z * inverse_leaf_size_[2]))));
}
/** \brief Returns the index in the downsampled cloud corresponding to a given set of coordinates.
* \param[in] ijk the coordinates (i,j,k) in the grid (-1 if empty)
*/
- inline int
- getCentroidIndexAt (const Eigen::Vector3i &ijk)
+ inline int
+ getCentroidIndexAt (const Eigen::Vector3i &ijk) const
{
int idx = ((Eigen::Vector4i() << ijk, 0).finished() - min_b_).dot (divb_mul_);
if (idx < 0 || idx >= static_cast<int> (leaf_layout_.size ())) // this checks also if leaf_layout_.size () == 0 i.e. everything was computed as needed
/** \brief Get the name of the field used for filtering. */
inline std::string const
- getFilterFieldName ()
+ getFilterFieldName () const
{
return (filter_field_name_);
}
filter_limit_max_ = limit_max;
}
- /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
+ /** \brief Get the field filter limits (min/max) set by the user. The default values are -FLT_MAX, FLT_MAX.
* \param[out] limit_min the minimum allowed field value
* \param[out] limit_max the maximum allowed field value
*/
inline void
- getFilterLimits (double &limit_min, double &limit_max)
+ getFilterLimits (double &limit_min, double &limit_max) const
{
limit_min = filter_limit_min_;
limit_max = filter_limit_max_;
filter_limit_negative_ = limit_negative;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \param[out] limit_negative true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline void
- getFilterLimitsNegative (bool &limit_negative)
+ getFilterLimitsNegative (bool &limit_negative) const
{
limit_negative = filter_limit_negative_;
}
- /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
+ /** \brief Get whether the data outside the interval (min/max) is to be returned (true) or inside (false).
* \return true if data \b outside the interval [min; max] is to be returned, false otherwise
*/
inline bool
- getFilterLimitsNegative ()
+ getFilterLimitsNegative () const
{
return (filter_limit_negative_);
}
/** \brief The size of a leaf. */
Eigen::Vector4f leaf_size_;
- /** \brief Internal leaf sizes stored as 1/leaf_size_ for efficiency reasons. */
+ /** \brief Internal leaf sizes stored as 1/leaf_size_ for efficiency reasons. */
Eigen::Array4f inverse_leaf_size_;
/** \brief Set to true if all fields need to be downsampled, or false if just XYZ. */
bool downsample_all_data_;
/** \brief Set to true if leaf layout information needs to be saved in \a
- * leaf_layout.
+ * leaf_layout.
*/
bool save_leaf_layout_;
/** \brief The leaf layout information for fast access to cells relative
- * to current position
+ * to current position
*/
std::vector<int> leaf_layout_;
/** \brief The minimum and maximum bin coordinates, the number of
- * divisions, and the division multiplier.
+ * divisions, and the division multiplier.
*/
Eigen::Vector4i min_b_, max_b_, div_b_, divb_mul_;
/** \brief Downsample a Point Cloud using a voxelized grid approach
* \param[out] output the resultant point cloud
*/
- void
+ void
applyFilter (PCLPointCloud2 &output);
};
}
}
- /** \brief Get the voxels surrounding point p, not including the voxel contating point p.
+ /** \brief Get the voxels surrounding point p, not including the voxel containing point p.
* \note Only voxels containing a sufficient number of points are used (slower than radius search in practice).
* \param[in] reference_point the point to get the leaf structure at
* \param[out] neighbors
/** \brief Flag to determine if voxel structure is searchable. */
bool searchable_;
- /** \brief Minimum points contained with in a voxel to allow it to be useable. */
+ /** \brief Minimum points contained with in a voxel to allow it to be usable. */
int min_points_per_voxel_;
/** \brief Minimum allowable ratio between eigenvalues to prevent singular covariance matrices. */
const Eigen::Vector3i& in_target_voxel);
/** \brief Computes the voxel coordinates (i, j, k) of all occluded
- * voxels in the voxel gird.
+ * voxels in the voxel grid.
* \param[out] occluded_voxels the coordinates (i, j, k) of all occluded voxels
* \return 0 upon success and -1 if an error occurs
*/
#include <pcl/point_types.h>
#ifdef PCL_ONLY_CORE_POINT_TYPES
- PCL_INSTANTIATE(ExtractIndices, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::Normal)(pcl::PointXYZRGBNormal))
+ PCL_INSTANTIATE(ExtractIndices, (pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::Normal)(pcl::PointNormal)(pcl::PointXYZRGBNormal))
#else
PCL_INSTANTIATE(ExtractIndices, PCL_POINT_TYPES)
#endif
{
// Copy the header (and thus the frame_id) + allocate enough space for points
output.height = 1; // filtering breaks the organized structure
- // Because we're doing explit checks for isfinite, is_dense = true
+ // Because we're doing explicit checks for isfinite, is_dense = true
output.is_dense = true;
}
output.row_step = input_->row_step;
#include <pcl/filters/impl/statistical_outlier_removal.hpp>
#include <pcl/conversions.h>
+using namespace std;
+
///////////////////////////////////////////////////////////////////////////////////////////
void
pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (PCLPointCloud2 &output)
{
- output.is_dense = true;
// If fields x/y/z are not present, we cannot filter
if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
{
output.data.clear ();
return;
}
+
+ double mean;
+ double variance;
+ double stddev;
+ vector<float> distances;
+ generateStatistics (mean, variance, stddev, distances);
+ double const distance_threshold = mean + std_mul_ * stddev; // a distance that is bigger than this signals an outlier
+
+ // Copy the common fields
+ output.is_dense = input_->is_dense;
+ output.is_bigendian = input_->is_bigendian;
+ output.point_step = input_->point_step;
+ if (keep_organized_)
+ {
+ output.width = input_->width;
+ output.height = input_->height;
+ output.data.resize (input_->data.size ());
+ }
+ else
+ {
+ output.height = 1;
+ output.data.resize (indices_->size () * input_->point_step); // reserve enough space
+ }
+
+ removed_indices_->resize (input_->data.size ());
+
+ // Build a new cloud by neglecting outliers
+ int nr_p = 0;
+ int nr_removed_p = 0;
+ bool remove_point = false;
+ for (int cp = 0; cp < static_cast<int> (indices_->size ()); ++cp)
+ {
+ if (negative_)
+ remove_point = (distances[cp] <= distance_threshold);
+ else
+ remove_point = (distances[cp] > distance_threshold);
+
+ if (remove_point)
+ {
+ if (extract_removed_indices_)
+ (*removed_indices_)[nr_removed_p++] = cp;
+
+ if (keep_organized_)
+ {
+ /* Set the current point to NaN. */
+ *(reinterpret_cast<float*>(&output.data[nr_p * output.point_step])+0) = std::numeric_limits<float>::quiet_NaN();
+ *(reinterpret_cast<float*>(&output.data[nr_p * output.point_step])+1) = std::numeric_limits<float>::quiet_NaN();
+ *(reinterpret_cast<float*>(&output.data[nr_p * output.point_step])+2) = std::numeric_limits<float>::quiet_NaN();
+ nr_p++;
+ output.is_dense = false;
+ }
+ else
+ continue;
+ }
+ else
+ {
+ memcpy (&output.data[nr_p * output.point_step], &input_->data[(*indices_)[cp] * output.point_step],
+ output.point_step);
+ nr_p++;
+ }
+ }
+
+ if (!keep_organized_)
+ {
+ output.width = nr_p;
+ output.data.resize (output.width * output.point_step);
+ }
+ output.row_step = output.point_step * output.width;
+
+ removed_indices_->resize (nr_removed_p);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+void
+pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::applyFilter (vector<int>& indices)
+{
+ // If fields x/y/z are not present, we cannot filter
+ if (x_idx_ == -1 || y_idx_ == -1 || z_idx_ == -1)
+ {
+ PCL_ERROR ("[pcl::%s::applyFilter] Input dataset doesn't have x-y-z coordinates!\n", getClassName ().c_str ());
+ indices.clear();
+ return;
+ }
+
+ if (std_mul_ == 0.0)
+ {
+ PCL_ERROR ("[pcl::%s::applyFilter] Standard deviation multipler not set!\n", getClassName ().c_str ());
+ indices.clear();
+ return;
+ }
+
+ double mean;
+ double variance;
+ double stddev;
+ vector<float> distances;
+ generateStatistics(mean, variance, stddev, distances);
+ double const distance_threshold = mean + std_mul_ * stddev; // a distance that is bigger than this signals an outlier
+
+ // Second pass: Classify the points on the computed distance threshold
+ size_t nr_p = 0, nr_removed_p = 0;
+ for (size_t cp = 0; cp < indices_->size (); ++cp)
+ {
+ // Points having a too high average distance are outliers and are passed to removed indices
+ // Unless negative was set, then it's the opposite condition
+ if ((!negative_ && distances[cp] > distance_threshold) || (negative_ && distances[cp] <= distance_threshold))
+ {
+ if (extract_removed_indices_)
+ (*removed_indices_)[nr_removed_p++] = (*indices_)[cp];
+ continue;
+ }
+
+ // Otherwise it was a normal point for output (inlier)
+ indices[nr_p++] = (*indices_)[cp];
+ }
+
+ // Resize the output arrays
+ indices.resize (nr_p);
+ removed_indices_->resize (nr_p);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+void
+pcl::StatisticalOutlierRemoval<pcl::PCLPointCloud2>::generateStatistics (double& mean,
+ double& variance,
+ double& stddev,
+ std::vector<float>& distances)
+{
// Send the input dataset to the spatial locator
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::fromPCLPointCloud2 (*input_, *cloud);
std::vector<int> nn_indices (mean_k_);
std::vector<float> nn_dists (mean_k_);
- std::vector<float> distances (indices_->size ());
+ distances.resize (indices_->size ());
int valid_distances = 0;
// Go over all the points and calculate the mean or smallest distance
for (size_t cp = 0; cp < indices_->size (); ++cp)
sum += distances[i];
sq_sum += distances[i] * distances[i];
}
- double mean = sum / static_cast<double>(valid_distances);
- double variance = (sq_sum - sum * sum / static_cast<double>(valid_distances)) / (static_cast<double>(valid_distances) - 1);
- double stddev = sqrt (variance);
- //getMeanStd (distances, mean, stddev);
- double distance_threshold = mean + std_mul_ * stddev; // a distance that is bigger than this signals an outlier
-
- // Copy the common fields
- output.is_bigendian = input_->is_bigendian;
- output.point_step = input_->point_step;
- output.height = 1;
-
- output.data.resize (indices_->size () * input_->point_step); // reserve enough space
- removed_indices_->resize (input_->data.size ());
-
- // Build a new cloud by neglecting outliers
- int nr_p = 0;
- int nr_removed_p = 0;
- for (int cp = 0; cp < static_cast<int> (indices_->size ()); ++cp)
- {
- if (negative_)
- {
- if (distances[cp] <= distance_threshold)
- {
- if (extract_removed_indices_)
- {
- (*removed_indices_)[nr_removed_p] = cp;
- nr_removed_p++;
- }
- continue;
- }
- }
- else
- {
- if (distances[cp] > distance_threshold)
- {
- if (extract_removed_indices_)
- {
- (*removed_indices_)[nr_removed_p] = cp;
- nr_removed_p++;
- }
- continue;
- }
- }
-
- memcpy (&output.data[nr_p * output.point_step], &input_->data[(*indices_)[cp] * output.point_step],
- output.point_step);
- nr_p++;
- }
- output.width = nr_p;
- output.data.resize (output.width * output.point_step);
- output.row_step = output.point_step * output.width;
-
- removed_indices_->resize (nr_removed_p);
+ mean = sum / static_cast<double>(valid_distances);
+ variance = (sq_sum - sum * sum / static_cast<double>(valid_distances)) / (static_cast<double>(valid_distances) - 1);
+ stddev = sqrt (variance);
}
+
#ifndef PCL_NO_PRECOMPILE
#include <pcl/impl/instantiate.hpp>
#include <pcl/point_types.h>
typedef EdgeDataT EdgeData;
typedef FaceDataT FaceData;
- /** \brief Specifies wether the mesh is manifold or not (only non-manifold vertices can be represented). */
+ /** \brief Specifies whether the mesh is manifold or not (only non-manifold vertices can be represented). */
typedef boost::false_type IsManifold;
};
} // End namespace geometry
/** \brief Initializes with user allocated buffer. Reference counting is disabled in this case.
* \param ptr pointer to buffer
- * \param size elemens number
+ * \param size elements number
* */
DeviceArray(T *ptr, size_t size);
/** \brief Copy constructor. Just increments reference counter. */
DeviceArray(const DeviceArray& other);
- /** \brief Assigment operator. Just increments reference counter. */
+ /** \brief Assignment operator. Just increments reference counter. */
DeviceArray& operator = (const DeviceArray& other);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
- * \param size elemens number
+ * \param size elements number
* */
void create(size_t size);
/** \brief Uploads data to internal buffer in GPU memory. It calls create() inside to ensure that intenal buffer size is enough.
* \param host_ptr pointer to buffer to upload
- * \param size elemens number
+ * \param size elements number
* */
void upload(const T *host_ptr, size_t size);
/** \brief Copy constructor. Just increments reference counter. */
DeviceArray2D(const DeviceArray2D& other);
- /** \brief Assigment operator. Just increments reference counter. */
+ /** \brief Assignment operator. Just increments reference counter. */
DeviceArray2D& operator = (const DeviceArray2D& other);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
* */
void upload(const void *host_ptr, size_t host_step, int rows, int cols);
- /** \brief Downloads data from internal buffer to CPU memory. User is resposible for correct host buffer size.
+ /** \brief Downloads data from internal buffer to CPU memory. User is responsible for correct host buffer size.
* \param host_ptr pointer to host buffer to download
* \param host_step stride between two consecutive rows in bytes for host buffer
* */
/** \brief Copy constructor. Just increments reference counter. */
DeviceMemory(const DeviceMemory& other_arg);
- /** \brief Assigment operator. Just increments reference counter. */
+ /** \brief Assignment operator. Just increments reference counter. */
DeviceMemory& operator=(const DeviceMemory& other_arg);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
/** \brief Copy constructor. Just increments reference counter. */
DeviceMemory2D(const DeviceMemory2D& other_arg);
- /** \brief Assigment operator. Just increments reference counter. */
+ /** \brief Assignment operator. Just increments reference counter. */
DeviceMemory2D& operator=(const DeviceMemory2D& other_arg);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was created before the function recreates it with new size. If new and old sizes are equal it does nothing.
* */
void upload(const void *host_ptr_arg, size_t host_step_arg, int rows_arg, int colsBytes_arg);
- /** \brief Downloads data from internal buffer to CPU memory. User is resposible for correct host buffer size.
+ /** \brief Downloads data from internal buffer to CPU memory. User is responsible for correct host buffer size.
* \param host_ptr_arg pointer to host buffer to download
* \param host_step_arg stride between two consecutive rows in bytes for host buffer
* */
/** \brief Sets active device to work with. */
PCL_EXPORTS void setDevice(int device);
- /** \brief Return devuce name for gived device. */
+ /** \brief Return device name for given device. */
PCL_EXPORTS std::string getDeviceName(int device);
- /** \brief Prints infromatoin about given cuda deivce or about all deivces
- * \param device: if < 0 prints info for all devices, otherwise the function interpets is as device id.
+ /** \brief Prints information about given cuda device or about all devices
+ * \param device: if < 0 prints info for all devices, otherwise the function interprets it as device id.
*/
void PCL_EXPORTS printCudaDeviceInfo(int device = -1);
- /** \brief Prints infromatoin about given cuda deivce or about all deivces
- * \param device: if < 0 prints info for all devices, otherwise the function interpets is as device id.
+ /** \brief Prints information about given cuda device or about all devices
+ * \param device: if < 0 prints info for all devices, otherwise the function interprets it as device id.
*/
void PCL_EXPORTS printShortCudaDeviceInfo(int device = -1);
- /** \brief Returns true if pre-Fermi generaton GPU.
+ /** \brief Returns true if pre-Fermi generator GPU.
* \param device: device id to check, if < 0 checks current device.
*/
bool PCL_EXPORTS checkIfPreFermiGPU(int device = -1);
int pcl::gpu::getCudaEnabledDeviceCount() { return 0; }
void pcl::gpu::setDevice(int /*device*/) { throw_nogpu(); }
std::string pcl::gpu::getDeviceName(int /*device*/) { throw_nogpu(); }
-void pcl::gpu::printCudaDeviceInfo(int /*deivce*/){ throw_nogpu(); }
+void pcl::gpu::printCudaDeviceInfo(int /*device*/){ throw_nogpu(); }
void pcl::gpu::printShortCudaDeviceInfo(int /*device*/) { throw_nogpu(); }
#else
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct {
- int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
+ int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} SMtoCores;
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "${SUBSYS_DEPS}" "${EXT_DEPS}" "" "" "")
# Install include files
- PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs} ${dev_incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}/device" ${dev_incs})
add_subdirectory(test)
endif()
if (roots.x >= roots.y)
swap (roots.x, roots.y);
}
- if (roots.x <= 0) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0
+ if (roots.x <= 0) // eigenval for symmetric positive semi-definite matrix can not be negative! Set it to 0
computeRoots2 (c2, c1, roots);
}
}
if (angular)
{
//transform sum to average dividing angle/spinimage element-wize.
- const float *amgles_beg = simage_angles + FSize;
- const float *amgles_end = amgles_beg + FSize;
+ const float *angles_beg = simage_angles + FSize;
+ const float *angles_end = angles_beg + FSize;
const float *images_beg = simage_angles;
- Block::transfrom(amgles_beg, amgles_end, images_beg, output.ptr(i_input), Div12eps());
- ////Block::copy(amgles_beg, amgles_end, output.ptr(i_input));
+ Block::transform(angles_beg, angles_end, images_beg, output.ptr(i_input), Div12eps());
+ ////Block::copy(angles_beg, angles_end, output.ptr(i_input));
//Block::copy(images_beg, images_beg + FSize, output.ptr(i_input));
}
else
__syncthreads();
float sum = simage_angles[FSize];
- Block::transfrom(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
+ Block::transform(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
}
}
cudaSafeCall( cudaGetDeviceProperties(&prop, device) );
int total = static_cast<int> (indices.empty() ? points.size() : indices.size());
- int total_lenght_in_blocks = (total + VfhDevice::CTA_SIZE - 1) / VfhDevice::CTA_SIZE;
+ int total_length_in_blocks = (total + VfhDevice::CTA_SIZE - 1) / VfhDevice::CTA_SIZE;
int block = VfhDevice::CTA_SIZE;
- int grid = min(total_lenght_in_blocks, prop.multiProcessorCount * prop.maxThreadsPerMultiProcessor / VfhDevice::CTA_SIZE);
+ int grid = min(total_length_in_blocks, prop.multiProcessorCount * prop.maxThreadsPerMultiProcessor / VfhDevice::CTA_SIZE);
DeviceArray2D<float> global_buffer(grid, VfhDevice::FSize);
*/
-#ifndef PCL_GPU_FEATURES_TEST_DATA_SORUCE_HPP_
-#define PCL_GPU_FEATURES_TEST_DATA_SORUCE_HPP_
+#ifndef PCL_GPU_FEATURES_TEST_DATA_SOURCE_HPP_
+#define PCL_GPU_FEATURES_TEST_DATA_SOURCE_HPP_
#include<string>
}
}
-#endif /* PCL_GPU_FEATURES_TEST_DATA_SORUCE_HPP_ */
+#endif /* PCL_GPU_FEATURES_TEST_DATA_SOURCE_HPP_ */
*/
ColorVolume(const TsdfVolume& tsdf, int max_weight = -1);
- /** \brief Desctructor */
+ /** \brief Destructor */
~ColorVolume();
/** \brief Resets color volume to uninitialized state */
getDepthIntrinsics (float& fx, float& fy, float& cx, float& cy);
- /** \brief Sets initial camera pose relative to volume coordiante space
+ /** \brief Sets initial camera pose relative to volume coordinate space
* \param[in] pose Initial camera pose
*/
void
/** \brief Camera movement threshold. TSDF is integrated iff a camera movement metric exceedes some value. */
float integration_metric_threshold_;
- /** \brief ICP step is completelly disabled. Inly integratio now */
+ /** \brief ICP step is completely disabled. Only integration now. */
bool disable_icp_;
/** \brief Allocates all GPU internal buffers.
/** \brief Runs marching cubes triangulation.
* \param[in] tsdf
- * \param[in] triangles_buffer Buffer for triangles. Its size determines max extracted triangles. If empty, it will be allocated with default size will be used.
- * \return Array with triangles. Each 3 consequent poits belond to a single triangle. The returned array points to 'triangles_buffer' data.
+ * \param[in] triangles_buffer Buffer for triangles. Its size determines max extracted triangles. If empty, it will be allocated with default size to be used.
+ * \return Array with triangles. Each 3 consequent points belong to a single triangle. The returned array points to 'triangles_buffer' data.
*/
DeviceArray<PointType>
run(const TsdfVolume& tsdf, DeviceArray<PointType>& triangles_buffer);
/** \brief Edge table for marching cubes */
DeviceArray<int> edgeTable_;
- /** \brief Number of vertextes table for marching cubes */
+ /** \brief Number of vertices table for marching cubes */
DeviceArray<int> numVertsTable_;
/** \brief Triangles table for marching cubes */
DeviceArray<int> triTable_;
- /** \brief Temporary buffer used by marching cubes (first row stores occuped voxes id, second number of vetexes, third poits offsets */
+ /** \brief Temporary buffer used by marching cubes (first row stores occupied voxel id, second number of vertices, third points offsets */
DeviceArray2D<int> occupied_voxels_buffer_;
};
}
void
setIntrinsics(float fx = 525.f, float fy = 525.f, float cx = -1, float cy = -1);
- /** \brief Runs raycasting algorithm from given camera pose. It writes results to internal fiels.
+ /** \brief Runs raycasting algorithm from given camera pose. It writes results to internal files.
* \param[in] volume tsdf volume container
* \param[in] camera_pose camera pose
*/
*/
TsdfVolume(const Eigen::Vector3i& resolution);
- /** \brief Sets Tsdf volume size for each dimention
+ /** \brief Sets Tsdf volume size for each dimension
* \param[in] size size of tsdf volume in meters
*/
void
void
setTsdfTruncDist (float distance);
- /** \brief Returns tsdf volume container that point to data in GPU memroy */
+ /** \brief Returns tsdf volume container that point to data in GPU memory */
DeviceArray2D<int>
data() const;
__shared__ int cta_buffer[CTA_SIZE];
#endif
-#if __CUDA_ARCH__ >= 120
+#if CUDA_VERSION >= 9000
+ if (__all_sync (__activemask (), x >= VOLUME_X)
+ || __all_sync (__activemask (), y >= VOLUME_Y))
+ return;
+#elif __CUDA_ARCH__ >= 120
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#else
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
-
-#if __CUDA_ARCH__ >= 200
+#if CUDA_VERSION >= 9000
+ int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ + __popc (__ballot_sync (__activemask (), local_count > 1))
+ + __popc (__ballot_sync (__activemask (), local_count > 2));
+#elif __CUDA_ARCH__ >= 200
///not we fulfilled points array at current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
if (x < cols && y < rows)
{
- //vetexes
+ //vertices
float3 vsrc, vdst = make_float3 (qnan, qnan, qnan);
vsrc.x = vmap_src.ptr (y)[x];
#endif
-#if __CUDA_ARCH__ >= 120
+#if CUDA_VERSION >= 9000
+ if (__all_sync (__activemask (), x >= VOLUME_X)
+ || __all_sync (__activemask (), y >= VOLUME_Y))
+ return;
+#elif __CUDA_ARCH__ >= 200
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#else
// read number of vertices from texture
numVerts = (cubeindex == 0 || cubeindex == 255) ? 0 : tex1Dfetch (numVertsTex, cubeindex);
}
-#if __CUDA_ARCH__ >= 200
+#if CUDA_VERSION >= 9000
+ int total = __popc (__ballot_sync (__activemask (), numVerts > 0));
+#elif __CUDA_ARCH__ >= 200
int total = __popc (__ballot (numVerts > 0));
#else
int total = __popc (Emulation::Ballot(numVerts > 0, cta_buffer));
}
int old_global_voxels_count = warps_buffer[warp_id];
-#if __CUDA_ARCH__ >= 200
+#if CUDA_VERSION >= 9000
+ int offs = Warp::binaryExclScan (__ballot_sync (__activemask (), numVerts > 0));
+#elif __CUDA_ARCH__ >= 200
int offs = Warp::binaryExclScan (__ballot (numVerts > 0));
#else
int offs = Warp::binaryExclScan(Emulation::Ballot(numVerts > 0, cta_buffer));
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
- //tranform to curr cam coo space
+ //transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_
+#include <cuda.h>
namespace pcl
{
if (roots.x >= roots.y)
swap (roots.x, roots.y);
}
- if (roots.x <= 0) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0
+ if (roots.x <= 0) // eigenval for symmetric positive semi-definite matrix can not be negative! Set it to 0
computeRoots2 (c2, c1, roots);
}
}
static __forceinline__ __device__ int
Ballot(int predicate, volatile int* cta_buffer)
{
-#if __CUDA_ARCH__ >= 200
+#if CUDA_VERSION >= 9000
+ (void)cta_buffer;
+ return __ballot_sync (__activemask (), predicate);
+#elif __CUDA_ARCH__ >= 200
(void)cta_buffer;
- return __ballot(predicate);
+ return __ballot(predicate);
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
static __forceinline__ __device__ bool
All(int predicate, volatile int* cta_buffer)
{
-#if __CUDA_ARCH__ >= 200
+#if CUDA_VERSION >= 9000
+ (void)cta_buffer;
+ return __all_sync (__activemask (), predicate);
+#elif __CUDA_ARCH__ >= 200
(void)cta_buffer;
return __all(predicate);
#else
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Maps
- /** \brief Perfoms bilateral filtering of disparity map
- * \param[in] src soruce map
+ /** \brief Performs bilateral filtering of disparity map
+ * \param[in] src source map
* \param[out] dst output map
*/
void
void
computeNormalsEigen (const MapArr& vmap, MapArr& nmap);
- /** \brief Performs affine tranform of vertex and normal maps
+ /** \brief Performs affine transform of vertex and normal maps
* \param[in] vmap_src source vertex map
* \param[in] nmap_src source vertex map
* \param[in] Rmat Rotation mat
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ICP
- /** \brief (now it's exra code) Computes corespondances map
+ /** \brief (now it's extra code) Computes corespondances map
* \param[in] vmap_g_curr current vertex map in global coo space
* \param[in] nmap_g_curr current normals map in global coo space
* \param[in] Rprev_inv inverse camera rotation at previous pose
findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr,
const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp);
- /** \brief (now it's exra code) Computation Ax=b for ICP iteration
+ /** \brief (now it's extra code) Computation Ax=b for ICP iteration
* \param[in] v_dst destination vertex map (previous frame cloud)
* \param[in] n_dst destination normal map (previous frame normals)
* \param[in] v_src source normal map (current frame cloud)
const PtrStep<short2>& volume, MapArr& vmap, MapArr& nmap);
/** \brief Renders 3D image of the scene
- * \param[in] vmap vetex map
+ * \param[in] vmap vertex map
* \param[in] nmap normals map
* \param[in] light poase of light source
* \param[out] dst buffer where image is generated
/** \brief Perform point cloud extraction from tsdf volume
* \param[in] volume tsdf volume
* \param[in] volume_size size of the volume
- * \param[out] output buffer large enought to store point cloud
+ * \param[out] output buffer large enough to store point cloud
* \return number of point stored to passed buffer
*/
PCL_EXPORTS size_t
extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output);
- /** \brief Performs normals computation for given poins using tsdf volume
+ /** \brief Performs normals computation for given points using tsdf volume
* \param[in] volume tsdf volume
* \param[in] volume_size volume size
* \param[in] input points where normals are computed
void
unbindTextures();
- /** \brief Scans tsdf volume and retrieves occuped voxes
+ /** \brief Scans tsdf volume and retrieves occupied voxels
* \param[in] volume tsdf volume
- * \param[out] occupied_voxels buffer for occuped voxels. The function fulfills first row with voxel ids and second row with number of vertextes.
+ * \param[out] occupied_voxels buffer for occupied voxels. The function fulfills first row with voxel ids and second row with number of vertices.
* \return number of voxels in the buffer
*/
int
getOccupiedVoxels(const PtrStep<short2>& volume, DeviceArray2D<int>& occupied_voxels);
/** \brief Computes total number of vertexes for all voxels and offsets of vertexes in final triangle array
- * \param[out] occupied_voxels buffer with occuped voxels. The function fulfills 3nd only with offsets
+ * \param[out] occupied_voxels buffer with occupied voxels. The function fulfills 3nd only with offsets
* \return total number of vertexes
*/
int
/** \brief Generates final triangle array
* \param[in] volume tsdf volume
- * \param[in] occupied_voxels occuped voxel ids (first row), number of vertexes(second row), offsets(third row).
+ * \param[in] occupied_voxels occupied voxel ids (first row), number of vertexes(second row), offsets(third row).
* \param[in] volume_size volume size in meters
* \param[out] output triangle array
*/
coresps_[i].create (pyr_rows, pyr_cols);
}
depthRawScaled_.create (rows, cols);
- // see estimate tranform for the magic numbers
+ // see estimate transform for the magic numbers
gbuf_.create (27, 20*60);
sumbuf_.create (27);
}
}
else
{
- Rcurr = Rprev; // tranform to global coo for ith camera pose
+ Rcurr = Rprev; // transform to global coo for ith camera pose
tcurr = tprev;
}
{
}
}
}
- //save tranform
+ //save transform
rmats_.push_back (Rcurr);
tvecs_.push_back (tcurr);
}
void
capture (Eigen::Isometry3d pose_in,unsigned short* depth_buffer_mm,const uint8_t* color_buffer)//, string point_cloud_fname)
{
- // No reference image - but this is kept for compatability with range_test_v2:
+ // No reference image - but this is kept for compatibility with range_test_v2:
float* reference = new float[range_likelihood_->getRowHeight() * range_likelihood_->getColWidth()];
//const float* depth_buffer = range_likelihood_->getDepthBuffer();
// Copy one image from our last as a reference.
PtrStepSz<const KinfuTracker::PixelRGB> rgb24_sim = PtrStepSz<const KinfuTracker::PixelRGB>(height, width, color_buf_, width);
tic_toc.push_back (getTime ());
- if (1==0){ // live capture - probably doesnt work anymore, left in here for comparison:
+ if (1==0){ // live capture - probably doesn't work anymore, left in here for comparison:
bool has_frame = evaluation_ptr_ ? evaluation_ptr_->grab(i, depth) : capture_.grab (depth, rgb24);
if (!has_frame)
{
cout << " --registration, -r : enable registration mode" << endl;
cout << " --integrate-colors, -ic : enable color integration mode ( allows to get cloud with colors )" << endl;
cout << " -volume_suze <size_in_meters> : define integration volume size" << endl;
- cout << " -dev <deivce>, -oni <oni_file> : select depth source. Default will be selected if not specified" << endl;
+ cout << " -dev <device>, -oni <oni_file> : select depth source. Default will be selected if not specified" << endl;
cout << "";
cout << " For RGBD benchmark (Requires OpenCV):" << endl;
cout << " -eval <eval_folder> [-match_file <associations_file_in_the_folder>]" << endl;
// parse input cloud file
pc::parse_argument (argc, argv, "-cf", cloud_file);
- // pase output volume file
+ // parse output volume file
pc::parse_argument (argc, argv, "-vf", volume_file);
// parse options to extract and save cloud from volume
////////////////////////////////////////////////////////////////////////////////////////
// Functionality
- /** \brief Converts volume to cloud of TSDF values*/
+ /** \brief Converts volume to cloud of TSDF values
+ * \param[ou] cloud - the output point cloud
+ * \param[in] step - the decimation step to use
+ */
void
- convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) const;
+ convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud,
+ const unsigned step = 2) const;
/** \brief Converts the volume to a surface representation via a point cloud */
// void
// template <typename PointT> void
// createFromCloud (const typename pcl::PointCloud<PointT>::ConstPtr &cloud, const Intr &intr);
- /** \brief Retunrs the 3D voxel coordinate */
+ /** \brief Returns the 3D voxel coordinate */
template <typename PointT> void
getVoxelCoord (const PointT &point, Eigen::Vector3i &voxel_coord) const;
- /** \brief Retunrs the 3D voxel coordinate and point offset wrt. to the voxel center (in mm) */
+ /** \brief Returns the 3D voxel coordinate and point offset wrt. to the voxel center (in mm) */
template <typename PointT> void
getVoxelCoordAndOffset (const PointT &point, Eigen::Vector3i &voxel_coord, Eigen::Vector3f &offset) const;
template <typename VoxelT, typename WeightT> void
-pcl::TSDFVolume<VoxelT, WeightT>::convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) const
+pcl::TSDFVolume<VoxelT, WeightT>::convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud,
+ const unsigned step) const
{
int sx = header_.resolution(0);
int sy = header_.resolution(1);
int sz = header_.resolution(2);
- const int step = 2;
const int cloud_size = header_.getVolumeSize() / (step*step*step);
cloud->clear();
}
-/** \brief Retunrs the 3D voxel coordinate and point offset wrt. to the voxel center (in mm) */
+/** \brief Returns the 3D voxel coordinate and point offset wrt. to the voxel center (in mm) */
template <typename VoxelT, typename WeightT> template <typename PointT> void
pcl::TSDFVolume<VoxelT, WeightT>::getVoxelCoordAndOffset (const PointT &point,
Eigen::Vector3i &coord, Eigen::Vector3f &offset) const
*/
ColorVolume(const TsdfVolume& tsdf, int max_weight = -1);
- /** \brief Desctructor */
+ /** \brief Destructor */
~ColorVolume();
/** \brief Resets color volume to uninitialized state */
}
/** \brief Check if shifting needs to be performed, returns true if so.
- Shifting is considered needed if the target point is farther than distance_treshold_.
+ Shifting is considered needed if the target point is farther than distance_threshold_.
The target point is located at distance_camera_point on the local Z axis of the camera.
* \param[in] volume pointer to the TSDFVolume living in GPU
* \param[in] cam_pose global pose of the camera in the world
}
else
{
- PCL_INFO ("Extracted cube was empty, skiping this one.\n");
+ PCL_INFO ("Extracted cube was empty, skipping this one.\n");
}
origin.z += cubeSide * step_increment;
}
void
setDepthIntrinsics (float fx, float fy, float cx = -1, float cy = -1);
- /** \brief Sets initial camera pose relative to volume coordiante space
+ /** \brief Sets initial camera pose relative to volume coordinate space
* \param[in] pose Initial camera pose
*/
void
pcl::device::kinfuLS::Mat33& transform_out, float3& translation_out);
/** \brief helper function that pre-process a raw detph map the kinect fusion algorithm.
- * The raw depth map is first blured, eventually truncated, and downsampled for each pyramid level.
+ * The raw depth map is first blurred, eventually truncated, and downsampled for each pyramid level.
* Then, vertex and normal maps are computed for each pyramid level.
* \param[in] depth_raw the raw depth map to process
* \param[in] cam_intrinsics intrinsics of the camera used to acquire the depth map
/** \brief Runs marching cubes triangulation.
* \param[in] tsdf
* \param[in] triangles_buffer Buffer for triangles. Its size determines max extracted triangles. If empty, it will be allocated with default size will be used.
- * \return Array with triangles. Each 3 consequent poits belond to a single triangle. The returned array points to 'triangles_buffer' data.
+ * \return Array with triangles. Each 3 consequent points belong to a single triangle. The returned array points to 'triangles_buffer' data.
*/
DeviceArray<PointType>
run(const TsdfVolume& tsdf, DeviceArray<PointType>& triangles_buffer);
/** \brief Edge table for marching cubes */
DeviceArray<int> edgeTable_;
- /** \brief Number of vertextes table for marching cubes */
+ /** \brief Number of vertices table for marching cubes */
DeviceArray<int> numVertsTable_;
/** \brief Triangles table for marching cubes */
DeviceArray<int> triTable_;
- /** \brief Temporary buffer used by marching cubes (first row stores occuped voxes id, second number of vetexes, third poits offsets */
+ /** \brief Temporary buffer used by marching cubes (first row stores occupied voxel id, second number of vertices, third points offsets */
DeviceArray2D<int> occupied_voxels_buffer_;
};
}
void
setIntrinsics(float fx = 525.f, float fy = 525.f, float cx = -1, float cy = -1);
- /** \brief Runs raycasting algorithm from given camera pose. It writes results to internal fiels.
+ /** \brief Runs raycasting algorithm from given camera pose. It writes results to internal files.
* \param[in] volume tsdf volume container
* \param[in] camera_pose camera pose
* \param buffer
*/
TsdfVolume (const Eigen::Vector3i& resolution);
- /** \brief Sets Tsdf volume size for each dimention
+ /** \brief Sets Tsdf volume size for each dimension
* \param[in] size size of tsdf volume in meters
*/
void
void
setTsdfTruncDist (float distance);
- /** \brief Returns tsdf volume container that point to data in GPU memroy */
+ /** \brief Returns tsdf volume container that point to data in GPU memory */
DeviceArray2D<int>
data () const;
return header_.getVolumeSize ();
}
- /** \brief Converts volume stored on host to cloud of TSDF values*/
+ /** \brief Converts volume stored on host to cloud of TSDF values
+ * \param[ou] cloud - the output point cloud
+ * \param[in] step - the decimation step to use
+ */
void
- convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) const;
+ convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud,
+ const unsigned step = 2) const;
/** \brief Returns the voxel grid resolution */
inline const Eigen::Vector3i &
/** \brief WorldModel maintains a 3D point cloud that can be queried and updated via helper functions.\n
* The world is represented as a point cloud.\n
* When new points are added to the world, we replace old ones by the newest ones.
- * This is acheived by setting old points to nan (for speed)
+ * This is achieved by setting old points to nan (for speed)
* \author Raphael Favier
*/
template <typename PointT>
void addSlice (const PointCloudPtr new_cloud);
- /** \brief Retreive existing data from the world model, after a shift
+ /** \brief Retrieve existing data from the world model, after a shift
* \param[in] previous_origin_x global origin of the cube on X axis, before the shift
* \param[in] previous_origin_y global origin of the cube on Y axis, before the shift
* \param[in] previous_origin_z global origin of the cube on Z axis, before the shift
* \param[in] size the size of a 3D cube.
* \param[out] cubes a vector of point clouds representing each cube (in their original world coordinates).
* \param[out] transforms a vector containing the xyz position of each cube in world coordinates.
- * \param[in] overlap optional overlap (in percent) between each cube (usefull to create overlapped meshes).
+ * \param[in] overlap optional overlap (in percent) between each cube (useful to create overlapped meshes).
*/
void getWorldAsCubes (double size, std::vector<PointCloudPtr> &cubes, std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &transforms, double overlap = 0.0);
__shared__ int cta_buffer[CTA_SIZE];
#endif
- #if __CUDA_ARCH__ >= 120
+ #if CUDA_VERSION >= 9000
+ if (__all_sync (__activemask (), x >= VOLUME_X)
+ || __all_sync (__activemask (), y >= VOLUME_Y))
+ return;
+ #elif __CUDA_ARCH__ >= 120
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#else
}/* if (x < VOLUME_X && y < VOLUME_Y) */
- #if __CUDA_ARCH__ >= 200
+ #if CUDA_VERSION >= 9000
+ int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ + __popc (__ballot_sync (__activemask (), local_count > 1))
+ + __popc (__ballot_sync (__activemask (), local_count > 2));
+ #elif __CUDA_ARCH__ >= 200
//not we fulfilled points array at current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
// local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads
// not we fulfilled points array at current iteration
- int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
-
+ #if CUDA_VERSION >= 9000
+ int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ + __popc (__ballot_sync (__activemask (), local_count > 1))
+ + __popc (__ballot_sync (__activemask (), local_count > 2));
+ #else
+ int total_warp = __popc (__ballot (local_count > 0))
+ + __popc (__ballot (local_count > 1))
+ + __popc (__ballot (local_count > 2));
+ #endif
if (total_warp > 0) ///more than 0 zero-crossings
{
if (x < cols && y < rows)
{
- //vetexes
+ //vertices
float3 vsrc, vdst = make_float3 (qnan, qnan, qnan);
vsrc.x = vmap_src.ptr (y)[x];
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
+ #if CUDA_VERSION >= 9000
+ if (__all_sync (__activemask (), x >= VOLUME_X)
+ || __all_sync (__activemask (), y >= VOLUME_Y))
+ return;
+ #else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
+ #endif
int ftid = Block::flattenedThreadId ();
int warp_id = Warp::id();
numVerts = (cubeindex == 0 || cubeindex == 255) ? 0 : tex1Dfetch (numVertsTex, cubeindex);
}
+ #if CUDA_VERSION >= 9000
+ int total = __popc (__ballot_sync (__activemask (), numVerts > 0));
+ #else
int total = __popc (__ballot (numVerts > 0));
+ #endif
if (total == 0)
continue;
}
int old_global_voxels_count = warps_buffer[warp_id];
+ #if CUDA_VERSION >= 9000
+ int offs = Warp::binaryExclScan (__ballot_sync (__activemask (), numVerts > 0));
+ #else
int offs = Warp::binaryExclScan (__ballot (numVerts > 0));
+ #endif
if (old_global_voxels_count + offs < max_size && numVerts > 0)
{
#pragma unroll
for(int z = 0; z < buffer.voxels_size.z; ++z, pos+=z_step)
{
- ///If we went outside of the memory, make sure we go back to the begining of it
+ ///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
#pragma unroll
for(int z = 0; z < nbSteps; ++z, pos+=z_step)
{
- ///If we went outside of the memory, make sure we go back to the begining of it
+ ///If we went outside of the memory, make sure we go back to the beginning of it
if(pos > buffer.tsdf_memory_end)
pos = pos - size;
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
- //tranform to curr cam coo space
+ //transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_
-//#include <boost/graph/buffer_concepts.hpp>
+#include <cuda.h>
namespace pcl
{
if (roots.x >= roots.y)
swap (roots.x, roots.y);
}
- if (roots.x <= 0) // eigenval for symetric positive semi-definite matrix can not be negative! Set it to 0
+ if (roots.x <= 0) // eigenval for symmetric positive semi-definite matrix can not be negative! Set it to 0
computeRoots2 (c2, c1, roots);
}
}
static __forceinline__ __device__ int
Ballot(int predicate, volatile int* cta_buffer)
{
- #if __CUDA_ARCH__ >= 200
+ #if CUDA_VERSION >= 9000
(void)cta_buffer;
- return __ballot(predicate);
+ return __ballot_sync (__activemask (), predicate);
+ #elif __CUDA_ARCH__ >= 200
+ (void)cta_buffer;
+ return __ballot (predicate);
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
static __forceinline__ __device__ bool
All(int predicate, volatile int* cta_buffer)
{
- #if __CUDA_ARCH__ >= 200
+ #if CUDA_VERSION >= 9000
+ (void)cta_buffer;
+ return __all_sync (__activemask (), predicate);
+ #elif __CUDA_ARCH__ >= 200
(void)cta_buffer;
- return __all(predicate);
+ return __all (predicate);
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = predicate ? 1 : 0;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Maps
- /** \brief Perfoms bilateral filtering of disparity map
- * \param[in] src soruce map
+ /** \brief Performs bilateral filtering of disparity map
+ * \param[in] src source map
* \param[out] dst output map
*/
void
void
computeNormalsEigen (const MapArr& vmap, MapArr& nmap);
- /** \brief Performs affine tranform of vertex and normal maps
+ /** \brief Performs affine transform of vertex and normal maps
* \param[in] vmap_src source vertex map
* \param[in] nmap_src source vertex map
* \param[in] Rmat Rotation mat
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ICP
- /** \brief (now it's exra code) Computes corespondances map
+ /** \brief (now it's extra code) Computes corespondances map
* \param[in] vmap_g_curr current vertex map in global coo space
* \param[in] nmap_g_curr current normals map in global coo space
* \param[in] Rprev_inv inverse camera rotation at previous pose
findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr,
const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp);
- /** \brief (now it's exra code) Computation Ax=b for ICP iteration
+ /** \brief (now it's extra code) Computation Ax=b for ICP iteration
* \param[in] v_dst destination vertex map (previous frame cloud)
* \param[in] n_dst destination normal map (previous frame normals)
* \param[in] v_src source normal map (current frame cloud)
/** \brief Perform point cloud extraction from tsdf volume
* \param[in] volume tsdf volume
* \param[in] volume_size size of the volume
- * \param[out] output buffer large enought to store point cloud
+ * \param[out] output buffer large enough to store point cloud
* \return number of point stored to passed buffer
*/
PCL_EXPORTS size_t
* \param[in] shiftX Offset in indices that will be cleared from the TSDF volume. The clearing start from buffer.OriginX and stops in OriginX + shiftX
* \param[in] shiftY Offset in indices that will be cleared from the TSDF volume. The clearing start from buffer.OriginY and stops in OriginY + shiftY
* \param[in] shiftZ Offset in indices that will be cleared from the TSDF volume. The clearing start from buffer.OriginZ and stops in OriginZ + shiftZ
- * \param[out] output_xyz buffer large enought to store point cloud xyz values
- * \param[out] output_intensities buffer large enought to store point cloud intensity values
+ * \param[out] output_xyz buffer large enough to store point cloud xyz values
+ * \param[out] output_intensities buffer large enough to store point cloud intensity values
* \return number of point stored to passed buffer
*/
PCL_EXPORTS size_t
extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, const int shiftX, const int shiftY, const int shiftZ, PtrSz<PointType> output_xyz, PtrSz<float> output_intensities);
- /** \brief Performs normals computation for given poins using tsdf volume
+ /** \brief Performs normals computation for given points using tsdf volume
* \param[in] volume tsdf volume
* \param[in] volume_size volume size
* \param[in] input points where normals are computed
void
unbindTextures();
- /** \brief Scans tsdf volume and retrieves occuped voxes
+ /** \brief Scans tsdf volume and retrieves occupied voxels
* \param[in] volume tsdf volume
- * \param[out] occupied_voxels buffer for occuped voxels. The function fulfills first row with voxel ids and second row with number of vertextes.
+ * \param[out] occupied_voxels buffer for occupied voxels. The function fulfills first row with voxel ids and second row with number of vertices.
* \return number of voxels in the buffer
*/
int
getOccupiedVoxels(const PtrStep<short2>& volume, DeviceArray2D<int>& occupied_voxels);
- /** \brief Computes total number of vertexes for all voxels and offsets of vertexes in final triangle array
- * \param[out] occupied_voxels buffer with occuped voxels. The function fulfills 3nd only with offsets
- * \return total number of vertexes
+ /** \brief Computes total number of vertices for all voxels and offsets of vertices in final triangle array
+ * \param[out] occupied_voxels buffer with occupied voxels. The function fulfills 3nd only with offsets
+ * \return total number of vertices
*/
int
computeOffsetsAndTotalVertexes(DeviceArray2D<int>& occupied_voxels);
/** \brief Generates final triangle array
* \param[in] volume tsdf volume
- * \param[in] occupied_voxels occuped voxel ids (first row), number of vertexes(second row), offsets(third row).
+ * \param[in] occupied_voxels occupied voxel ids (first row), number of vertices(second row), offsets(third row).
* \param[in] volume_size volume size in meters
* \param[out] output triangle array
*/
coresps_[i].create (pyr_rows, pyr_cols);
}
depthRawScaled_.create (rows, cols);
- // see estimate tranform for the magic numbers
+ // see estimate transform for the magic numbers
int r = (int)ceil ( ((float)rows) / ESTIMATE_COMBINED_CUDA_GRID_Y );
int c = (int)ceil ( ((float)cols) / ESTIMATE_COMBINED_CUDA_GRID_X );
gbuf_.create (27, r * c);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // since raw depthmaps are quite noisy, we make sure the estimated transform is big enought to be taken into account
+ // since raw depthmaps are quite noisy, we make sure the estimated transform is big enough to be taken into account
float rnorm = rodrigues2(current_rotation).norm();
float tnorm = (current_translation).norm();
const float alpha = 1.f;
///////////////////////////////////////////////////////////////////////////////////////////
// Initialization at first frame
- if (global_time_ == 0) // this is the frist frame, the tsdf volume needs to be initialized
+ if (global_time_ == 0) // this is the first frame, the tsdf volume needs to be initialized
{
// Initial rotation
Matrix3frm initial_cam_rot = rmats_[0]; // [Ri|ti] - pos of camera
device_current_translation_local -= getCyclicalBufferStructure()->origin_metric; // translation (local translation = global translation - origin of cube)
///////////////////////////////////////////////////////////////////////////////////////////
- // Integration check - We do not integrate volume if camera does not move far enought.
+ // Integration check - We do not integrate volume if camera does not move far enough.
{
float rnorm = rodrigues2(current_global_rotation.inverse() * last_known_global_rotation).norm();
float tnorm = (current_global_translation - last_known_global_translation).norm();
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
-pcl::gpu::kinfuLS::TsdfVolume::convertToTsdfCloud (pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud) const
+pcl::gpu::kinfuLS::TsdfVolume::convertToTsdfCloud ( pcl::PointCloud<pcl::PointXYZI>::Ptr &cloud,
+ const unsigned step) const
{
int sx = header_.resolution(0);
int sy = header_.resolution(1);
int sz = header_.resolution(2);
- const int step = 2;
const int cloud_size = static_cast<int> (header_.getVolumeSize() / (step*step*step));
cloud->clear();
void
capture (Eigen::Isometry3d pose_in,unsigned short* depth_buffer_mm,const uint8_t* color_buffer)//, string point_cloud_fname)
{
- // No reference image - but this is kept for compatability with range_test_v2:
+ // No reference image - but this is kept for compatibility with range_test_v2:
float* reference = new float[range_likelihood_->getRowHeight() * range_likelihood_->getColWidth()];
//const float* depth_buffer = range_likelihood_->getDepthBuffer();
// Copy one image from our last as a reference.
PtrStepSz<const KinfuTracker::PixelRGB> rgb24_sim = PtrStepSz<const KinfuTracker::PixelRGB>(height, width, color_buf_, width);
tic_toc.push_back (getTime ());
- if (1==0){ // live capture - probably doesnt work anymore, left in here for comparison:
+ if (1==0){ // live capture - probably doesn't work anymore, left in here for comparison:
bool has_frame = evaluation_ptr_ ? evaluation_ptr_->grab(i, depth) : capture_.grab (depth, rgb24);
if (!has_frame)
{
cout << " --registration, -r : enable registration mode" << endl;
cout << " --integrate-colors, -ic : enable color integration mode ( allows to get cloud with colors )" << endl;
cout << " -volume_suze <size_in_meters> : define integration volume size" << endl;
- cout << " -dev <deivce>, -oni <oni_file> : select depth source. Default will be selected if not specified" << endl;
+ cout << " -dev <device>, -oni <oni_file> : select depth source. Default will be selected if not specified" << endl;
cout << "";
cout << " For RGBD benchmark (Requires OpenCV):" << endl;
cout << " -eval <eval_folder> [-match_file <associations_file_in_the_folder>]" << endl;
{
boost::mutex::scoped_lock io_lock (io_mutex);
- PCL_INFO ("Writing remaing %d maps in the buffer to disk...\n", buff.getSize ());
+ PCL_INFO ("Writing remaining %d maps in the buffer to disk...\n", buff.getSize ());
}
while (!buff.isEmpty ())
{
// parse input cloud file
pc::parse_argument (argc, argv, "-cf", cloud_file);
- // pase output volume file
+ // parse output volume file
pc::parse_argument (argc, argv, "-vf", volume_file);
// parse options to extract and save cloud from volume
int f_idx = 0;
// int idx_vt =0;
- PCL_INFO ("Writting faces...\n");
+ PCL_INFO ("Writing faces...\n");
for (int m = 0; m < nr_meshes; ++m)
{
if (m > 0)
PCL_INFO ("Closing obj file\n");
fs.close ();
- /* Write material defination for OBJ file*/
+ /* Write material definition for OBJ file*/
// Open file
PCL_INFO ("Writing material files\n");
- //dont do it if no material to write
+ //don't do it if no material to write
if(tex_mesh.tex_materials.size() ==0)
return (0);
return (file);
}
-/** \brief Helper function that reads a camera file outputed by Kinfu */
+/** \brief Helper function that reads a camera file outputted by Kinfu */
bool readCamPoseFile(std::string filename, pcl::TextureMapping<pcl::PointXYZ>::Camera &cam)
{
ifstream myReadFile;
namespace gpu
{
/**
- * \brief Octree implementation on GPU. It suppors parallel building and paralel batch search as well .
+ * \brief Octree implementation on GPU. It suppors parallel building and parallel batch search as well .
* \author Anaoly Baksheev, Itseez, myname.mysurname@mycompany.com
*/
/** \brief Returns true if tree has been built */
bool isBuilt();
- /** \brief Downloads Octree from GPU to search using CPU fucntion. It use usefull for signle (not-batch) search */
+ /** \brief Downloads Octree from GPU to search using CPU function. It use useful for single (not-batch) search */
void internalDownload();
- /** \brief Performs search of all points wihtin given radius on CPU. It call \a internalDownload if nessesary
+ /** \brief Performs search of all points within given radius on CPU. It call \a internalDownload if necessary
* \param[in] center center of sphere
* \param[in] radius radious of sphere
* \param[out] out indeces of points within give sphere
*/
void radiusSearchHost(const PointType& center, float radius, std::vector<int>& out, int max_nn = INT_MAX);
- /** \brief Performs approximate neares neighbor search on CPU. It call \a internalDownload if nessesary
+ /** \brief Performs approximate nearest neighbor search on CPU. It call \a internalDownload if necessary
* \param[in] query 3D point for which neighbour is be fetched
* \param[out] out_index neighbour index
* \param[out] sqr_dist square distance to the neighbour returned
* \param[in] centers array of centers
* \param[in] radius radius for all queries
* \param[in] max_results max number of returned points for each querey
- * \param[out] result results packed to signle array
+ * \param[out] result results packed to single array
*/
void radiusSearch(const Queries& centers, float radius, int max_results, NeighborIndices& result) const;
* \param[in] centers array of centers
* \param[in] radiuses array of radiuses
* \param[in] max_results max number of returned points for each querey
- * \param[out] result results packed to signle array
+ * \param[out] result results packed to single array
*/
void radiusSearch(const Queries& centers, const Radiuses& radiuses, int max_results, NeighborIndices& result) const;
* \param[in] indices indices for centers array (only for these points search is performed)
* \param[in] radius radius for all queries
* \param[in] max_results max number of returned points for each querey
- * \param[out] result results packed to signle array
+ * \param[out] result results packed to single array
*/
void radiusSearch(const Queries& centers, const Indices& indices, float radius, int max_results, NeighborIndices& result) const;
/** \brief Batch exact k-nearest search on GPU for k == 1 only!
* \param[in] queries array of centers
- * \param[in] k nubmer of neighbors (only k == 1 is supported)
+ * \param[in] k number of neighbors (only k == 1 is supported)
* \param[out] results array of results
*/
void nearestKSearchBatch(const Queries& queries, int k, NeighborIndices& results) const;
__device__ __forceinline__ void operator()() const
{
- //32 is a perfomance penalty step for search
+ //32 is a performance penalty step for search
Static<(max_points_per_leaf % 32) == 0>::check();
if (threadIdx.x == 0)
namespace
{
- int getBitsNum(int interger)
+ int getBitsNum(int integer)
{
int count = 0;
- while(interger > 0)
+ while(integer > 0)
{
- if (interger & 1)
+ if (integer & 1)
++count;
- interger>>=1;
+ integer>>=1;
}
return count;
}
{
int level;
int node_idx;
- int lenght;
+ int length;
const OctreeGlobalWithBox& octree;
__device__ __forceinline__ OctreeIteratorDeviceNS(const OctreeGlobalWithBox& octree_arg) : octree(octree_arg)
{
node_idx = 0;
level = 0;
- lenght = 1;
+ length = 1;
}
__device__ __forceinline__ void gotoNextLevel(int first, int len)
{
node_idx = first;
- lenght = len;
+ length = len;
++level;
}
#if 1
while(level >= 0)
{
- if (lenght > 1)
+ if (length > 1)
{
- lenght--;
+ length--;
node_idx++;
break;
}
int pos = node_idx - parent_first;
- lenght = parent_len - pos;
+ length = parent_len - pos;
}
#endif
}
{
int level;
int node_idx;
- int lenght;
+ int length;
const OctreeGlobalWithBox& octree;
__device__ __forceinline__ OctreePriorityIteratorDevice(const OctreeGlobalWithBox& octree_arg) : octree(octree_arg)
{
node_idx = 0;
level = 0;
- lenght = 1;
+ length = 1;
}
__device__ __forceinline__ void gotoNextLevel(int first, int len)
{
node_idx = first;
- lenght = len;
+ length = len;
++level;
}
#if 1
while(level >= 0)
{
- if (lenght > 1)
+ if (length > 1)
{
- lenght--;
+ length--;
node_idx++;
break;
}
int pos = node_idx - parent_first;
- lenght = parent_len - pos;
+ length = parent_len - pos;
}
#endif
}
#include "opencv2/contrib/contrib.hpp"
#endif
-//TEST(PCL_OctreeGPU, DISABLED_perfomance)
-TEST(PCL_OctreeGPU, perfomance)
+//TEST(PCL_OctreeGPU, DISABLED_performance)
+TEST(PCL_OctreeGPU, performance)
{
DataGenerator data;
data.data_size = 871000;
cout << "[!] Host octree resolution: " << host_octree_resolution << endl << endl;
- cout << "====== Build perfomance =====" << endl;
+ cout << "====== Build performance =====" << endl;
// build device octree
pcl::gpu::Octree octree_device;
octree_device.setCloud(cloud_device);
}
#endif
- //// Radius search perfomance ///
+ //// Radius search performance ///
const int max_answers = 500;
float dist;
for(size_t i = 0; i < data.tests_num; ++i)
{
- //search host on octree tha was built on device
+ //search host on octree that was built on device
vector<int> results_host_gpu; //host search
octree_device.radiusSearchHost(data.queries[i], data.radiuses[i], results_host_gpu);
find_cuda_helper_libs(npp)
else()
find_cuda_helper_libs(nppc)
- find_cuda_helper_libs(nppi)
find_cuda_helper_libs(npps)
+ if(${CUDA_VERSION} VERSION_GREATER_EQUAL "9.0")
+ find_cuda_helper_libs(nppim)
+ find_cuda_helper_libs(nppidei)
+ else()
+ find_cuda_helper_libs(nppi)
+ endif()
- set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ if(${CUDA_VERSION} VERSION_GREATER_EQUAL "9.0")
+ set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppim_LIBRARY} ${CUDA_nppidei_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ else()
+ set(CUDA_npp_LIBRARY ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY} CACHE STRING "npp library")
+ endif()
endif()
#Label_skeleton
namespace people
{
/**
- * @brief This structure containts all parameters to describe blobs and their parent/child relations
+ * @brief This structure contains all parameters to describe blobs and their parent/child relations
* @todo: clean this out in the end, perhaps place the children in a separate struct
*/
struct Blob2
};
/**
- * @brief This LUT contains the ideal lenght between this part and his children
+ * @brief This LUT contains the ideal length between this part and his children
**/
static const float LUT_ideal_length[][4] =
{
};
/**
- * @brief This LUT contains the max lenght between this part and his children
+ * @brief This LUT contains the max length between this part and his children
**/
static const float LUT_max_length_offset[][4] =
{
* \param[in] dmap the cvMat with the depths, must be CV_16U in mm
* \param[out] lmap_out the smoothed output labelmap as cvMat
* \param[in] patch_size make the patch size for smoothing
- * \param[in] depthThres the z-distance thresshold
+ * \param[in] depthThres the z-distance threshold
* \todo add a Gaussian contribution function to depth and vote
**/
//inline void smoothLabelImage ( cv::Mat& lmap_in,
namespace people
{
/**
- * @brief This structure containts all parameters to describe the segmented tree
+ * @brief This structure contains all parameters to describe the segmented tree
*/
struct Tree2
{
* @param[in] parent_label this is the part label that indicates the row
* @param[in] child_label this is the part label that indicates the childs needed to be investigated
* @param[in] child_number the number of this child in the parent, some parents have multiple childs
- * @return zero if successfull
+ * @return zero if successful
* @todo once we have good evaluation function reconsider best_value
**/
inline int
* @param[in] child_label this is the part label that indicates the childs needed to be investigated
* @param[in] child_number the number of this child in the parent, some parents have multiple childs
* @param person_attribs
- * @return zero if successfull
+ * @return zero if successful
* @todo once we have good evaluation function reconsider best_value
**/
inline int
/**
* \brief Read XML configuration file for a specific person
* \param[in] is input stream of file
- * \return 0 when successfull, -1 when an error occured, datastructure might become corrupted in the process
+ * \return 0 when successful, -1 when an error occurred, datastructure might become corrupted in the process
**/
int
readPersonXMLConfig (std::istream& is);
if (num_parts <= 32)
pcl::device::smoothKernel<32><<<grid, block>>>(src, depth, dst, patch_size, depthThres, num_parts);
else
- throw std::exception(); //should instanciate another smoothKernel<N>
+ throw std::exception(); //should instantiate another smoothKernel<N>
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
template <typename T> void
savePNGFile (const std::string& filename, const pcl::PointCloud<T>& cloud)
{
- pcl::io::savePNGFile(filename, cloud);
+ pcl::io::savePNGFile(filename, cloud, "rgb");
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T> void
savePNGFile (const std::string& filename, const pcl::PointCloud<T>& cloud)
{
- pcl::io::savePNGFile(filename, cloud);
+ pcl::io::savePNGFile(filename, cloud, "rgb");
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
- if (__all(idx >= facet_count))
+#if CUDA_VERSION >= 9000
+ if (__all_sync (__activemask (), idx >= facet_count))
+ return;
+#else
+ if (__all (idx >= facet_count))
return;
-
+#endif
+
int empty = 0;
if(idx < facet_count)
empty = 1;
}
- int total = __popc(__ballot(empty));
+#if CUDA_VERSION >= 9000
+ int total = __popc (__ballot_sync (__activemask (), empty));
+#else
+ int total = __popc (__ballot (empty));
+#endif
if (total > 0)
{
- int offset = Warp::binaryExclScan(__ballot(empty));
+#if CUDA_VERSION >= 9000
+ int offset = Warp::binaryExclScan (__ballot_sync (__activemask (), empty));
+#else
+ int offset = Warp::binaryExclScan (__ballot (empty));
+#endif
volatile __shared__ int wapr_buffer[WARPS];
public:
FacetStream(size_t buffer_size);
- // indeces: in each col indeces of vertexes for sigle facet
+ // indeces: in each col indeces of vertexes for single facet
DeviceArray2D<int> verts_inds;
DeviceArray<int> head_points;
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "${SUBSYS_DEPS}" "${EXT_DEPS}" "" "" "")
# Install include files
- PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${dev_incs} ${incs} ${srcs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}" ${incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_PATH}/device" ${dev_incs})
endif()
}
template<typename InIt, typename OutIt, class UnOp>
- static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)
+ static __device__ __forceinline__ void transform(InIt beg, InIt end, OutIt out, UnOp op)
{
int STRIDE = stride();
InIt t = beg + flattenedThreadId();
}
template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
- static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
+ static __device__ __forceinline__ void transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
{
int STRIDE = stride();
InIt1 t1 = beg1 + flattenedThreadId();
* Author: Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
*/
-#ifndef PCL_GPU_DEVUCE_REDUCE_HPP_
-#define PCL_GPU_DEVUCE_REDUCE_HPP_
+#ifndef PCL_GPU_DEVICE_REDUCE_HPP_
+#define PCL_GPU_DEVICE_REDUCE_HPP_
namespace pcl
{
PCL_ADD_DOC("${SUBSYS_NAME}")
+if(NOT WIN32)
+ option(PCL_IO_ENABLE_MAND_LOCKING "Enables the use of mandatory locking on filesystems mounted using the mand option." ON)
+ mark_as_advanced(PCL_IO_ENABLE_MAND_LOCKING)
+ if (NOT PCL_IO_ENABLE_MAND_LOCKING)
+ add_definitions(-DNO_MANDATORY_LOCKING)
+ endif(NOT PCL_IO_ENABLE_MAND_LOCKING)
+endif(NOT WIN32)
+
if(build)
if(WITH_OPENNI2)
set(IMAGE_INCLUDES
"include/pcl/${SUBSYS_NAME}/debayer.h"
"include/pcl/${SUBSYS_NAME}/file_io.h"
"include/pcl/${SUBSYS_NAME}/auto_io.h"
+ "include/pcl/${SUBSYS_NAME}/low_level_io.h"
"include/pcl/${SUBSYS_NAME}/lzf.h"
"include/pcl/${SUBSYS_NAME}/lzf_image_io.h"
"include/pcl/${SUBSYS_NAME}/io.h"
/** \brief Decode color information
* \param outputCloud_arg output point cloud
- * \param beginIdx_arg index indicating first point to be assiged with color information
- * \param endIdx_arg index indicating last point to be assiged with color information
+ * \param beginIdx_arg index indicating first point to be assigned with color information
+ * \param endIdx_arg index indicating last point to be assigned with color information
* \param rgba_offset_arg offset to color information
*/
void
/** \brief Set default color to points
* \param outputCloud_arg output point cloud
- * \param beginIdx_arg index indicating first point to be assiged with color information
- * \param endIdx_arg index indicating last point to be assiged with color information
+ * \param beginIdx_arg index indicating first point to be assigned with color information
+ * \param endIdx_arg index indicating last point to be assigned with color information
* \param rgba_offset_arg offset to color information
* */
void
compressedDataOut_arg.write (reinterpret_cast<const char*> (&cloud_height), sizeof (cloud_height));
// encode frame max depth
compressedDataOut_arg.write (reinterpret_cast<const char*> (&maxDepth), sizeof (maxDepth));
- // encode frame focal lenght
+ // encode frame focal length
compressedDataOut_arg.write (reinterpret_cast<const char*> (&focalLength), sizeof (focalLength));
// encode frame disparity scale
compressedDataOut_arg.write (reinterpret_cast<const char*> (&disparityScale), sizeof (disparityScale));
compressedDataOut_arg.write (reinterpret_cast<const char*> (&height_arg), sizeof (height_arg));
// encode frame max depth
compressedDataOut_arg.write (reinterpret_cast<const char*> (&maxDepth), sizeof (maxDepth));
- // encode frame focal lenght
+ // encode frame focal length
compressedDataOut_arg.write (reinterpret_cast<const char*> (&focalLength_arg), sizeof (focalLength_arg));
// encode frame disparity scale
compressedDataOut_arg.write (reinterpret_cast<const char*> (&disparityScale_arg), sizeof (disparityScale_arg));
/** \brief Vector for storing binary tree structure */
std::vector<char> binary_tree_data_vector_;
- /** \brief Interator on binary tree structure vector */
+ /** \brief Iterator on binary tree structure vector */
std::vector<char> binary_color_tree_vector_;
/** \brief Vector for storing points per voxel information */
std::vector<unsigned int> point_count_data_vector_;
- /** \brief Interator on points per voxel vector */
+ /** \brief Iterator on points per voxel vector */
std::vector<unsigned int>::const_iterator point_count_data_vector_iterator_;
/** \brief Color coding instance */
* \param[in] compressedDataIn_arg: binary input stream containing compressed data
* \param[out] cloud_arg: reference to decoded point cloud
* \param[in] bShowStatistics_arg: show compression statistics during decoding
- * \return false if an I/O error occured.
+ * \return false if an I/O error occurred.
*/
bool decodePointCloud (std::istream& compressedDataIn_arg,
PointCloudPtr &cloud_arg,
if (convertToMono)
{
// Encode point color
- const uint32_t rgb = *reinterpret_cast<const int*> (&point.rgb);
- uint8_t grayvalue = static_cast<uint8_t>(0.2989 * static_cast<float>((rgb >> 16) & 0x0000ff) +
- 0.5870 * static_cast<float>((rgb >> 8) & 0x0000ff) +
- 0.1140 * static_cast<float>((rgb >> 0) & 0x0000ff));
+ uint8_t grayvalue = static_cast<uint8_t>(0.2989 * point.r
+ + 0.5870 * point.g
+ + 0.1140 * point.b);
rgbData_arg.push_back (grayvalue);
} else
{
// Encode point color
- const uint32_t rgb = *reinterpret_cast<const int*> (&point.rgb);
-
- rgbData_arg.push_back ( (rgb >> 16) & 0x0000ff);
- rgbData_arg.push_back ( (rgb >> 8) & 0x0000ff);
- rgbData_arg.push_back ( (rgb >> 0) & 0x0000ff);
+ rgbData_arg.push_back (point.r);
+ rgbData_arg.push_back (point.g);
+ rgbData_arg.push_back (point.b);
}
{
if (monoImage_arg)
{
- const uint8_t& pixel_r = rgbData_arg[i];
- const uint8_t& pixel_g = rgbData_arg[i];
- const uint8_t& pixel_b = rgbData_arg[i];
-
// Define point color
- uint32_t rgb = (static_cast<uint32_t>(pixel_r) << 16
- | static_cast<uint32_t>(pixel_g) << 8
- | static_cast<uint32_t>(pixel_b));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.r = rgbData_arg[i];
+ newPoint.g = rgbData_arg[i];
+ newPoint.b = rgbData_arg[i];
} else
{
- const uint8_t& pixel_r = rgbData_arg[i*3+0];
- const uint8_t& pixel_g = rgbData_arg[i*3+1];
- const uint8_t& pixel_b = rgbData_arg[i*3+2];
-
// Define point color
- uint32_t rgb = (static_cast<uint32_t>(pixel_r) << 16
- | static_cast<uint32_t>(pixel_g) << 8
- | static_cast<uint32_t>(pixel_b));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.r = rgbData_arg[i*3+0];
+ newPoint.g = rgbData_arg[i*3+1];
+ newPoint.b = rgbData_arg[i*3+2];
}
} else
{
// Set white point color
- uint32_t rgb = (static_cast<uint32_t>(255) << 16
- | static_cast<uint32_t>(255) << 8
- | static_cast<uint32_t>(255));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.rgba = 0xffffffffu;
}
} else
{
{
if (monoImage_arg)
{
- const uint8_t& pixel_r = rgbData_arg[i];
- const uint8_t& pixel_g = rgbData_arg[i];
- const uint8_t& pixel_b = rgbData_arg[i];
-
// Define point color
- uint32_t rgb = (static_cast<uint32_t>(pixel_r) << 16
- | static_cast<uint32_t>(pixel_g) << 8
- | static_cast<uint32_t>(pixel_b));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.r = rgbData_arg[i];
+ newPoint.g = rgbData_arg[i];
+ newPoint.b = rgbData_arg[i];
} else
{
- const uint8_t& pixel_r = rgbData_arg[i*3+0];
- const uint8_t& pixel_g = rgbData_arg[i*3+1];
- const uint8_t& pixel_b = rgbData_arg[i*3+2];
-
// Define point color
- uint32_t rgb = (static_cast<uint32_t>(pixel_r) << 16
- | static_cast<uint32_t>(pixel_g) << 8
- | static_cast<uint32_t>(pixel_b));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.r = rgbData_arg[i*3+0];
+ newPoint.g = rgbData_arg[i*3+1];
+ newPoint.b = rgbData_arg[i*3+2];
}
} else
{
// Set white point color
- uint32_t rgb = (static_cast<uint32_t>(255) << 16
- | static_cast<uint32_t>(255) << 8
- | static_cast<uint32_t>(255));
- newPoint.rgb = *reinterpret_cast<float*>(&rgb);
+ newPoint.rgba = 0xffffffffu;
}
} else
{
/** \brief Decode differential point information
* \param outputCloud_arg output point cloud
* \param referencePoint_arg coordinates of reference point
- * \param beginIdx_arg index indicating first point to be assiged with color information
- * \param endIdx_arg index indicating last point to be assiged with color information
+ * \param beginIdx_arg index indicating first point to be assigned with color information
+ * \param endIdx_arg index indicating last point to be assigned with color information
*/
void
decodePoints (PointCloudPtr outputCloud_arg, const double* referencePoint_arg, std::size_t beginIdx_arg,
}
- /** \brief Set the Separting characters for the ascii point fields 2.
+ /** \brief Set the Separating characters for the ascii point fields 2.
* \param[in] chars string of separating characters
* Sets the separating characters for the point fields. The
* default separating characters are " \n\t,"
#if defined __GNUC__
# pragma GCC system_header
#endif
-#ifndef __CUDACC__
//https://bugreports.qt-project.org/browse/QTBUG-22829
#ifndef Q_MOC_RUN
+#ifndef __CUDACC__
#include <boost/version.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/mpl/joint_view.hpp>
#include <boost/mpl/transform.hpp>
#include <boost/mpl/vector.hpp>
-#include <boost/algorithm/string.hpp>
-#ifndef Q_MOC_RUN
#include <boost/date_time/posix_time/posix_time.hpp>
-#endif
#if BOOST_VERSION >= 104700
#include <boost/chrono.hpp>
#endif
#include <boost/tokenizer.hpp>
#include <boost/foreach.hpp>
#include <boost/shared_array.hpp>
-#include <boost/interprocess/sync/file_lock.hpp>
#if BOOST_VERSION >= 104900
#include <boost/interprocess/permissions.hpp>
#endif
#include <boost/signals2.hpp>
#include <boost/signals2/slot.hpp>
#endif
+#include <boost/algorithm/string.hpp>
+#include <boost/interprocess/sync/file_lock.hpp>
#endif
#endif // _PCL_IO_BOOST_H_
namespace pcl
{
/** \brief Point Cloud Data (FILE) file format reader interface.
- * Any (FILE) format file reader should implement its virtual methodes.
+ * Any (FILE) format file reader should implement its virtual methods.
* \author Nizar Sallem
* \ingroup io
*/
};
/** \brief Point Cloud Data (FILE) file format writer.
- * Any (FILE) format file reader should implement its virtual methodes
+ * Any (FILE) format file reader should implement its virtual methods
* \author Nizar Sallem
* \ingroup io
*/
}
};
- /** \brief insers a value of type Type (uchar, char, uint, int, float, double, ...) into a stringstream.
+ /** \brief inserts a value of type Type (uchar, char, uint, int, float, double, ...) into a stringstream.
*
- * If the value is NaN, it inserst "nan".
+ * If the value is NaN, it inserts "nan".
*
* \param[in] cloud the cloud to copy from
* \param[in] point_index the index of the point
void
setupDevice (const FZ_DEVICE_INFO& device_info, const Mode& depth_mode, const Mode& image_mode);
- /** \brief Continously asks for data from the device and publishes it if available. */
+ /** \brief Continuously asks for data from the device and publishes it if available. */
void
processGrabbing ();
/** \brief Constructor. */
Grabber () : signals_ (), connections_ (), shared_connections_ () {}
- /** \brief virtual desctructor. */
+ /** \brief virtual destructor. */
virtual inline ~Grabber () throw ();
/** \brief registers a callback function/method to a signal with the corresponding signature
(sig_cb_velodyne_hdl_scan_point_cloud_xyz) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZ> >&,
float,
float);
+
/** \brief Signal used for a single sector
* Represents 1 corrected packet from the HDL Velodyne. Each laser has a different RGB
*/
typedef void
- (sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZRGBA> >&,
- float,
- float);
+ (sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZRGBA> >&,
+ float,
+ float);
+
+ typedef PCL_DEPRECATED ("Use 'sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba' instead")
+ sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb;
+
/** \brief Signal used for a single sector
* Represents 1 corrected packet from the HDL Velodyne with the returned intensity.
*/
(sig_cb_velodyne_hdl_scan_point_cloud_xyzi) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZI> >&,
float startAngle,
float);
+
/** \brief Signal used for a 360 degree sweep
* Represents multiple corrected packets from the HDL Velodyne
* This signal is sent when the Velodyne passes angle "0"
*/
typedef void
(sig_cb_velodyne_hdl_sweep_point_cloud_xyz) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZ> >&);
+
/** \brief Signal used for a 360 degree sweep
* Represents multiple corrected packets from the HDL Velodyne with the returned intensity
* This signal is sent when the Velodyne passes angle "0"
*/
typedef void
(sig_cb_velodyne_hdl_sweep_point_cloud_xyzi) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZI> >&);
+
/** \brief Signal used for a 360 degree sweep
* Represents multiple corrected packets from the HDL Velodyne
* This signal is sent when the Velodyne passes angle "0". Each laser has a different RGB
*/
typedef void
- (sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZRGBA> >&);
+ (sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZRGBA> >&);
+
+ typedef PCL_DEPRECATED ("Use 'sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba' instead")
+ sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb;
/** \brief Constructor taking an optional path to an HDL corrections file. The Grabber will listen on the default IP/port for data packets [192.168.3.255/2368]
* \param[in] correctionsFile Path to a file which contains the correction parameters for the HDL. This parameter is mandatory for the HDL-64, optional for the HDL-32
HDLGrabber (const std::string& correctionsFile = "",
const std::string& pcapFile = "");
- /** \brief Constructor taking a pecified IP/port and an optional path to an HDL corrections file.
+ /** \brief Constructor taking a specified IP/port and an optional path to an HDL corrections file.
* \param[in] ipAddress IP Address that should be used to listen for HDL packets
* \param[in] port UDP Port that should be used to listen for HDL packets
* \param[in] correctionsFile Path to a file which contains the correction parameters for the HDL. This field is mandatory for the HDL-64, optional for the HDL-32
*/
HDLGrabber (const boost::asio::ip::address& ipAddress,
- const unsigned short port,
+ const uint16_t port,
const std::string& correctionsFile = "");
/** \brief virtual Destructor inherited from the Grabber interface. It never throws. */
*/
void
filterPackets (const boost::asio::ip::address& ipAddress,
- const unsigned short port = 443);
+ const uint16_t port = 443);
- /** \brief Allows one to customize the colors used for each of the lasers.
+ /** \brief Allows one to customize the colors used by each laser.
+ * \param[in] color RGB color to set
+ * \param[in] laserNumber Number of laser to set color
*/
void
setLaserColorRGB (const pcl::RGB& color,
- unsigned int laserNumber);
+ const uint8_t laserNumber);
+
+ /** \brief Allows one to customize the colors used for each of the lasers.
+ * \param[in] begin begin iterator of RGB color array
+ * \param[in] end end iterator of RGB color array
+ */
+ template<typename IterT> void
+ setLaserColorRGB (const IterT& begin, const IterT& end)
+ {
+ std::copy (begin, end, laser_rgb_mapping_);
+ }
/** \brief Any returns from the HDL with a distance less than this are discarded.
* This value is in meters
float
getMaximumDistanceThreshold ();
+ /** \brief Returns the maximum number of lasers
+ */
+ virtual uint8_t
+ getMaximumNumberOfLasers () const;
+
protected:
- static const int HDL_DATA_PORT = 2368;
- static const int HDL_NUM_ROT_ANGLES = 36001;
- static const int HDL_LASER_PER_FIRING = 32;
- static const int HDL_MAX_NUM_LASERS = 64;
- static const int HDL_FIRING_PER_PKT = 12;
+ static const uint16_t HDL_DATA_PORT = 2368;
+ static const uint16_t HDL_NUM_ROT_ANGLES = 36001;
+ static const uint8_t HDL_LASER_PER_FIRING = 32;
+ static const uint8_t HDL_MAX_NUM_LASERS = 64;
+ static const uint8_t HDL_FIRING_PER_PKT = 12;
enum HDLBlock
{
#pragma pack(push, 1)
typedef struct HDLLaserReturn
{
- unsigned short distance;
- unsigned char intensity;
+ uint16_t distance;
+ uint8_t intensity;
} HDLLaserReturn;
#pragma pack(pop)
struct HDLFiringData
{
- unsigned short blockIdentifier;
- unsigned short rotationalPosition;
+ uint16_t blockIdentifier;
+ uint16_t rotationalPosition;
HDLLaserReturn laserReturns[HDL_LASER_PER_FIRING];
};
struct HDLDataPacket
{
HDLFiringData firingData[HDL_FIRING_PER_PKT];
- unsigned int gpsTimestamp;
- unsigned char mode;
- unsigned char sensorType;
+ uint32_t gpsTimestamp;
+ uint8_t mode;
+ uint8_t sensorType;
};
struct HDLLaserCorrection
};
HDLLaserCorrection laser_corrections_[HDL_MAX_NUM_LASERS];
- unsigned int last_azimuth_;
+ uint16_t last_azimuth_;
boost::shared_ptr<pcl::PointCloud<pcl::PointXYZ> > current_scan_xyz_, current_sweep_xyz_;
boost::shared_ptr<pcl::PointCloud<pcl::PointXYZI> > current_scan_xyzi_, current_sweep_xyzi_;
- boost::shared_ptr<pcl::PointCloud<pcl::PointXYZRGBA> > current_scan_xyzrgb_, current_sweep_xyzrgb_;
+ boost::shared_ptr<pcl::PointCloud<pcl::PointXYZRGBA> > current_scan_xyzrgba_, current_sweep_xyzrgba_;
boost::signals2::signal<sig_cb_velodyne_hdl_sweep_point_cloud_xyz>* sweep_xyz_signal_;
- boost::signals2::signal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb>* sweep_xyzrgb_signal_;
+ boost::signals2::signal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba>* sweep_xyzrgba_signal_;
boost::signals2::signal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzi>* sweep_xyzi_signal_;
boost::signals2::signal<sig_cb_velodyne_hdl_scan_point_cloud_xyz>* scan_xyz_signal_;
- boost::signals2::signal<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb>* scan_xyzrgb_signal_;
+ boost::signals2::signal<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba>* scan_xyzrgba_signal_;
boost::signals2::signal<sig_cb_velodyne_hdl_scan_point_cloud_xyzi>* scan_xyzi_signal_;
void
fireCurrentSweep ();
void
- fireCurrentScan (const unsigned short startAngle,
- const unsigned short endAngle);
+ fireCurrentScan (const uint16_t startAngle,
+ const uint16_t endAngle);
void
computeXYZI (pcl::PointXYZI& pointXYZI,
- int azimuth,
+ uint16_t azimuth,
HDLLaserReturn laserReturn,
HDLLaserCorrection correction);
private:
static double *cos_lookup_table_;
static double *sin_lookup_table_;
- pcl::SynchronizedQueue<unsigned char *> hdl_data_;
+ pcl::SynchronizedQueue<uint8_t *> hdl_data_;
boost::asio::ip::udp::endpoint udp_listener_endpoint_;
boost::asio::ip::address source_address_filter_;
- unsigned short source_port_filter_;
+ uint16_t source_port_filter_;
boost::asio::io_service hdl_read_socket_service_;
boost::asio::ip::udp::socket *hdl_read_socket_;
std::string pcap_file_name_;
processVelodynePackets ();
void
- enqueueHDLPacket (const unsigned char *data,
+ enqueueHDLPacket (const uint8_t *data,
std::size_t bytesReceived);
void
constant_y = 1.0 / parameters_.focal_length_y;
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads)
+#else
+ (void) num_threads; // suppress warning if OMP is not present
#endif
for (int i = 0; i < static_cast< int> (cloud.size ()); ++i)
{
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads)
+#else
+ (void) num_threads; // suppress warning if OMP is not present
#endif//_OPENMP
for (long int i = 0; i < cloud.size (); ++i)
{
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads)
+#else
+ (void) num_threads; //suppress warning if OMP is not present
#endif//_OPENMP
for (int i = 0; i < wh2; ++i)
{
cloud.resize (getWidth () * getHeight ());
#ifdef _OPENMP
#pragma omp parallel for num_threads (num_threads)
+#else
+ (void) num_threads; //suppress warning if OMP is not present
#endif//_OPENMP
for (long int i = 0; i < cloud.size (); ++i)
{
#include <fcntl.h>
#include <string>
#include <stdlib.h>
-#include <pcl/io/boost.h>
#include <pcl/console/print.h>
+#include <pcl/io/boost.h>
+#include <pcl/io/low_level_io.h>
#include <pcl/io/pcd_io.h>
-#ifdef _WIN32
-# include <io.h>
-# ifndef WIN32_LEAN_AND_MEAN
-# define WIN32_LEAN_AND_MEAN
-# endif // WIN32_LEAN_AND_MEAN
-# ifndef NOMINMAX
-# define NOMINMAX
-# endif // NOMINMAX
-# include <windows.h>
-# define pcl_open _open
-# define pcl_close(fd) _close(fd)
-# define pcl_lseek(fd,offset,origin) _lseek(fd,offset,origin)
-#else
-# include <sys/mman.h>
-# define pcl_open open
-# define pcl_close(fd) close(fd)
-# define pcl_lseek(fd,offset,origin) lseek(fd,offset,origin)
-#endif
-
#include <pcl/io/lzf.h>
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ int fd = io::raw_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during open!");
// Prepare the map
#if _WIN32
HANDLE fm = CreateFileMappingA (h_native_file, NULL, PAGE_READWRITE, 0, (DWORD) (data_idx + data_size), NULL);
+ if (fm == NULL)
+ {
+ throw pcl::IOException("[pcl::PCDWriter::writeBinary] Error during memory map creation ()!");
+ return (-1);
+ }
char *map = static_cast<char*>(MapViewOfFile (fm, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, data_idx + data_size));
CloseHandle (fm);
#else
- // Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
-
- if (result < 0)
+ // Allocate disk space for the entire file to prevent bus errors.
+ if (io::raw_fallocate (fd, data_idx + data_size) != 0)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
- PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
+ PCL_ERROR ("[pcl::PCDWriter::writeBinary] posix_fallocate errno: %d strerror: %s\n", errno, strerror (errno));
- throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during lseek ()!");
- return (-1);
- }
- // Write a bogus entry so that the new file size comes in effect
- result = static_cast<int> (::write (fd, "", 1));
- if (result != 1)
- {
- pcl_close (fd);
- resetLockingPermissions (file_name, file_lock);
- throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during write ()!");
+ throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during posix_fallocate ()!");
return (-1);
}
- char *map = static_cast<char*> (mmap (0, data_idx + data_size, PROT_WRITE, MAP_SHARED, fd, 0));
+ char *map = static_cast<char*> (::mmap (0, data_idx + data_size, PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) //MAP_FAILED)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during mmap ()!");
return (-1);
#if _WIN32
UnmapViewOfFile (map);
#else
- if (munmap (map, (data_idx + data_size)) == -1)
+ if (::munmap (map, (data_idx + data_size)) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during munmap ()!");
return (-1);
#if _WIN32
CloseHandle (h_native_file);
#else
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
return (0);
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ int fd = io::raw_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during open!");
// Compute the size of data
data_size = cloud.points.size () * fsize;
+ // If the data is to large the two 32 bit integers used to store the
+ // compressed and uncompressed size will overflow.
+ if (data_size * 3 / 2 > std::numeric_limits<uint32_t>::max ())
+ {
+ PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] The input data exceeds the maximum size for compressed version 0.7 pcds of %l bytes.\n",
+ static_cast<size_t> (std::numeric_limits<uint32_t>::max ()) * 2 / 3);
+ return (-2);
+ }
+
//////////////////////////////////////////////////////////////////////
// Empty array holding only the valid data
// data_size = nr_points * point_size
// pters[3] = &only_valid_data[offset_of_plane_RGB];
//
std::vector<char*> pters (fields.size ());
- int toff = 0;
+ size_t toff = 0;
for (size_t i = 0; i < pters.size (); ++i)
{
pters[i] = &only_valid_data[toff];
- toff += fields_sizes[i] * static_cast<int> (cloud.points.size ());
+ toff += static_cast<size_t>(fields_sizes[i]) * cloud.points.size();
}
// Go over all the points, and copy the data in the appropriate places
else
{
#if !_WIN32
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during compression!");
return (-1);
}
-#if !_WIN32
- // Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
- if (result < 0)
- {
- pcl_close (fd);
- resetLockingPermissions (file_name, file_lock);
- PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
-
- throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during lseek ()!");
- return (-1);
- }
- // Write a bogus entry so that the new file size comes in effect
- result = static_cast<int> (::write (fd, "", 1));
- if (result != 1)
- {
- pcl_close (fd);
- resetLockingPermissions (file_name, file_lock);
- throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during write ()!");
- return (-1);
- }
-#endif
-
// Prepare the map
#if _WIN32
HANDLE fm = CreateFileMapping (h_native_file, NULL, PAGE_READWRITE, 0, compressed_final_size, NULL);
CloseHandle (fm);
#else
- char *map = static_cast<char*> (mmap (0, compressed_final_size, PROT_WRITE, MAP_SHARED, fd, 0));
+ // Allocate disk space for the entire file to prevent bus errors.
+ if (io::raw_fallocate (fd, compressed_final_size) != 0)
+ {
+ io::raw_close (fd);
+ resetLockingPermissions (file_name, file_lock);
+ PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] posix_fallocate errno: %d strerror: %s\n", errno, strerror (errno));
+
+ throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during posix_fallocate ()!");
+ return (-1);
+ }
+
+ char *map = static_cast<char*> (::mmap (0, compressed_final_size, PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) //MAP_FAILED)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during mmap ()!");
return (-1);
#if _WIN32
UnmapViewOfFile (map);
#else
- if (munmap (map, (compressed_final_size)) == -1)
+ if (::munmap (map, (compressed_final_size)) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during munmap ()!");
return (-1);
#if _WIN32
CloseHandle (h_native_file);
#else
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
return (-1);
}
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ int fd = io::raw_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during open!");
CloseHandle (fm);
#else
- // Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
- if (result < 0)
- {
- pcl_close (fd);
- resetLockingPermissions (file_name, file_lock);
- PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
-
- throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during lseek ()!");
- return (-1);
- }
- // Write a bogus entry so that the new file size comes in effect
- result = static_cast<int> (::write (fd, "", 1));
- if (result != 1)
+ // Allocate disk space for the entire file to prevent bus errors.
+ if (io::raw_fallocate (fd, data_idx + data_size) != 0)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
- throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during write ()!");
+ PCL_ERROR ("[pcl::PCDWriter::writeBinary] posix_fallocate errno: %d strerror: %s\n", errno, strerror (errno));
+
+ throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during posix_fallocate ()!");
return (-1);
}
- char *map = static_cast<char*> (mmap (0, data_idx + data_size, PROT_WRITE, MAP_SHARED, fd, 0));
+ char *map = static_cast<char*> (::mmap (0, data_idx + data_size, PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) //MAP_FAILED)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during mmap ()!");
return (-1);
#if _WIN32
UnmapViewOfFile (map);
#else
- if (munmap (map, (data_idx + data_size)) == -1)
+ if (::munmap (map, (data_idx + data_size)) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during munmap ()!");
return (-1);
#if _WIN32
CloseHandle(h_native_file);
#else
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
fs << result << "\n";
}
fs.close (); // Close file
-
+
resetLockingPermissions (file_name, file_lock);
return (0);
}
#endif //#ifndef PCL_IO_PCD_IO_H_
-
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2012-, Open Perception, Inc.
+ * Copyright (c) 2018 Fizyr BV. - https://fizyr.com
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file defines compatibility wrappers for low level I/O functions.
+ * Implemented as inlinable functions to prevent any performance overhead.
+ */
+
+#ifndef __PCL_IO_LOW_LEVEL_IO__
+#define __PCL_IO_LOW_LEVEL_IO__
+
+#ifdef _WIN32
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+# include <io.h>
+# include <windows.h>
+# include <BaseTsd.h>
+typedef SSIZE_T ssize_t;
+#else
+# include <unistd.h>
+# include <sys/mman.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <sys/fcntl.h>
+# include <cerrno>
+#endif
+
+namespace pcl
+{
+ namespace io
+ {
+#ifdef _WIN32
+ inline int raw_open(const char * pathname, int flags, int mode)
+ {
+ return ::_open(pathname, flags, mode);
+ }
+
+ inline int raw_open(const char * pathname, int flags)
+ {
+ return ::_open(pathname, flags);
+ }
+
+ inline int raw_close(int fd)
+ {
+ return ::_close(fd);
+ }
+
+ inline int raw_lseek(int fd, long offset, int whence)
+ {
+ return ::_lseek(fd, offset, whence);
+ }
+
+ inline int raw_read(int fd, void * buffer, size_t count)
+ {
+ return ::_read(fd, buffer, count);
+ }
+
+ inline int raw_write(int fd, const void * buffer, size_t count)
+ {
+ return ::_write(fd, buffer, count);
+ }
+
+ inline int raw_ftruncate(int fd, long length)
+ {
+ return ::_chsize(fd, length);
+ }
+
+ inline int raw_fallocate(int fd, long length)
+ {
+ // Doesn't actually allocate, but best we can do?
+ return raw_ftruncate(fd, length);
+ }
+#else
+ inline int raw_open(const char * pathname, int flags, int mode)
+ {
+ return ::open(pathname, flags, mode);
+ }
+
+ inline int raw_open(const char * pathname, int flags)
+ {
+ return ::open(pathname, flags);
+ }
+
+ inline int raw_close(int fd)
+ {
+ return ::close(fd);
+ }
+
+ inline off_t raw_lseek(int fd, off_t offset, int whence)
+ {
+ return ::lseek(fd, offset, whence);
+ }
+
+ inline ssize_t raw_read(int fd, void * buffer, size_t count)
+ {
+ return ::read(fd, buffer, count);
+ }
+
+ inline ssize_t raw_write(int fd, const void * buffer, size_t count)
+ {
+ return ::write(fd, buffer, count);
+ }
+
+ inline int raw_ftruncate(int fd, off_t length)
+ {
+ return ::ftruncate(fd, length);
+ }
+
+# ifdef __APPLE__
+ inline int raw_fallocate(int fd, off_t length)
+ {
+ // OS X doesn't have posix_fallocate, but it has a fcntl that does the job.
+ // It may make the file too big though, so we truncate before returning.
+
+ // Try to allocate contiguous space first.
+ ::fstore_t store = {F_ALLOCATEALL | F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, length};
+ if (::fcntl(fd, F_PREALLOCATE, &store) != -1)
+ return raw_ftruncate(fd, length);
+
+ // Try fragmented if it failed.
+ store.fst_flags = F_ALLOCATEALL;
+ if (::fcntl(fd, F_PREALLOCATE, &store) != -1)
+ return raw_ftruncate(fd, length);
+
+ // Fragmented also failed.
+ return -1;
+ }
+
+# else // __APPLE__
+ inline int raw_fallocate(int fd, off_t length)
+ {
+# ifdef ANDROID
+ // Android's libc doesn't have posix_fallocate.
+ if (::fallocate(fd, 0, 0, length) == 0)
+ return 0;
+# else
+ // Conforming POSIX systems have posix_fallocate.
+ if (::posix_fallocate(fd, 0, length) == 0)
+ return 0;
+# endif
+
+ // EINVAL should indicate an unsupported filesystem.
+ // All other errors are passed up.
+ if (errno != EINVAL)
+ return -1;
+
+ // Try to deal with unsupported filesystems by simply seeking + writing.
+ // This may not really allocate space, but the file size will be set.
+ // Writes to the mmapped file may still trigger SIGBUS errors later.
+
+ // Remember the old position and seek to the desired length.
+ off_t old_pos = raw_lseek(fd, 0, SEEK_CUR);
+ if (old_pos == -1)
+ return -1;
+ if (raw_lseek(fd, length - 1, SEEK_SET) == -1)
+ return -1;
+
+ // Write a single byte to resize the file.
+ char buffer = 0;
+ ssize_t written = raw_write(fd, &buffer, 1);
+
+ // Seek back to the old position.
+ if (raw_lseek(fd, old_pos, SEEK_SET) == -1)
+ return -1;
+
+ // Fail if we didn't write exactly one byte,
+ if (written != 1)
+ return -1;
+
+ return 0;
+ }
+# endif // __APPLE
+#endif // _WIN32
+
+ }
+}
+#endif // __PCL_IO_LOW_LEVEL_IO__
typedef void (sig_cb_openni_point_cloud_rgba) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZRGBA> >&);
typedef void (sig_cb_openni_point_cloud_i) (const boost::shared_ptr<const pcl::PointCloud<pcl::PointXYZI> >&);
- /** \brief constuctor
+ /** \brief constructor
* \param[in] file_name the path to the ONI file
* \param[in] repeat whether the play back should be in an infinite loop or not
* \param[in] stream whether the playback should be in streaming mode or in triggered mode.
void
setUseDeviceTimer (bool enable);
- /** \brief Get absolut number of depth frames in the current stream.
+ /** \brief Get absolute number of depth frames in the current stream.
* This function returns 0 if the current device is not a file stream or
* if the current mode has no depth stream.
*/
int
getDepthFrameCount ();
- /** \brief Get absolut number of color frames in the current stream.
+ /** \brief Get absolute number of color frames in the current stream.
* This function returns 0 if the current device is not a file stream or
* if the current mode has no color stream.
*/
int
getColorFrameCount ();
- /** \brief Get absolut number of ir frames in the current stream.
+ /** \brief Get absolute number of ir frames in the current stream.
* This function returns 0 if the current device is not a file stream or
* if the current mode has no ir stream.
*/
OpenNI2DeviceManager ();
virtual ~OpenNI2DeviceManager ();
- // This may not actually be a sigleton yet. Need to work out cross-dll incerface.
+ // This may not actually be a singleton yet. Need to work out cross-dll incerface.
// Based on http://stackoverflow.com/a/13431981/1789618
static boost::shared_ptr<OpenNI2DeviceManager> getInstance ()
{
/**
* @author Suat Gedikli
* @brief decomposes the connection string into vendor id and product id.
- * @param[in] connection_string the string containing teh connection information
+ * @param[in] connection_string the string containing the connection information
* @param[out] vendorId the vendor id
* @param[out] productId the product id
*/
* - POINTS ...
* - DATA ascii/binary
*
- * Everything that follows \b DATA is intepreted as data points and
+ * Everything that follows \b DATA is interpreted as data points and
* will be read accordingly.
*
* PCD_V7 represents PCD files with version 0.7 and has an important
PCD_V7 = 1
};
+ /** \brief Read a point cloud data header from a PCD-formatted, binary istream.
+ *
+ * Load only the meta information (number of points, their types, etc),
+ * and not the points themselves, from a given PCD stream. Useful for fast
+ * evaluation of the underlying data structure.
+ *
+ * \attention The PCD data is \b always stored in ROW major format! The
+ * read/write PCD methods will detect column major input and automatically convert it.
+ *
+ * \param[in] binary_istream a std::istream with openmode set to std::ios::binary.
+ * \param[out] cloud the resultant point cloud dataset (only these
+ * members will be filled: width, height, point_step,
+ * row_step, fields[]; data is resized but not written)
+ * \param[out] origin the sensor acquisition origin (only for > PCD_V7 - null if not present)
+ * \param[out] orientation the sensor acquisition orientation (only for > PCD_V7 - identity if not present)
+ * \param[out] pcd_version the PCD version of the file (i.e., PCD_V6, PCD_V7)
+ * \param[out] data_type the type of data (0 = ASCII, 1 = Binary, 2 = Binary compressed)
+ * \param[out] data_idx the offset of cloud data within the file
+ *
+ * \return
+ * * < 0 (-1) on error
+ * * == 0 on success
+ */
+ int
+ readHeader (std::istream &binary_istream, pcl::PCLPointCloud2 &cloud,
+ Eigen::Vector4f &origin, Eigen::Quaternionf &orientation, int &pcd_version,
+ int &data_type, unsigned int &data_idx);
+
/** \brief Read a point cloud data header from a PCD file.
*
* Load only the meta information (number of points, their types, etc),
* read/write PCD methods will detect column major input and automatically convert it.
*
* \param[in] file_name the name of the file to load
- * \param[out] cloud the resultant point cloud dataset (only the header will be filled)
+ * \param[out] cloud the resultant point cloud dataset (only these
+ * members will be filled: width, height, point_step,
+ * row_step, fields[]; data is resized but not written)
* \param[out] origin the sensor acquisition origin (only for > PCD_V7 - null if not present)
* \param[out] orientation the sensor acquisition orientation (only for > PCD_V7 - identity if not present)
* \param[out] pcd_version the PCD version of the file (i.e., PCD_V6, PCD_V7)
* read/write PCD methods will detect column major input and automatically convert it.
*
* \param[in] file_name the name of the file to load
- * \param[out] cloud the resultant point cloud dataset (only the header will be filled)
+ * \param[out] cloud the resultant point cloud dataset (only these
+ * members will be filled: width, height, point_step,
+ * row_step, fields[]; data is resized but not written)
* \param[in] offset the offset of where to expect the PCD Header in the
* file (optional parameter). One usage example for setting the offset
* parameter is for reading data from a TAR "archive containing multiple
int
readHeader (const std::string &file_name, pcl::PCLPointCloud2 &cloud, const int offset = 0);
+ /** \brief Read the point cloud data (body) from a PCD stream.
+ *
+ * Reads the cloud points from a text-formatted stream. For use after
+ * readHeader(), when the resulting data_type == 0.
+ *
+ * \attention This assumes the stream has been seeked to the position
+ * indicated by the data_idx result of readHeader().
+ *
+ * \param[in] stream the stream from which to read the body.
+ * \param[out] cloud the resultant point cloud dataset to be filled.
+ * \param[in] pcd_version the PCD version of the stream (from readHeader()).
+ *
+ * \return
+ * * < 0 (-1) on error
+ * * == 0 on success
+ */
+ int
+ readBodyASCII (std::istream &stream, pcl::PCLPointCloud2 &cloud, int pcd_version);
+
+ /** \brief Read the point cloud data (body) from a block of memory.
+ *
+ * Reads the cloud points from a binary-formatted memory block. For use
+ * after readHeader(), when the resulting data_type is nonzero.
+ *
+ * \param[in] data the memory location from which to read the body.
+ * \param[out] cloud the resultant point cloud dataset to be filled.
+ * \param[in] pcd_version the PCD version of the stream (from readHeader()).
+ * \param[in] compressed indicates whether the PCD block contains compressed
+ * data. This should be true if the data_type returne by readHeader() == 2.
+ * \param[in] data_idx the offset of the body, as reported by readHeader().
+ *
+ * \return
+ * * < 0 (-1) on error
+ * * == 0 on success
+ */
+ int
+ readBodyBinary (const unsigned char *data, pcl::PCLPointCloud2 &cloud,
+ int pcd_version, bool compressed, unsigned int data_idx);
+
/** \brief Read a point cloud data from a PCD file and store it into a pcl/PCLPointCloud2.
* \param[in] file_name the name of the file containing the actual PointCloud data
* \param[out] cloud the resultant PointCloud message read from disk
const Eigen::Quaternionf &orientation);
/** \brief Generate the header of a BINARY_COMPRESSED PCD file format
+ * \param[out] os the stream into which to write the header
+ * \param[in] cloud the point cloud data message
+ * \param[in] origin the sensor acquisition origin
+ * \param[in] orientation the sensor acquisition orientation
+ *
+ * \return
+ * * < 0 (-1) on error
+ * * == 0 on success
+ */
+ int
+ generateHeaderBinaryCompressed (std::ostream &os,
+ const pcl::PCLPointCloud2 &cloud,
+ const Eigen::Vector4f &origin,
+ const Eigen::Quaternionf &orientation);
+
+ /** \brief Generate the header of a BINARY_COMPRESSED PCD file format
+ * \param[out] os the stream into which to write the header
* \param[in] cloud the point cloud data message
* \param[in] origin the sensor acquisition origin
* \param[in] orientation the sensor acquisition orientation
* \param[in] cloud the point cloud data message
* \param[in] origin the sensor acquisition origin
* \param[in] orientation the sensor acquisition orientation
+ * \return
+ * (-1) for a general error
+ * (-2) if the input cloud is too large for the file format
+ * 0 on success
*/
int
writeBinaryCompressed (const std::string &file_name, const pcl::PCLPointCloud2 &cloud,
const Eigen::Vector4f &origin = Eigen::Vector4f::Zero (),
const Eigen::Quaternionf &orientation = Eigen::Quaternionf::Identity ());
+ /** \brief Save point cloud data to a std::ostream containing n-D points, in BINARY_COMPRESSED format
+ * \param[out] os the stream into which to write the data
+ * \param[in] cloud the point cloud data message
+ * \param[in] origin the sensor acquisition origin
+ * \param[in] orientation the sensor acquisition orientation
+ * \return
+ * (-1) for a general error
+ * (-2) if the input cloud is too large for the file format
+ * 0 on success
+ */
+ int
+ writeBinaryCompressed (std::ostream &os, const pcl::PCLPointCloud2 &cloud,
+ const Eigen::Vector4f &origin = Eigen::Vector4f::Zero (),
+ const Eigen::Quaternionf &orientation = Eigen::Quaternionf::Identity ());
+
/** \brief Save point cloud data to a PCD file containing n-D points
* \param[in] file_name the output file name
* \param[in] cloud the point cloud data message
/** \brief Save point cloud data to a binary comprssed PCD file
* \param[in] file_name the output file name
* \param[in] cloud the point cloud data message
+ * \return
+ * (-1) for a general error
+ * (-2) if the input cloud is too large for the file format
+ * 0 on success
*/
template <typename PointT> int
writeBinaryCompressed (const std::string &file_name,
namespace ply
{
/** \file byte_order.h
- * defines byte shift operations and endianess.
+ * defines byte shift operations and endianness.
* \author Ares Lagae as part of libply, Nizar Sallem
* \ingroup io
*/
typedef int flags_type;
enum flags { };
- ply_parser (flags_type flags = 0) :
- flags_ (flags),
+ ply_parser () :
comment_callback_ (), obj_info_callback_ (), end_header_callback_ (),
line_number_ (0), current_element_ ()
{}
end_element_callback_type end_element_callback;
std::vector<boost::shared_ptr<property> > properties;
};
-
- flags_type flags_;
info_callback_type info_callback_;
warning_callback_type warning_callback_;
, rgb_offset_before_ (0)
, do_resize_ (false)
, polygons_ (0)
+ , r_(0), g_(0), b_(0)
+ , a_(0), rgba_(0)
{}
PLYReader (const PLYReader &p)
, rgb_offset_before_ (0)
, do_resize_ (false)
, polygons_ (0)
+ , r_(0), g_(0), b_(0)
+ , a_(0), rgba_(0)
{
*this = p;
}
std::vector<pcl::Vertices> *polygons_;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ private:
+ // RGB values stored by vertexColorCallback()
+ int32_t r_, g_, b_;
+ // Color values stored by vertexAlphaCallback()
+ uint32_t a_, rgba_;
};
/** \brief Point Cloud Data (PLY) file format writer.
{
/** \brief Load a PLY v.6 file into a templated PointCloud type.
*
- * Any PLY files containg sensor data will generate a warning as a
+ * Any PLY files containing sensor data will generate a warning as a
* pcl/PCLPointCloud2 message cannot hold the sensor origin.
*
* \param[in] file_name the name of the file to load
* \param[in] file_name the name of the file to load
* \param[in] cloud the resultant templated point cloud
* \param[in] origin the sensor acquisition origin (only for > PLY_V7 - null if not present)
- * \param[in] orientation the sensor acquisition orientation if availble,
+ * \param[in] orientation the sensor acquisition orientation if available,
* identity if not present
* \ingroup io
*/
/** \brief Load a PLY file into a PolygonMesh type.
*
- * Any PLY files containg sensor data will generate a warning as a
+ * Any PLY files containing sensor data will generate a warning as a
* pcl/PolygonMesh message cannot hold the sensor origin.
*
* \param[in] file_name the name of the file to load
PCL_EXPORTS void
savePNGFile (const std::string& file_name, const pcl::PointCloud<unsigned short>& cloud);
- /** \brief Saves a PCLImage (formely ROS sensor_msgs::Image) to PNG file.
+ /** \brief Saves a PCLImage (formerly ROS sensor_msgs::Image) to PNG file.
* \param[in] file_name the name of the file to write to disk
* \param[in] image image to save
* \ingroup io
PCL_EXPORTS void
savePNGFile (const std::string& file_name, const pcl::PCLImage& image);
- /** \brief Saves RGB fields of cloud as image to PNG file.
- * \param[in] file_name the name of the file to write to disk
- * \param[in] cloud point cloud to save
- * \ingroup io
- */
- template <typename T>
- PCL_DEPRECATED (
- "pcl::io::savePNGFile<typename T> (file_name, cloud) is deprecated, please use a new generic "
- "function pcl::io::savePNGFile (file_name, cloud, field_name) with \"rgb\" as the field name."
- )
- void
- savePNGFile (const std::string& file_name, const pcl::PointCloud<T>& cloud)
- {
- std::vector<unsigned char> data(cloud.width * cloud.height * 3);
-
- for (size_t i = 0; i < cloud.points.size (); ++i)
- {
- data[i*3 + 0] = cloud.points[i].r;
- data[i*3 + 1] = cloud.points[i].g;
- data[i*3 + 2] = cloud.points[i].b;
- }
- saveRgbPNGFile(file_name, &data[0], cloud.width, cloud.height);
- }
-
- /** \brief Saves Labeled Point cloud as image to PNG file.
- * \param[in] file_name the name of the file to write to disk
- * \param[in] cloud point cloud to save
- * \ingroup io
- * Warning: Converts to 16 bit (for png), labels using more than 16 bits will cause problems
- */
- PCL_EXPORTS PCL_DEPRECATED (
- "savePNGFile (file_name, cloud) is deprecated, please use a new generic function "
- "savePNGFile (file_name, cloud, field_name) with \"label\" as the field name."
- )
- void
- savePNGFile (const std::string& file_name, const pcl::PointCloud<pcl::PointXYZL>& cloud);
-
/** \brief Saves the data from the specified field of the point cloud as image to PNG file.
* \param[in] file_name the name of the file to write to disk
* \param[in] cloud point cloud to save
* If the default is supplied, then the mode closest to VGA at 30 Hz
* will be chosen.
* \param[in] strict if set to \c true, an exception will be thrown if
- * device does not support exactly the mode requsted. Otherwise the
+ * device does not support exactly the mode requested. Otherwise the
* closest available mode is selected. */
RealSenseGrabber (const std::string& device_id = "", const Mode& mode = Mode (), bool strict = false);
* \param[in] port UDP Port that should be used to listen for VLP packets
*/
VLPGrabber (const boost::asio::ip::address& ipAddress,
- const unsigned short port);
+ const uint16_t port);
/** \brief virtual Destructor inherited from the Grabber interface. It never throws. */
virtual
virtual std::string
getName () const;
+ /** \brief Allows one to customize the colors used by each laser.
+ * \param[in] color RGB color to set
+ * \param[in] laserNumber Number of laser to set color
+ */
+ void
+ setLaserColorRGB (const pcl::RGB& color,
+ const uint8_t laserNumber);
+
+ /** \brief Allows one to customize the colors used for each of the lasers.
+ * \param[in] begin begin iterator of RGB color array
+ * \param[in] end end iterator of RGB color array
+ */
+ template<typename IterT> void
+ setLaserColorRGB (const IterT& begin, const IterT& end)
+ {
+ std::copy (begin, end, laser_rgb_mapping_);
+ }
+
+ /** \brief Returns the maximum number of lasers
+ */
+ virtual uint8_t
+ getMaximumNumberOfLasers () const;
+
+ protected:
+ static const uint8_t VLP_MAX_NUM_LASERS = 16;
+ static const uint8_t VLP_DUAL_MODE = 0x39;
+
private:
+ pcl::RGB laser_rgb_mapping_[VLP_MAX_NUM_LASERS];
+
virtual void
toPointClouds (HDLDataPacket *dataPacket);
boost::asio::ip::address
getDefaultNetworkAddress ();
+ void
+ initializeLaserMapping ();
+
void
loadVLP16Corrections ();
#include <pcl/PCLPointCloud2.h>
#include <pcl/PolygonMesh.h>
-// Please do not add any functions tha depend on VTK structures to this file!
+// Please do not add any functions that depend on VTK structures to this file!
// Use vtk_io_lib.h instead.
namespace pcl
sstream << "[pcl::DinastGrabber::setupDevice] libusb initialization failure, LIBUSB_ERROR: "<< ret;
PCL_THROW_EXCEPTION (pcl::IOException, sstream.str ());
}
-
- libusb_set_debug (context_, 3);
+
+ #if defined(LIBUSB_API_VERSION) && (LIBUSB_API_VERSION >= 0x01000106)
+ libusb_set_option (context_, LIBUSB_OPTION_LOG_LEVEL, 3);
+ #else
+ libusb_set_debug (context_, 3);
+ #endif
libusb_device **devs = NULL;
// Get the list of USB devices
{
double pixel = image_[x + image_width_ * y];
- // Correcting distortion, data emprically got in a calibration test
+ // Correcting distortion, data empirically got in a calibration test
double xc = static_cast<double> (x - image_width_ / 2);
double yc = static_cast<double> (y - image_height_ / 2);
double r1 = sqrt (xc * xc + yc * yc);
cloud.height = height;
cloud.is_dense = false;
- // Copy data in point cloud (and convert milimeters in meters)
+ // Copy data in point cloud (and convert millimeters in meters)
for (size_t i = 0; i < pointMap.size (); i += 3)
{
cloud.points[i / 3].x = pointMap[i] / 1000.0;
// Convert tf into a matrix
if (!jsonTransformationToMatrix (tf.asJson (), pattern_pose))
return (false);
- pattern_pose.translation () /= 1000.0; // Convert translation in meters (Ensenso API returns milimeters)
+ pattern_pose.translation () /= 1000.0; // Convert translation in meters (Ensenso API returns millimeters)
return (true);
}
catch (NxLibException &ex)
std::vector<Eigen::Affine3d, Eigen::aligned_allocator<Eigen::Affine3d> > robot_poses_mm (robot_poses);
std::vector<std::string> robot_poses_json;
robot_poses_json.resize (robot_poses.size ());
- for (uint i = 0; i < robot_poses_json.size (); ++i)
+ for (size_t i = 0; i < robot_poses_json.size (); ++i)
{
robot_poses_mm[i].translation () *= 1000.0; // Convert meters in millimeters
if (!matrixTransformationToJson (robot_poses_mm[i], robot_poses_json[i]))
}
// Feed all robot poses into the calibration command
- for (uint i = 0; i < robot_poses_json.size (); ++i)
+ for (size_t i = 0; i < robot_poses_json.size (); ++i)
{
// Very weird behavior here:
// If you modify this loop, check that all the transformations are still here in the [itmExecute][itmParameters] node
cloud->height = height;
cloud->is_dense = false;
- // Copy data in point cloud (and convert milimeters in meters)
+ // Copy data in point cloud (and convert millimeters in meters)
for (size_t i = 0; i < pointMap.size (); i += 3)
{
cloud->points[i / 3].x = pointMap[i] / 1000.0;
current_sweep_xyz_ (new pcl::PointCloud<pcl::PointXYZ> ()),
current_scan_xyzi_ (new pcl::PointCloud<pcl::PointXYZI> ()),
current_sweep_xyzi_ (new pcl::PointCloud<pcl::PointXYZI> ()),
- current_scan_xyzrgb_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
- current_sweep_xyzrgb_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
+ current_scan_xyzrgba_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
+ current_sweep_xyzrgba_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
sweep_xyz_signal_ (),
- sweep_xyzrgb_signal_ (),
+ sweep_xyzrgba_signal_ (),
sweep_xyzi_signal_ (),
scan_xyz_signal_ (),
- scan_xyzrgb_signal_ (),
+ scan_xyzrgba_signal_ (),
scan_xyzi_signal_ (),
hdl_data_ (),
udp_listener_endpoint_ (),
/////////////////////////////////////////////////////////////////////////////
pcl::HDLGrabber::HDLGrabber (const boost::asio::ip::address& ipAddress,
- const unsigned short int port,
+ const uint16_t port,
const std::string& correctionsFile) :
last_azimuth_ (65000),
current_scan_xyz_ (new pcl::PointCloud<pcl::PointXYZ> ()),
current_sweep_xyz_ (new pcl::PointCloud<pcl::PointXYZ> ()),
current_scan_xyzi_ (new pcl::PointCloud<pcl::PointXYZI> ()),
current_sweep_xyzi_ (new pcl::PointCloud<pcl::PointXYZI> ()),
- current_scan_xyzrgb_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
- current_sweep_xyzrgb_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
+ current_scan_xyzrgba_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
+ current_sweep_xyzrgba_ (new pcl::PointCloud<pcl::PointXYZRGBA> ()),
sweep_xyz_signal_ (),
- sweep_xyzrgb_signal_ (),
+ sweep_xyzrgba_signal_ (),
sweep_xyzi_signal_ (),
scan_xyz_signal_ (),
- scan_xyzrgb_signal_ (),
+ scan_xyzrgba_signal_ (),
scan_xyzi_signal_ (),
hdl_data_ (),
udp_listener_endpoint_ (ipAddress, port),
stop ();
disconnect_all_slots<sig_cb_velodyne_hdl_sweep_point_cloud_xyz> ();
- disconnect_all_slots<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb> ();
+ disconnect_all_slots<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba> ();
disconnect_all_slots<sig_cb_velodyne_hdl_sweep_point_cloud_xyzi> ();
disconnect_all_slots<sig_cb_velodyne_hdl_scan_point_cloud_xyz> ();
- disconnect_all_slots<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb> ();
+ disconnect_all_slots<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba> ();
disconnect_all_slots<sig_cb_velodyne_hdl_scan_point_cloud_xyzi> ();
}
{
cos_lookup_table_ = static_cast<double *> (malloc (HDL_NUM_ROT_ANGLES * sizeof (*cos_lookup_table_)));
sin_lookup_table_ = static_cast<double *> (malloc (HDL_NUM_ROT_ANGLES * sizeof (*sin_lookup_table_)));
- for (int i = 0; i < HDL_NUM_ROT_ANGLES; i++)
+ for (uint16_t i = 0; i < HDL_NUM_ROT_ANGLES; i++)
{
double rad = (M_PI / 180.0) * (static_cast<double> (i) / 100.0);
cos_lookup_table_[i] = std::cos (rad);
loadCorrectionsFile (correctionsFile);
- for (int i = 0; i < HDL_MAX_NUM_LASERS; i++)
+ for (uint8_t i = 0; i < HDL_MAX_NUM_LASERS; i++)
{
HDLLaserCorrection correction = laser_corrections_[i];
laser_corrections_[i].sinVertOffsetCorrection = correction.verticalOffsetCorrection * correction.sinVertCorrection;
laser_corrections_[i].cosVertOffsetCorrection = correction.verticalOffsetCorrection * correction.cosVertCorrection;
}
sweep_xyz_signal_ = createSignal<sig_cb_velodyne_hdl_sweep_point_cloud_xyz> ();
- sweep_xyzrgb_signal_ = createSignal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgb> ();
+ sweep_xyzrgba_signal_ = createSignal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzrgba> ();
sweep_xyzi_signal_ = createSignal<sig_cb_velodyne_hdl_sweep_point_cloud_xyzi> ();
scan_xyz_signal_ = createSignal<sig_cb_velodyne_hdl_scan_point_cloud_xyz> ();
- scan_xyzrgb_signal_ = createSignal<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgb> ();
+ scan_xyzrgba_signal_ = createSignal<sig_cb_velodyne_hdl_scan_point_cloud_xyzrgba> ();
scan_xyzi_signal_ = createSignal<sig_cb_velodyne_hdl_scan_point_cloud_xyzi> ();
current_scan_xyz_.reset (new pcl::PointCloud<pcl::PointXYZ>);
current_sweep_xyz_.reset (new pcl::PointCloud<pcl::PointXYZ>);
current_sweep_xyzi_.reset (new pcl::PointCloud<pcl::PointXYZI>);
- for (int i = 0; i < HDL_MAX_NUM_LASERS; i++)
+ for (uint8_t i = 0; i < HDL_MAX_NUM_LASERS; i++)
laser_rgb_mapping_[i].r = laser_rgb_mapping_[i].g = laser_rgb_mapping_[i].b = 0;
if (laser_corrections_[32].distanceCorrection == 0.0)
{
- for (int i = 0; i < 16; i++)
+ for (uint8_t i = 0; i < 16; i++)
{
laser_rgb_mapping_[i * 2].b = static_cast<uint8_t> (i * 6 + 64);
laser_rgb_mapping_[i * 2 + 1].b = static_cast<uint8_t> ( (i + 16) * 6 + 64);
}
else
{
- for (int i = 0; i < 16; i++)
+ for (uint8_t i = 0; i < 16; i++)
{
laser_rgb_mapping_[i * 2].b = static_cast<uint8_t> (i * 3 + 64);
laser_rgb_mapping_[i * 2 + 1].b = static_cast<uint8_t> ( (i + 16) * 3 + 64);
}
- for (int i = 0; i < 16; i++)
+ for (uint8_t i = 0; i < 16; i++)
{
laser_rgb_mapping_[i * 2 + 32].b = static_cast<uint8_t> (i * 3 + 160);
laser_rgb_mapping_[i * 2 + 33].b = static_cast<uint8_t> ( (i + 16) * 3 + 160);
if (px.first == "px")
{
boost::property_tree::ptree calibration_data = px.second;
- int index = -1;
+ int32_t index = -1;
double azimuth = 0, vert_correction = 0, dist_correction = 0, vert_offset_correction = 0, horiz_offset_correction = 0;
BOOST_FOREACH (boost::property_tree::ptree::value_type &item, calibration_data)
{
double hdl32_vertical_corrections[] = { -30.67, -9.3299999, -29.33, -8, -28, -6.6700001, -26.67, -5.3299999, -25.33, -4, -24, -2.6700001, -22.67, -1.33, -21.33,
0, -20, 1.33, -18.67, 2.6700001, -17.33, 4, -16, 5.3299999, -14.67, 6.6700001, -13.33, 8, -12, 9.3299999, -10.67, 10.67 };
- for (int i = 0; i < HDL_LASER_PER_FIRING; i++)
+ for (uint8_t i = 0; i < HDL_LASER_PER_FIRING; i++)
{
laser_corrections_[i].azimuthCorrection = 0.0;
laser_corrections_[i].distanceCorrection = 0.0;
laser_corrections_[i].sinVertCorrection = std::sin (HDL_Grabber_toRadians(hdl32_vertical_corrections[i]));
laser_corrections_[i].cosVertCorrection = std::cos (HDL_Grabber_toRadians(hdl32_vertical_corrections[i]));
}
- for (int i = HDL_LASER_PER_FIRING; i < HDL_MAX_NUM_LASERS; i++)
+ for (uint8_t i = HDL_LASER_PER_FIRING; i < HDL_MAX_NUM_LASERS; i++)
{
laser_corrections_[i].azimuthCorrection = 0.0;
laser_corrections_[i].distanceCorrection = 0.0;
{
while (true)
{
- unsigned char *data;
+ uint8_t *data;
if (!hdl_data_.dequeue (data))
return;
return;
current_scan_xyz_.reset (new pcl::PointCloud<pcl::PointXYZ> ());
- current_scan_xyzrgb_.reset (new pcl::PointCloud<pcl::PointXYZRGBA> ());
+ current_scan_xyzrgba_.reset (new pcl::PointCloud<pcl::PointXYZRGBA> ());
current_scan_xyzi_.reset (new pcl::PointCloud<pcl::PointXYZI> ());
time_t system_time;
time_t velodyne_time = (system_time & 0x00000000ffffffffl) << 32 | dataPacket->gpsTimestamp;
current_scan_xyz_->header.stamp = velodyne_time;
- current_scan_xyzrgb_->header.stamp = velodyne_time;
+ current_scan_xyzrgba_->header.stamp = velodyne_time;
current_scan_xyzi_->header.stamp = velodyne_time;
current_scan_xyz_->header.seq = scan_counter;
- current_scan_xyzrgb_->header.seq = scan_counter;
+ current_scan_xyzrgba_->header.seq = scan_counter;
current_scan_xyzi_->header.seq = scan_counter;
scan_counter++;
- for (int i = 0; i < HDL_FIRING_PER_PKT; ++i)
+ for (uint8_t i = 0; i < HDL_FIRING_PER_PKT; ++i)
{
HDLFiringData firing_data = dataPacket->firingData[i];
- int offset = (firing_data.blockIdentifier == BLOCK_0_TO_31) ? 0 : 32;
+ uint8_t offset = (firing_data.blockIdentifier == BLOCK_0_TO_31) ? 0 : 32;
- for (int j = 0; j < HDL_LASER_PER_FIRING; j++)
+ for (uint8_t j = 0; j < HDL_LASER_PER_FIRING; j++)
{
if (firing_data.rotationalPosition < last_azimuth_)
{
- if (current_sweep_xyzrgb_->size () > 0)
+ if (current_sweep_xyzrgba_->size () > 0)
{
- current_sweep_xyz_->is_dense = current_sweep_xyzrgb_->is_dense = current_sweep_xyzi_->is_dense = false;
+ current_sweep_xyz_->is_dense = current_sweep_xyzrgba_->is_dense = current_sweep_xyzi_->is_dense = false;
current_sweep_xyz_->header.stamp = velodyne_time;
- current_sweep_xyzrgb_->header.stamp = velodyne_time;
+ current_sweep_xyzrgba_->header.stamp = velodyne_time;
current_sweep_xyzi_->header.stamp = velodyne_time;
current_sweep_xyz_->header.seq = sweep_counter;
- current_sweep_xyzrgb_->header.seq = sweep_counter;
+ current_sweep_xyzrgba_->header.seq = sweep_counter;
current_sweep_xyzi_->header.seq = sweep_counter;
sweep_counter++;
fireCurrentSweep ();
}
current_sweep_xyz_.reset (new pcl::PointCloud<pcl::PointXYZ> ());
- current_sweep_xyzrgb_.reset (new pcl::PointCloud<pcl::PointXYZRGBA> ());
+ current_sweep_xyzrgba_.reset (new pcl::PointCloud<pcl::PointXYZRGBA> ());
current_sweep_xyzi_.reset (new pcl::PointCloud<pcl::PointXYZI> ());
}
PointXYZ xyz;
PointXYZI xyzi;
- PointXYZRGBA xyzrgb;
+ PointXYZRGBA xyzrgba;
computeXYZI (xyzi, firing_data.rotationalPosition, firing_data.laserReturns[j], laser_corrections_[j + offset]);
- xyz.x = xyzrgb.x = xyzi.x;
- xyz.y = xyzrgb.y = xyzi.y;
- xyz.z = xyzrgb.z = xyzi.z;
+ xyz.x = xyzrgba.x = xyzi.x;
+ xyz.y = xyzrgba.y = xyzi.y;
+ xyz.z = xyzrgba.z = xyzi.z;
- xyzrgb.rgba = laser_rgb_mapping_[j + offset].rgba;
+ xyzrgba.rgba = laser_rgb_mapping_[j + offset].rgba;
if (pcl_isnan (xyz.x) || pcl_isnan (xyz.y) || pcl_isnan (xyz.z))
{
continue;
current_scan_xyz_->push_back (xyz);
current_scan_xyzi_->push_back (xyzi);
- current_scan_xyzrgb_->push_back (xyzrgb);
+ current_scan_xyzrgba_->push_back (xyzrgba);
current_sweep_xyz_->push_back (xyz);
current_sweep_xyzi_->push_back (xyzi);
- current_sweep_xyzrgb_->push_back (xyzrgb);
+ current_sweep_xyzrgba_->push_back (xyzrgba);
last_azimuth_ = firing_data.rotationalPosition;
}
}
- current_scan_xyz_->is_dense = current_scan_xyzrgb_->is_dense = current_scan_xyzi_->is_dense = true;
+ current_scan_xyz_->is_dense = current_scan_xyzrgba_->is_dense = current_scan_xyzi_->is_dense = true;
fireCurrentScan (dataPacket->firingData[0].rotationalPosition, dataPacket->firingData[11].rotationalPosition);
}
/////////////////////////////////////////////////////////////////////////////
void
pcl::HDLGrabber::computeXYZI (pcl::PointXYZI& point,
- int azimuth,
+ uint16_t azimuth,
HDLLaserReturn laserReturn,
HDLLaserCorrection correction)
{
if (sweep_xyz_signal_ != NULL && sweep_xyz_signal_->num_slots () > 0)
sweep_xyz_signal_->operator() (current_sweep_xyz_);
- if (sweep_xyzrgb_signal_ != NULL && sweep_xyzrgb_signal_->num_slots () > 0)
- sweep_xyzrgb_signal_->operator() (current_sweep_xyzrgb_);
+ if (sweep_xyzrgba_signal_ != NULL && sweep_xyzrgba_signal_->num_slots () > 0)
+ sweep_xyzrgba_signal_->operator() (current_sweep_xyzrgba_);
if (sweep_xyzi_signal_ != NULL && sweep_xyzi_signal_->num_slots () > 0)
sweep_xyzi_signal_->operator() (current_sweep_xyzi_);
/////////////////////////////////////////////////////////////////////////////
void
-pcl::HDLGrabber::fireCurrentScan (const unsigned short startAngle,
- const unsigned short endAngle)
+pcl::HDLGrabber::fireCurrentScan (const uint16_t startAngle,
+ const uint16_t endAngle)
{
const float start = static_cast<float> (startAngle) / 100.0f;
const float end = static_cast<float> (endAngle) / 100.0f;
if (scan_xyz_signal_->num_slots () > 0)
scan_xyz_signal_->operator () (current_scan_xyz_, start, end);
- if (scan_xyzrgb_signal_->num_slots () > 0)
- scan_xyzrgb_signal_->operator () (current_scan_xyzrgb_, start, end);
+ if (scan_xyzrgba_signal_->num_slots () > 0)
+ scan_xyzrgba_signal_->operator () (current_scan_xyzrgba_, start, end);
if (scan_xyzi_signal_->num_slots () > 0)
scan_xyzi_signal_->operator() (current_scan_xyzi_, start, end);
/////////////////////////////////////////////////////////////////////////////
void
-pcl::HDLGrabber::enqueueHDLPacket (const unsigned char *data,
+pcl::HDLGrabber::enqueueHDLPacket (const uint8_t *data,
std::size_t bytesReceived)
{
if (bytesReceived == 1206)
{
- unsigned char *dup = static_cast<unsigned char *> (malloc (bytesReceived * sizeof(unsigned char)));
- memcpy (dup, data, bytesReceived * sizeof(unsigned char));
+ uint8_t *dup = static_cast<uint8_t *> (malloc (bytesReceived * sizeof(uint8_t)));
+ memcpy (dup, data, bytesReceived * sizeof (uint8_t));
hdl_data_.enqueue (dup);
}
else
{
#ifdef HAVE_PCAP
- hdl_read_packet_thread_ = new boost::thread(boost::bind(&HDLGrabber::readPacketsFromPcap, this));
+ hdl_read_packet_thread_ = new boost::thread (boost::bind (&HDLGrabber::readPacketsFromPcap, this));
#endif // #ifdef HAVE_PCAP
}
}
/////////////////////////////////////////////////////////////////////////////
void
pcl::HDLGrabber::filterPackets (const boost::asio::ip::address& ipAddress,
- const unsigned short port)
+ const uint16_t port)
{
source_address_filter_ = ipAddress;
source_port_filter_ = port;
/////////////////////////////////////////////////////////////////////////////
void
pcl::HDLGrabber::setLaserColorRGB (const pcl::RGB& color,
- unsigned int laserNumber)
+ const uint8_t laserNumber)
{
- if (laserNumber >= (unsigned int) HDL_MAX_NUM_LASERS)
+ if (laserNumber >= HDL_MAX_NUM_LASERS)
return;
laser_rgb_mapping_[laserNumber] = color;
return (ipAddress.is_unspecified ());
#else
if (ipAddress.is_v4 ())
- return (ipAddress.to_v4 ().to_ulong() == 0);
+ return (ipAddress.to_v4 ().to_ulong () == 0);
return (false);
#endif
return (min_distance_threshold_);
}
+/////////////////////////////////////////////////////////////////////////////
+uint8_t
+pcl::HDLGrabber::getMaximumNumberOfLasers () const
+{
+ return (HDL_MAX_NUM_LASERS);
+}
+
/////////////////////////////////////////////////////////////////////////////
void
pcl::HDLGrabber::readPacketsFromSocket ()
{
- unsigned char data[1500];
+ uint8_t data[1500];
udp::endpoint sender_endpoint;
while (!terminate_read_packet_thread_ && hdl_read_socket_->is_open ())
pcl::HDLGrabber::readPacketsFromPcap ()
{
struct pcap_pkthdr *header;
- const unsigned char *data;
- char errbuff[PCAP_ERRBUF_SIZE];
+ const uint8_t *data;
+ int8_t errbuff[PCAP_ERRBUF_SIZE];
- pcap_t *pcap = pcap_open_offline (pcap_file_name_.c_str (), errbuff);
+ pcap_t *pcap = pcap_open_offline (pcap_file_name_.c_str (), reinterpret_cast<char *> (errbuff));
struct bpf_program filter;
std::ostringstream string_stream;
string_stream << "udp ";
- if (!isAddressUnspecified(source_address_filter_))
+ if (!isAddressUnspecified (source_address_filter_))
{
- string_stream << " and src port " << source_port_filter_ << " and src host " << source_address_filter_.to_string();
+ string_stream << " and src port " << source_port_filter_ << " and src host " << source_address_filter_.to_string ();
}
// PCAP_NETMASK_UNKNOWN should be 0xffffffff, but it's undefined in older PCAP versions
- if (pcap_compile (pcap, &filter, string_stream.str ().c_str(), 0, 0xffffffff) == -1)
+ if (pcap_compile (pcap, &filter, string_stream.str ().c_str (), 0, 0xffffffff) == -1)
{
PCL_WARN ("[pcl::HDLGrabber::readPacketsFromPcap] Issue compiling filter: %s.\n", pcap_geterr (pcap));
}
}
struct timeval lasttime;
- unsigned long long usec_delay;
+ uint64_t usec_delay;
lasttime.tv_sec = 0;
- int returnValue = pcap_next_ex(pcap, &header, &data);
+ int32_t returnValue = pcap_next_ex (pcap, &header, &data);
while (returnValue >= 0 && !terminate_read_packet_thread_)
{
usec_delay = ((header->ts.tv_sec - lasttime.tv_sec) * 1000000) +
(header->ts.tv_usec - lasttime.tv_usec);
- boost::this_thread::sleep(boost::posix_time::microseconds(usec_delay));
+ boost::this_thread::sleep (boost::posix_time::microseconds (usec_delay));
lasttime.tv_sec = header->ts.tv_sec;
lasttime.tv_usec = header->ts.tv_usec;
// The ETHERNET header is 42 bytes long; unnecessary
- enqueueHDLPacket(data + 42, header->len - 42);
+ enqueueHDLPacket (data + 42, header->len - 42);
- returnValue = pcap_next_ex(pcap, &header, &data);
+ returnValue = pcap_next_ex (pcap, &header, &data);
}
}
#endif //#ifdef HAVE_PCAP
if (!cloud.is_dense)
{
- PCL_ERROR ("[pcl::IFSWriter::write] Non dense cloud are not alowed by IFS format!\n");
+ PCL_ERROR ("[pcl::IFSWriter::write] Non dense cloud are not allowed by IFS format!\n");
return (-1);
}
typedef unsigned int LZF_HSLOT;
typedef unsigned int LZF_STATE[1 << (HLOG)];
-#define STRICT_ALIGN !(defined(__i386) || defined (__amd64))
+#if !(defined(__i386) || defined (__amd64))
+# define STRICT_ALIGN 1
+#else
+# define STRICT_ALIGN 0
+#endif
#if !STRICT_ALIGN
/* for unaligned accesses we need a 16 bit datatype. */
# if USHRT_MAX == 65535
*
*/
#include <pcl/console/time.h>
+#include <pcl/io/low_level_io.h>
#include <pcl/io/lzf_image_io.h>
#include <pcl/io/lzf.h>
#include <pcl/console/print.h>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
-#ifdef _WIN32
-# include <io.h>
-# include <windows.h>
-# define pcl_open _open
-# define pcl_close(fd) _close(fd)
-# define pcl_lseek(fd,offset,origin) _lseek(fd,offset,origin)
-#else
-# include <sys/mman.h>
-# define pcl_open open
-# define pcl_close(fd) close(fd)
-# define pcl_lseek(fd,offset,origin) lseek(fd,offset,origin)
-#endif
-
#define LZF_HEADER_SIZE 37
UnmapViewOfFile (map);
CloseHandle (h_native_file);
#else
- int fd = pcl_open (filename.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
+ int fd = raw_open (filename.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
if (fd < 0)
return (false);
- // Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, data_size - 1, SEEK_SET);
- if (result < 0)
- {
- pcl_close (fd);
- return (false);
- }
- // Write a bogus entry so that the new file size comes in effect
- result = static_cast<int> (::write (fd, "", 1));
- if (result != 1)
+
+ // Allocate disk space for the entire file to prevent bus errors.
+ if (io::raw_fallocate (fd, data_size) != 0)
{
- pcl_close (fd);
+ raw_close (fd);
+ throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during posix_fallocate ()!");
return (false);
}
- char *map = static_cast<char*> (mmap (0, data_size, PROT_WRITE, MAP_SHARED, fd, 0));
+ char *map = static_cast<char*> (::mmap (0, data_size, PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) // MAP_FAILED
{
- pcl_close (fd);
+ raw_close (fd);
return (false);
}
// Copy the data
memcpy (&map[0], data, data_size);
- if (munmap (map, (data_size)) == -1)
+ if (::munmap (map, (data_size)) == -1)
{
- pcl_close (fd);
+ raw_close (fd);
return (false);
}
- pcl_close (fd);
+ raw_close (fd);
#endif
return (true);
}
return (false);
}
// Open for reading
- int fd = pcl_open (filename.c_str (), O_RDONLY);
+ int fd = raw_open (filename.c_str (), O_RDONLY);
if (fd == -1)
{
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Failure to open file %s\n", filename.c_str () );
}
// Seek to the end of file to get the filesize
- off_t data_size = pcl_lseek (fd, 0, SEEK_END);
+ long data_size = raw_lseek (fd, 0, SEEK_END);
if (data_size < 0)
{
- pcl_close (fd);
+ raw_close (fd);
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] lseek errno: %d strerror: %s\n", errno, strerror (errno));
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Error during lseek ()!\n");
return (false);
}
- pcl_lseek (fd, 0, SEEK_SET);
+ raw_lseek (fd, 0, SEEK_SET);
#ifdef _WIN32
// As we don't know the real size of data (compressed or not),
if (map == NULL)
{
CloseHandle (fm);
- pcl_close (fd);
+ raw_close (fd);
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Error mapping view of file, %s\n", filename.c_str ());
return (false);
}
#else
- char *map = static_cast<char*> (mmap (0, data_size, PROT_READ, MAP_SHARED, fd, 0));
+ char *map = static_cast<char*> (::mmap (0, data_size, PROT_READ, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) // MAP_FAILED
{
- pcl_close (fd);
+ raw_close (fd);
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Error preparing mmap for PCLZF file.\n");
return (false);
}
UnmapViewOfFile (map);
CloseHandle (fm);
#else
- munmap (map, data_size);
+ ::munmap (map, data_size);
#endif
return (false);
}
UnmapViewOfFile (map);
CloseHandle (fm);
#else
- munmap (map, data_size);
+ ::munmap (map, data_size);
#endif
return (false);
}
UnmapViewOfFile (map);
CloseHandle (fm);
#else
- if (munmap (map, data_size) == -1)
+ if (::munmap (map, data_size) == -1)
PCL_ERROR ("[pcl::io::LZFImageReader::loadImage] Munmap failure\n");
#endif
- pcl_close (fd);
+ raw_close (fd);
- data_size = off_t (compressed_size); // We only care about this from here on
+ data_size = compressed_size; // We only care about this from here on
return (true);
}
// bool material_found = false;
std::vector<std::string> material_files;
std::size_t nr_point = 0;
- std::vector<std::string> st;
try
{
if (line == "")
continue;
- // Tokenize the line
- std::stringstream sstream (line);
- sstream.imbue (std::locale::classic ());
- line = sstream.str ();
+ // Trim the line
boost::trim (line);
- boost::split (st, line, boost::is_any_of ("\t\r "), boost::token_compress_on);
+
// Ignore comments
- if (st.at (0) == "#")
+ if (line[0] == '#')
continue;
- // Vertex
- if (st.at (0) == "v")
+ // Vertex, vertex texture or vertex normal
+ if (line[0] == 'v')
{
- ++nr_point;
- continue;
- }
+ // Vertex (v)
+ if (line[1] == ' ') {
+ ++nr_point;
+ continue;
+ }
- // Vertex texture
- if ((st.at (0) == "vt") && !vertex_texture_found)
- {
- vertex_texture_found = true;
- continue;
- }
+ // Vertex texture (vt)
+ else if ((line[1] == 't') && !vertex_texture_found)
+ {
+ vertex_texture_found = true;
+ continue;
+ }
- // Vertex normal
- if ((st.at (0) == "vn") && !vertex_normal_found)
- {
- vertex_normal_found = true;
- continue;
+ // Vertex normal (vn)
+ else if ((line[1] == 'n') && !vertex_normal_found)
+ {
+ vertex_normal_found = true;
+ continue;
+ }
}
// Material library, skip for now!
- if (st.at (0) == "mtllib")
+ if (line.substr (0, 6) == "mtllib")
{
+ std::vector<std::string> st;
+ boost::split(st, line, boost::is_any_of("\t\r "), boost::token_compress_on);
material_files.push_back (st.at (1));
continue;
}
// Vertex normal
if (st[0] == "vn")
{
+ if (normal_idx >= cloud.width)
+ {
+ if (normal_idx == cloud.width)
+ PCL_WARN ("[pcl:OBJReader] Too many vertex normals (expected %d), skipping remaining normals.\n", cloud.width, normal_idx + 1);
+ ++normal_idx;
+ continue;
+ }
try
{
for (int i = 1, f = normal_x_field; i < 4; ++i, ++f)
// Close obj file
fs.close ();
- /* Write material defination for OBJ file*/
+ /* Write material definition for OBJ file*/
// Open file
std::ofstream m_fs;
{
setColorVideoMode (getDefaultColorMode ());
}
- setDepthVideoMode (getDefaultDepthMode ());
- setIRVideoMode (getDefaultIRMode ());
+ if (openni_device_->hasSensor (openni::SENSOR_DEPTH))
+ {
+ setDepthVideoMode (getDefaultDepthMode ());
+ }
+ if (openni_device_->hasSensor (openni::SENSOR_IR))
+ {
+ setIRVideoMode (getDefaultIRMode ());
+ }
}
if (openni_device_->isFile ())
}
catch (...)
{
- PCL_THROW_EXCEPTION (pcl::IOException, "unknown error occured");
+ PCL_THROW_EXCEPTION (pcl::IOException, "unknown error occurred");
}
typedef pcl::io::openni2::OpenNI2VideoMode VideoMode;
}
catch (...)
{
- PCL_THROW_EXCEPTION (pcl::IOException, "unknown error occured");
+ PCL_THROW_EXCEPTION (pcl::IOException, "unknown error occurred");
}
XnMapOutputMode depth_md;
*/
#include <pcl/pcl_config.h>
+#include <pcl/io/low_level_io.h>
#include <pcl/io/pcd_grabber.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/tar.h>
-
-#ifdef _WIN32
-# include <io.h>
-# include <windows.h>
-# define pcl_open _open
-# define pcl_close(fd) _close(fd)
-# define pcl_lseek(fd,offset,origin) _lseek(fd,offset,origin)
-#else
-# include <sys/mman.h>
-# define pcl_open open
-# define pcl_close(fd) close(fd)
-# define pcl_lseek(fd,offset,origin) lseek(fd,offset,origin)
-#endif
///////////////////////////////////////////////////////////////////////////////////////////
//////////////////////// GrabberImplementation //////////////////////
else
{
tar_offset_ += (tar_header_.getFileSize ()) + (512 - tar_header_.getFileSize () % 512);
- int result = static_cast<int> (pcl_lseek (tar_fd_, tar_offset_, SEEK_SET));
+ int result = static_cast<int> (io::raw_lseek (tar_fd_, tar_offset_, SEEK_SET));
if (result < 0)
closeTARFile ();
}
// Try to read in the file as a PCD first
valid_ = (reader.read (*pcd_iterator_, next_cloud_, origin_, orientation_, pcd_version) == 0);
- // Has an error occured? Check if we can interpret the file as a TAR file first before going onto the next
+ // Has an error occurred? Check if we can interpret the file as a TAR file first before going onto the next
if (!valid_ && openTARFile (*pcd_iterator_) >= 0 && readTARHeader ())
{
tar_file_ = *pcd_iterator_;
else
{
tar_offset_ += (tar_header_.getFileSize ()) + (512 - tar_header_.getFileSize () % 512);
- int result = static_cast<int> (pcl_lseek (tar_fd_, tar_offset_, SEEK_SET));
+ int result = static_cast<int> (io::raw_lseek (tar_fd_, tar_offset_, SEEK_SET));
if (result < 0)
closeTARFile ();
}
}
// We only support regular files for now.
- // Addional file types in TAR include: hard links, symbolic links, device/special files, block devices,
+ // Additional file types in TAR include: hard links, symbolic links, device/special files, block devices,
// directories, and named pipes.
if (tar_header_.file_type[0] != '0' && tar_header_.file_type[0] != '\0')
{
void
pcl::PCDGrabberBase::PCDGrabberImpl::closeTARFile ()
{
- pcl_close (tar_fd_);
+ io::raw_close (tar_fd_);
tar_fd_ = -1;
tar_offset_ = 0;
memset (&tar_header_.file_name[0], 0, 512);
int
pcl::PCDGrabberBase::PCDGrabberImpl::openTARFile (const std::string &file_name)
{
- tar_fd_ = pcl_open (file_name.c_str (), O_RDONLY);
+ tar_fd_ = io::raw_open (file_name.c_str (), O_RDONLY);
if (tar_fd_ == -1)
return (-1);
cloud_idx_to_file_idx_.push_back (i);
// Update offset
tar_offset_ += (tar_header_.getFileSize ()) + (512 - tar_header_.getFileSize () % 512);
- int result = static_cast<int> (pcl_lseek (tar_fd_, tar_offset_, SEEK_SET));
+ int result = static_cast<int> (io::raw_lseek (tar_fd_, tar_offset_, SEEK_SET));
if (result < 0)
break;
if (tar_fd_ == -1)
#include <stdlib.h>
#include <pcl/io/boost.h>
#include <pcl/common/io.h>
-#include <pcl/io/pcd_io.h>
+#include <pcl/io/low_level_io.h>
#include <pcl/io/lzf.h>
+#include <pcl/io/pcd_io.h>
#include <pcl/console/time.h>
#include <cstring>
#include <cerrno>
-#ifdef _WIN32
-# include <io.h>
-# include <windows.h>
-# define pcl_open _open
-# define pcl_close(fd) _close(fd)
-# define pcl_lseek(fd,offset,origin) _lseek(fd,offset,origin)
-#else
-# include <sys/mman.h>
-# define pcl_open open
-# define pcl_close(fd) close(fd)
-# define pcl_lseek(fd,offset,origin) lseek(fd,offset,origin)
-#endif
#include <boost/version.hpp>
///////////////////////////////////////////////////////////////////////////////////////////
(void)file_name;
(void)lock;
#ifndef WIN32
+#ifndef NO_MANDATORY_LOCKING
// Boost version 1.49 introduced permissions
#if BOOST_VERSION >= 104900
// Attempt to lock the file.
}
#endif
#endif
+#endif
}
///////////////////////////////////////////////////////////////////////////////////////////
(void)file_name;
(void)lock;
#ifndef WIN32
+#ifndef NO_MANDATORY_LOCKING
// Boost version 1.49 introduced permissions
#if BOOST_VERSION >= 104900
(void)file_name;
lock.unlock ();
#endif
#endif
+#endif
}
///////////////////////////////////////////////////////////////////////////////////////////
int
-pcl::PCDReader::readHeader (const std::string &file_name, pcl::PCLPointCloud2 &cloud,
+pcl::PCDReader::readHeader (std::istream &fs, pcl::PCLPointCloud2 &cloud,
Eigen::Vector4f &origin, Eigen::Quaternionf &orientation,
- int &pcd_version, int &data_type, unsigned int &data_idx, const int offset)
+ int &pcd_version, int &data_type, unsigned int &data_idx)
{
// Default values
data_idx = 0;
// By default, assume that there are _no_ invalid (e.g., NaN) points
//cloud.is_dense = true;
- int nr_points = 0;
- std::ifstream fs;
+ size_t nr_points = 0;
std::string line;
int specified_channel_count = 0;
- if (file_name == "" || !boost::filesystem::exists (file_name))
- {
- PCL_ERROR ("[pcl::PCDReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
- return (-1);
- }
-
- // Open file in binary mode to avoid problem of
- // std::getline() corrupting the result of ifstream::tellg()
- fs.open (file_name.c_str (), std::ios::binary);
- if (!fs.is_open () || fs.fail ())
- {
- PCL_ERROR ("[pcl::PCDReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror(errno));
- fs.close ();
- return (-1);
- }
-
- // Seek at the given offset
- fs.seekg (offset, std::ios::beg);
-
// field_sizes represents the size of one element in a field (e.g., float = 4, char = 1)
// field_counts represents the number of elements in a field (e.g., x = 1, normal_x = 1, fpfh = 33)
std::vector<int> field_sizes, field_counts;
cloud.fields[i].offset = offset;
int col_count;
sstream >> col_count;
+ if (col_count < 1)
+ throw "Invalid COUNT value specified.";
cloud.fields[i].count = col_count;
offset += col_count * field_sizes[i];
}
if (line_type.substr (0, 5) == "WIDTH")
{
sstream >> cloud.width;
+ if (sstream.fail ())
+ throw "Invalid WIDTH value specified.";
if (cloud.point_step != 0)
cloud.row_step = cloud.point_step * cloud.width; // row_step only makes sense for organized datasets
continue;
// Get the number of points
if (line_type.substr (0, 6) == "POINTS")
{
+ if (!cloud.point_step)
+ throw "Number of POINTS specified before COUNT in header!";
sstream >> nr_points;
// Need to allocate: N * point_step
cloud.data.resize (nr_points * cloud.point_step);
catch (const char *exception)
{
PCL_ERROR ("[pcl::PCDReader::readHeader] %s\n", exception);
- fs.close ();
return (-1);
}
if (nr_points == 0)
{
PCL_ERROR ("[pcl::PCDReader::readHeader] No points to read\n");
- fs.close ();
return (-1);
}
if (cloud.width == 0 && nr_points != 0)
{
PCL_ERROR ("[pcl::PCDReader::readHeader] HEIGHT given (%d) but no WIDTH!\n", cloud.height);
- fs.close ();
return (-1);
}
}
- if (int (cloud.width * cloud.height) != nr_points)
+ if (cloud.width * cloud.height != nr_points)
{
PCL_ERROR ("[pcl::PCDReader::readHeader] HEIGHT (%d) x WIDTH (%d) != number of points (%d)\n", cloud.height, cloud.width, nr_points);
- fs.close ();
return (-1);
}
- // Close file
- fs.close ();
-
return (0);
}
///////////////////////////////////////////////////////////////////////////////////////////
int
-pcl::PCDReader::readHeader (const std::string &file_name, pcl::PCLPointCloud2 &cloud, const int offset)
+pcl::PCDReader::readHeader (const std::string &file_name, pcl::PCLPointCloud2 &cloud,
+ Eigen::Vector4f &origin, Eigen::Quaternionf &orientation,
+ int &pcd_version, int &data_type, unsigned int &data_idx, const int offset)
{
- // Default values
- cloud.width = cloud.height = cloud.point_step = cloud.row_step = 0;
- cloud.data.clear ();
-
- // By default, assume that there are _no_ invalid (e.g., NaN) points
- //cloud.is_dense = true;
-
- int nr_points = 0;
- std::ifstream fs;
- std::string line;
-
- int specified_channel_count = 0;
-
if (file_name == "" || !boost::filesystem::exists (file_name))
{
PCL_ERROR ("[pcl::PCDReader::readHeader] Could not find file '%s'.\n", file_name.c_str ());
// Open file in binary mode to avoid problem of
// std::getline() corrupting the result of ifstream::tellg()
+ std::ifstream fs;
fs.open (file_name.c_str (), std::ios::binary);
if (!fs.is_open () || fs.fail ())
{
- PCL_ERROR ("[pcl::PCDReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror(errno));
+ PCL_ERROR ("[pcl::PCDReader::readHeader] Could not open file '%s'! Error : %s\n", file_name.c_str (), strerror (errno));
fs.close ();
return (-1);
}
// Seek at the given offset
fs.seekg (offset, std::ios::beg);
- // field_sizes represents the size of one element in a field (e.g., float = 4, char = 1)
- // field_counts represents the number of elements in a field (e.g., x = 1, normal_x = 1, fpfh = 33)
- std::vector<int> field_sizes, field_counts;
- // field_types represents the type of data in a field (e.g., F = float, U = unsigned)
- std::vector<char> field_types;
+ // Delegate parsing to the istream overload.
+ int result = readHeader (fs, cloud, origin, orientation, pcd_version, data_type, data_idx);
+
+ // Close file
+ fs.close ();
+
+ return result;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+int
+pcl::PCDReader::readHeader (const std::string &file_name, pcl::PCLPointCloud2 &cloud, const int offset)
+{
+ Eigen::Vector4f origin = Eigen::Vector4f::Zero ();
+ Eigen::Quaternionf orientation = Eigen::Quaternionf::Identity ();
+ int pcd_version = 0;
+ int data_type = 0;
+ unsigned int data_idx = 0;
+
+ return readHeader (file_name, cloud, origin, orientation, pcd_version, data_type, data_idx, offset);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+int
+pcl::PCDReader::readBodyASCII (std::istream &fs, pcl::PCLPointCloud2 &cloud, int /*pcd_version*/)
+{
+ // Get the number of points the cloud should have
+ unsigned int nr_points = cloud.width * cloud.height;
+
+ // Setting the is_dense property to true by default
+ cloud.is_dense = true;
+
+ unsigned int idx = 0;
+ std::string line;
std::vector<std::string> st;
- // Read the header and fill it in with wonderful values
try
{
- while (!fs.eof ())
+ while (idx < nr_points && !fs.eof ())
{
getline (fs, line);
// Ignore empty lines
// Tokenize the line
boost::trim (line);
boost::split (st, line, boost::is_any_of ("\t\r "), boost::token_compress_on);
-
- std::stringstream sstream (line);
- sstream.imbue (std::locale::classic ());
-
- std::string line_type;
- sstream >> line_type;
-
- // Ignore comments
- if (line_type.substr (0, 1) == "#")
- continue;
-
- // Version numbers are not needed for now, but we are checking to see if they're there
- if (line_type.substr (0, 7) == "VERSION")
- continue;
-
- // Get the field indices (check for COLUMNS too for backwards compatibility)
- if ( (line_type.substr (0, 6) == "FIELDS") || (line_type.substr (0, 7) == "COLUMNS") )
+
+ if (idx >= nr_points)
{
- specified_channel_count = static_cast<int> (st.size () - 1);
-
- // Allocate enough memory to accommodate all fields
- cloud.fields.resize (specified_channel_count);
- for (int i = 0; i < specified_channel_count; ++i)
- {
- std::string col_type = st.at (i + 1);
- cloud.fields[i].name = col_type;
- }
-
- // Default the sizes and the types of each field to float32 to avoid crashes while using older PCD files
- int offset = 0;
- for (int i = 0; i < specified_channel_count; ++i, offset += 4)
- {
- cloud.fields[i].offset = offset;
- cloud.fields[i].datatype = pcl::PCLPointField::FLOAT32;
- cloud.fields[i].count = 1;
- }
- cloud.point_step = offset;
- continue;
+ PCL_WARN ("[pcl::PCDReader::read] input has more points (%d) than advertised (%d)!\n", idx, nr_points);
+ break;
}
- // Get the field sizes
- if (line_type.substr (0, 4) == "SIZE")
+ size_t total = 0;
+ // Copy data
+ for (unsigned int d = 0; d < static_cast<unsigned int> (cloud.fields.size ()); ++d)
{
- specified_channel_count = static_cast<int> (st.size () - 1);
-
- // Allocate enough memory to accommodate all fields
- if (specified_channel_count != static_cast<int> (cloud.fields.size ()))
- throw "The number of elements in <SIZE> differs than the number of elements in <FIELDS>!";
-
- // Resize to accommodate the number of values
- field_sizes.resize (specified_channel_count);
-
- int offset = 0;
- for (int i = 0; i < specified_channel_count; ++i)
+ // Ignore invalid padded dimensions that are inherited from binary data
+ if (cloud.fields[d].name == "_")
{
- int col_type ;
- sstream >> col_type;
- cloud.fields[i].offset = offset; // estimate and save the data offsets
- offset += col_type;
- field_sizes[i] = col_type; // save a temporary copy
- }
- cloud.point_step = offset;
- //if (cloud.width != 0)
- //cloud.row_step = cloud.point_step * cloud.width;
- continue;
- }
-
- // Get the field types
- if (line_type.substr (0, 4) == "TYPE")
- {
- if (field_sizes.empty ())
- throw "TYPE of FIELDS specified before SIZE in header!";
-
- specified_channel_count = static_cast<int> (st.size () - 1);
-
- // Allocate enough memory to accommodate all fields
- if (specified_channel_count != static_cast<int> (cloud.fields.size ()))
- throw "The number of elements in <TYPE> differs than the number of elements in <FIELDS>!";
-
- // Resize to accommodate the number of values
- field_types.resize (specified_channel_count);
-
- for (int i = 0; i < specified_channel_count; ++i)
- {
- field_types[i] = st.at (i + 1).c_str ()[0];
- cloud.fields[i].datatype = static_cast<uint8_t> (getFieldType (field_sizes[i], field_types[i]));
+ total += cloud.fields[d].count; // jump over this many elements in the string token
+ continue;
}
- continue;
- }
-
- // Get the field counts
- if (line_type.substr (0, 5) == "COUNT")
- {
- if (field_sizes.empty () || field_types.empty ())
- throw "COUNT of FIELDS specified before SIZE or TYPE in header!";
-
- specified_channel_count = static_cast<int> (st.size () - 1);
-
- // Allocate enough memory to accommodate all fields
- if (specified_channel_count != static_cast<int> (cloud.fields.size ()))
- throw "The number of elements in <COUNT> differs than the number of elements in <FIELDS>!";
-
- field_counts.resize (specified_channel_count);
-
- int offset = 0;
- for (int i = 0; i < specified_channel_count; ++i)
+ for (unsigned int c = 0; c < cloud.fields[d].count; ++c)
{
- cloud.fields[i].offset = offset;
- int col_count;
- sstream >> col_count;
- cloud.fields[i].count = col_count;
- offset += col_count * field_sizes[i];
+ switch (cloud.fields[d].datatype)
+ {
+ case pcl::PCLPointField::INT8:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT8>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::UINT8:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT8>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::INT16:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT16>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::UINT16:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT16>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::INT32:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT32>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::UINT32:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT32>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::FLOAT32:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ case pcl::PCLPointField::FLOAT64:
+ {
+ copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT64>::type> (
+ st.at (total + c), cloud, idx, d, c);
+ break;
+ }
+ default:
+ PCL_WARN ("[pcl::PCDReader::read] Incorrect field data type specified (%d)!\n",cloud.fields[d].datatype);
+ break;
+ }
}
- // Adjust the offset for count (number of elements)
- cloud.point_step = offset;
- continue;
- }
-
- // Get the width of the data (organized point cloud dataset)
- if (line_type.substr (0, 5) == "WIDTH")
- {
- sstream >> cloud.width;
- if (cloud.point_step != 0)
- cloud.row_step = cloud.point_step * cloud.width; // row_step only makes sense for organized datasets
- continue;
+ total += cloud.fields[d].count; // jump over this many elements in the string token
}
-
- // Get the height of the data (organized point cloud dataset)
- if (line_type.substr (0, 6) == "HEIGHT")
- {
- sstream >> cloud.height;
- continue;
- }
-
- // Check the format of the acquisition viewpoint
- if (line_type.substr (0, 9) == "VIEWPOINT")
- {
- if (st.size () < 8)
- throw "Not enough number of elements in <VIEWPOINT>! Need 7 values (tx ty tz qw qx qy qz).";
- continue;
- }
-
- // Get the number of points
- if (line_type.substr (0, 6) == "POINTS")
- {
- sstream >> nr_points;
- // Need to allocate: N * point_step
- cloud.data.resize (nr_points * cloud.point_step);
- continue;
- }
- break;
+ idx++;
}
}
catch (const char *exception)
{
- PCL_ERROR ("[pcl::PCDReader::readHeader] %s\n", exception);
- fs.close ();
+ PCL_ERROR ("[pcl::PCDReader::read] %s\n", exception);
return (-1);
}
- // Exit early: if no points have been given, there's no sense to read or check anything anymore
- if (nr_points == 0)
+ if (idx != nr_points)
{
- PCL_ERROR ("[pcl::PCDReader::readHeader] No points to read\n");
- fs.close ();
+ PCL_ERROR ("[pcl::PCDReader::read] Number of points read (%d) is different than expected (%d)\n", idx, nr_points);
return (-1);
}
-
- // Compatibility with older PCD file versions
- if (cloud.width == 0 && cloud.height == 0)
- {
- cloud.width = nr_points;
- cloud.height = 1;
- cloud.row_step = cloud.point_step * cloud.width; // row_step only makes sense for organized datasets
- }
- //assert (cloud.row_step != 0); // If row_step = 0, either point_step was not set or width is 0
- // if both height/width are not given, assume an unorganized dataset
- if (cloud.height == 0)
+ return (0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+int
+pcl::PCDReader::readBodyBinary (const unsigned char *map, pcl::PCLPointCloud2 &cloud,
+ int /*pcd_version*/, bool compressed, unsigned int data_idx)
+{
+ // Setting the is_dense property to true by default
+ cloud.is_dense = true;
+
+ /// ---[ Binary compressed mode only
+ if (compressed)
{
- cloud.height = 1;
- PCL_WARN ("[pcl::PCDReader::readHeader] no HEIGHT given, setting to 1 (unorganized).\n");
- if (cloud.width == 0)
- cloud.width = nr_points;
+ // Uncompress the data first
+ unsigned int compressed_size = 0, uncompressed_size = 0;
+ memcpy (&compressed_size, &map[data_idx + 0], 4);
+ memcpy (&uncompressed_size, &map[data_idx + 4], 4);
+ PCL_DEBUG ("[pcl::PCDReader::read] Read a binary compressed file with %u bytes compressed and %u original.\n", compressed_size, uncompressed_size);
+
+ if (uncompressed_size != cloud.data.size ())
+ {
+ PCL_WARN ("[pcl::PCDReader::read] The estimated cloud.data size (%u) is different than the saved uncompressed value (%u)! Data corruption?\n",
+ cloud.data.size (), uncompressed_size);
+ cloud.data.resize (uncompressed_size);
+ }
+
+ unsigned int data_size = static_cast<unsigned int> (cloud.data.size ());
+ std::vector<char> buf (data_size);
+ // The size of the uncompressed data better be the same as what we stored in the header
+ unsigned int tmp_size = pcl::lzfDecompress (&map[data_idx + 8], compressed_size, &buf[0], data_size);
+ if (tmp_size != uncompressed_size)
+ {
+ PCL_ERROR ("[pcl::PCDReader::read] Size of decompressed lzf data (%u) does not match value stored in PCD header (%u). Errno: %d\n", tmp_size, uncompressed_size, errno);
+ return (-1);
+ }
+
+ // Get the fields sizes
+ std::vector<pcl::PCLPointField> fields (cloud.fields.size ());
+ std::vector<int> fields_sizes (cloud.fields.size ());
+ int nri = 0, fsize = 0;
+ for (size_t i = 0; i < cloud.fields.size (); ++i)
+ {
+ if (cloud.fields[i].name == "_")
+ continue;
+ fields_sizes[nri] = cloud.fields[i].count * pcl::getFieldSize (cloud.fields[i].datatype);
+ fsize += fields_sizes[nri];
+ fields[nri] = cloud.fields[i];
+ ++nri;
+ }
+ fields.resize (nri);
+ fields_sizes.resize (nri);
+
+ // Unpack the xxyyzz to xyz
+ std::vector<char*> pters (fields.size ());
+ int toff = 0;
+ for (size_t i = 0; i < pters.size (); ++i)
+ {
+ pters[i] = &buf[toff];
+ toff += fields_sizes[i] * cloud.width * cloud.height;
+ }
+ // Copy it to the cloud
+ for (size_t i = 0; i < cloud.width * cloud.height; ++i)
+ {
+ for (size_t j = 0; j < pters.size (); ++j)
+ {
+ memcpy (&cloud.data[i * fsize + fields[j].offset], pters[j], fields_sizes[j]);
+ // Increment the pointer
+ pters[j] += fields_sizes[j];
+ }
+ }
+ //memcpy (&cloud.data[0], &buf[0], uncompressed_size);
}
else
+ // Copy the data
+ memcpy (&cloud.data[0], &map[0] + data_idx, cloud.data.size ());
+
+ // Extra checks (not needed for ASCII)
+ int point_size = static_cast<int> (cloud.data.size () / (cloud.height * cloud.width));
+ // Once copied, we need to go over each field and check if it has NaN/Inf values and assign cloud.is_dense to true or false
+ for (uint32_t i = 0; i < cloud.width * cloud.height; ++i)
{
- if (cloud.width == 0 && nr_points != 0)
+ for (unsigned int d = 0; d < static_cast<unsigned int> (cloud.fields.size ()); ++d)
{
- PCL_ERROR ("[pcl::PCDReader::readHeader] HEIGHT given (%d) but no WIDTH!\n", cloud.height);
- fs.close ();
- return (-1);
+ for (uint32_t c = 0; c < cloud.fields[d].count; ++c)
+ {
+ switch (cloud.fields[d].datatype)
+ {
+ case pcl::PCLPointField::INT8:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT8>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::UINT8:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT8>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::INT16:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT16>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::UINT16:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT16>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::INT32:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT32>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::UINT32:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT32>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::FLOAT32:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ case pcl::PCLPointField::FLOAT64:
+ {
+ if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::FLOAT64>::type> (cloud, i, point_size, d, c))
+ cloud.is_dense = false;
+ break;
+ }
+ }
+ }
}
}
- if (int (cloud.width * cloud.height) != nr_points)
- {
- PCL_ERROR ("[pcl::PCDReader::readHeader] HEIGHT (%d) x WIDTH (%d) != number of points (%d)\n", cloud.height, cloud.width, nr_points);
- fs.close ();
- return (-1);
- }
-
- // Close file
- fs.close ();
-
return (0);
}
pcl::console::TicToc tt;
tt.tic ();
+ if (file_name == "" || !boost::filesystem::exists (file_name))
+ {
+ PCL_ERROR ("[pcl::PCDReader::read] Could not find file '%s'.\n", file_name.c_str ());
+ return (-1);
+ }
+
int data_type;
unsigned int data_idx;
if (res < 0)
return (res);
-
- unsigned int idx = 0;
-
- // Get the number of points the cloud should have
- unsigned int nr_points = cloud.width * cloud.height;
-
- // Setting the is_dense property to true by default
- cloud.is_dense = true;
-
- if (file_name == "" || !boost::filesystem::exists (file_name))
- {
- PCL_ERROR ("[pcl::PCDReader::read] Could not find file '%s'.\n", file_name.c_str ());
- return (-1);
- }
// if ascii
if (data_type == 0)
return (-1);
}
- fs.seekg (data_idx);
-
- std::string line;
- std::vector<std::string> st;
+ fs.seekg (data_idx + offset);
// Read the rest of the file
- try
- {
- while (idx < nr_points && !fs.eof ())
- {
- getline (fs, line);
- // Ignore empty lines
- if (line == "")
- continue;
-
- // Tokenize the line
- boost::trim (line);
- boost::split (st, line, boost::is_any_of ("\t\r "), boost::token_compress_on);
-
- if (idx >= nr_points)
- {
- PCL_WARN ("[pcl::PCDReader::read] input file %s has more points (%d) than advertised (%d)!\n", file_name.c_str (), idx, nr_points);
- break;
- }
-
- size_t total = 0;
- // Copy data
- for (unsigned int d = 0; d < static_cast<unsigned int> (cloud.fields.size ()); ++d)
- {
- // Ignore invalid padded dimensions that are inherited from binary data
- if (cloud.fields[d].name == "_")
- {
- total += cloud.fields[d].count; // jump over this many elements in the string token
- continue;
- }
- for (unsigned int c = 0; c < cloud.fields[d].count; ++c)
- {
- switch (cloud.fields[d].datatype)
- {
- case pcl::PCLPointField::INT8:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT8>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::UINT8:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT8>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::INT16:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT16>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::UINT16:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT16>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::INT32:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::INT32>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::UINT32:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::UINT32>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::FLOAT32:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- case pcl::PCLPointField::FLOAT64:
- {
- copyStringValue<pcl::traits::asType<pcl::PCLPointField::FLOAT64>::type> (
- st.at (total + c), cloud, idx, d, c);
- break;
- }
- default:
- PCL_WARN ("[pcl::PCDReader::read] Incorrect field data type specified (%d)!\n",cloud.fields[d].datatype);
- break;
- }
- }
- total += cloud.fields[d].count; // jump over this many elements in the string token
- }
- idx++;
- }
- }
- catch (const char *exception)
- {
- PCL_ERROR ("[pcl::PCDReader::read] %s\n", exception);
- fs.close ();
- return (-1);
- }
+ res = readBodyASCII (fs, cloud, pcd_version);
// Close file
fs.close ();
/// We must re-open the file and read with mmap () for binary
{
// Open for reading
- int fd = pcl_open (file_name.c_str (), O_RDONLY);
+ int fd = io::raw_open (file_name.c_str (), O_RDONLY);
if (fd == -1)
{
PCL_ERROR ("[pcl::PCDReader::read] Failure to open file %s\n", file_name.c_str () );
return (-1);
}
+
+ // Infer file size
+ const size_t file_size = io::raw_lseek (fd, 0, SEEK_END);
+ io::raw_lseek (fd, 0, SEEK_SET);
+
+ size_t mmap_size = offset + data_idx; // ...because we mmap from the start of the file.
+ if (data_type == 2)
+ {
+ // Seek to real start of data.
+ long result = io::raw_lseek (fd, offset + data_idx, SEEK_SET);
+ if (result < 0)
+ {
+ io::raw_close (fd);
+ PCL_ERROR ("[pcl::PCDReader::read] lseek errno: %d strerror: %s\n", errno, strerror (errno));
+ PCL_ERROR ("[pcl::PCDReader::read] Error during lseek ()!\n");
+ return (-1);
+ }
- // Seek at the given offset
- off_t result = pcl_lseek (fd, offset, SEEK_SET);
- if (result < 0)
+ // Read compressed size to compute how much must be mapped
+ unsigned int compressed_size = 0;
+ ssize_t num_read = io::raw_read (fd, &compressed_size, 4);
+ if (num_read < 0)
+ {
+ io::raw_close (fd);
+ PCL_ERROR ("[pcl::PCDReader::read] read errno: %d strerror: %s\n", errno, strerror (errno));
+ PCL_ERROR ("[pcl::PCDReader::read] Error during read()!\n");
+ return (-1);
+ }
+ mmap_size += compressed_size;
+ // Add the 8 bytes used to store the compressed and uncompressed size
+ mmap_size += 8;
+
+ // Reset position
+ io::raw_lseek (fd, 0, SEEK_SET);
+ }
+ else
+ {
+ mmap_size += cloud.data.size ();
+ }
+
+ if (mmap_size > file_size)
{
- pcl_close (fd);
- PCL_ERROR ("[pcl::PCDReader::read] lseek errno: %d strerror: %s\n", errno, strerror (errno));
- PCL_ERROR ("[pcl::PCDReader::read] Error during lseek ()!\n");
+ io::raw_close (fd);
+ PCL_ERROR ("[pcl::PCDReader::read] Corrupted PCD file. The file is smaller than expected!\n");
return (-1);
}
-
- size_t data_size = data_idx + cloud.data.size ();
+
// Prepare the map
#ifdef _WIN32
// As we don't know the real size of data (compressed or not),
HANDLE fm = CreateFileMapping ((HANDLE) _get_osfhandle (fd), NULL, PAGE_READONLY, 0, 0, NULL);
// As we don't know the real size of data (compressed or not),
// we set dwNumberOfBytesToMap = 0 so as to map the whole file
- char *map = static_cast<char*>(MapViewOfFile (fm, FILE_MAP_READ, 0, 0, 0));
+ unsigned char *map = static_cast<unsigned char*> (MapViewOfFile (fm, FILE_MAP_READ, 0, 0, 0));
if (map == NULL)
{
CloseHandle (fm);
- pcl_close (fd);
+ io::raw_close (fd);
PCL_ERROR ("[pcl::PCDReader::read] Error mapping view of file, %s\n", file_name.c_str ());
return (-1);
}
#else
- char *map = static_cast<char*> (mmap (0, data_size, PROT_READ, MAP_SHARED, fd, 0));
- if (map == reinterpret_cast<char*> (-1)) // MAP_FAILED
+ unsigned char *map = static_cast<unsigned char*> (::mmap (0, mmap_size, PROT_READ, MAP_SHARED, fd, 0));
+ if (map == reinterpret_cast<unsigned char*> (-1)) // MAP_FAILED
{
- pcl_close (fd);
+ io::raw_close (fd);
PCL_ERROR ("[pcl::PCDReader::read] Error preparing mmap for binary PCD file.\n");
return (-1);
}
#endif
- /// ---[ Binary compressed mode only
- if (data_type == 2)
- {
- // Uncompress the data first
- unsigned int compressed_size, uncompressed_size;
- memcpy (&compressed_size, &map[data_idx + 0], sizeof (unsigned int));
- memcpy (&uncompressed_size, &map[data_idx + 4], sizeof (unsigned int));
- PCL_DEBUG ("[pcl::PCDReader::read] Read a binary compressed file with %u bytes compressed and %u original.\n", compressed_size, uncompressed_size);
- // For all those weird situations where the compressed data is actually LARGER than the uncompressed one
- // (we really ought to check this in the compressor and copy the original data in those cases)
- if (data_size < compressed_size || uncompressed_size < compressed_size)
- {
- PCL_DEBUG ("[pcl::PCDReader::read] Allocated data size (%lu) or uncompressed size (%lu) smaller than compressed size (%u). Need to remap.\n", data_size, uncompressed_size, compressed_size);
-#ifdef _WIN32
- UnmapViewOfFile (map);
- data_size = compressed_size + data_idx + 8;
- map = static_cast<char*>(MapViewOfFile (fm, FILE_MAP_READ, 0, 0, data_size));
-#else
- munmap (map, data_size);
- data_size = compressed_size + data_idx + 8;
- map = static_cast<char*> (mmap (0, data_size, PROT_READ, MAP_SHARED, fd, 0));
-#endif
- }
-
- if (uncompressed_size != cloud.data.size ())
- {
- PCL_WARN ("[pcl::PCDReader::read] The estimated cloud.data size (%u) is different than the saved uncompressed value (%u)! Data corruption?\n",
- cloud.data.size (), uncompressed_size);
- cloud.data.resize (uncompressed_size);
- }
-
- char *buf = static_cast<char*> (malloc (data_size));
- // The size of the uncompressed data better be the same as what we stored in the header
- unsigned int tmp_size = pcl::lzfDecompress (&map[data_idx + 8], compressed_size, buf, static_cast<unsigned int> (data_size));
- if (tmp_size != uncompressed_size)
- {
- free (buf);
- pcl_close (fd);
- PCL_ERROR ("[pcl::PCDReader::read] Size of decompressed lzf data (%u) does not match value stored in PCD header (%u). Errno: %d\n", tmp_size, uncompressed_size, errno);
- return (-1);
- }
-
- // Get the fields sizes
- std::vector<pcl::PCLPointField> fields (cloud.fields.size ());
- std::vector<int> fields_sizes (cloud.fields.size ());
- int nri = 0, fsize = 0;
- for (size_t i = 0; i < cloud.fields.size (); ++i)
- {
- if (cloud.fields[i].name == "_")
- continue;
- fields_sizes[nri] = cloud.fields[i].count * pcl::getFieldSize (cloud.fields[i].datatype);
- fsize += fields_sizes[nri];
- fields[nri] = cloud.fields[i];
- ++nri;
- }
- fields.resize (nri);
- fields_sizes.resize (nri);
-
- // Unpack the xxyyzz to xyz
- std::vector<char*> pters (fields.size ());
- int toff = 0;
- for (size_t i = 0; i < pters.size (); ++i)
- {
- pters[i] = &buf[toff];
- toff += fields_sizes[i] * cloud.width * cloud.height;
- }
- // Copy it to the cloud
- for (size_t i = 0; i < cloud.width * cloud.height; ++i)
- {
- for (size_t j = 0; j < pters.size (); ++j)
- {
- memcpy (&cloud.data[i * fsize + fields[j].offset], pters[j], fields_sizes[j]);
- // Increment the pointer
- pters[j] += fields_sizes[j];
- }
- }
- //memcpy (&cloud.data[0], &buf[0], uncompressed_size);
-
- free (buf);
- }
- else
- // Copy the data
- memcpy (&cloud.data[0], &map[0] + data_idx, cloud.data.size ());
+ res = readBodyBinary (map, cloud, pcd_version, data_type == 2, offset + data_idx);
// Unmap the pages of memory
#ifdef _WIN32
UnmapViewOfFile (map);
CloseHandle (fm);
#else
- if (munmap (map, data_size) == -1)
+ if (::munmap (map, mmap_size) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
PCL_ERROR ("[pcl::PCDReader::read] Munmap failure\n");
return (-1);
}
#endif
- pcl_close (fd);
- }
-
- if ((idx != nr_points) && (data_type == 0))
- {
- PCL_ERROR ("[pcl::PCDReader::read] Number of points read (%d) is different than expected (%d)\n", idx, nr_points);
- return (-1);
- }
-
- // No need to do any extra checks if the data type is ASCII
- if (data_type != 0)
- {
- int point_size = static_cast<int> (cloud.data.size () / (cloud.height * cloud.width));
- // Once copied, we need to go over each field and check if it has NaN/Inf values and assign cloud.is_dense to true or false
- for (uint32_t i = 0; i < cloud.width * cloud.height; ++i)
- {
- for (unsigned int d = 0; d < static_cast<unsigned int> (cloud.fields.size ()); ++d)
- {
- for (uint32_t c = 0; c < cloud.fields[d].count; ++c)
- {
- switch (cloud.fields[d].datatype)
- {
- case pcl::PCLPointField::INT8:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT8>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::UINT8:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT8>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::INT16:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT16>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::UINT16:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT16>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::INT32:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::INT32>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::UINT32:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::UINT32>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::FLOAT32:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::FLOAT32>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- case pcl::PCLPointField::FLOAT64:
- {
- if (!isValueFinite<pcl::traits::asType<pcl::PCLPointField::FLOAT64>::type>(cloud, i, point_size, d, c))
- cloud.is_dense = false;
- break;
- }
- }
- }
- }
- }
+ io::raw_close (fd);
}
double total_time = tt.toc ();
PCL_DEBUG ("[pcl::PCDReader::read] Loaded %s as a %s cloud in %g ms with %d points. Available dimensions: %s.\n",
file_name.c_str (), cloud.is_dense ? "dense" : "non-dense", total_time,
cloud.width * cloud.height, pcl::getFieldsList (cloud).c_str ());
- return (0);
+ return res;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-std::string
-pcl::PCDWriter::generateHeaderBinaryCompressed (const pcl::PCLPointCloud2 &cloud,
+int
+pcl::PCDWriter::generateHeaderBinaryCompressed (std::ostream &os,
+ const pcl::PCLPointCloud2 &cloud,
const Eigen::Vector4f &origin,
const Eigen::Quaternionf &orientation)
{
- std::ostringstream oss;
- oss.imbue (std::locale::classic ());
+ os.imbue (std::locale::classic ());
- oss << "# .PCD v0.7 - Point Cloud Data file format"
+ os << "# .PCD v0.7 - Point Cloud Data file format"
"\nVERSION 0.7"
"\nFIELDS";
if (fsize > cloud.point_step)
{
PCL_ERROR ("[pcl::PCDWriter::generateHeaderBinaryCompressed] The size of the fields (%d) is larger than point_step (%d)! Something is wrong here...\n", fsize, cloud.point_step);
- return ("");
+ return (-1);
}
std::stringstream field_names, field_types, field_sizes, field_counts;
if (count == 0) count = 1; // check for 0 counts (coming from older converter code)
field_counts << " " << count;
}
- oss << field_names.str ();
- oss << "\nSIZE" << field_sizes.str ()
+ os << field_names.str ();
+ os << "\nSIZE" << field_sizes.str ()
<< "\nTYPE" << field_types.str ()
<< "\nCOUNT" << field_counts.str ();
- oss << "\nWIDTH " << cloud.width << "\nHEIGHT " << cloud.height << "\n";
+ os << "\nWIDTH " << cloud.width << "\nHEIGHT " << cloud.height << "\n";
- oss << "VIEWPOINT " << origin[0] << " " << origin[1] << " " << origin[2] << " " << orientation.w () << " " <<
+ os << "VIEWPOINT " << origin[0] << " " << origin[1] << " " << origin[2] << " " << orientation.w () << " " <<
orientation.x () << " " << orientation.y () << " " << orientation.z () << "\n";
- oss << "POINTS " << cloud.width * cloud.height << "\n";
+ os << "POINTS " << cloud.width * cloud.height << "\n";
- return (oss.str ());
+ return (os ? 0 : -1);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+std::string
+pcl::PCDWriter::generateHeaderBinaryCompressed (const pcl::PCLPointCloud2 &cloud,
+ const Eigen::Vector4f &origin,
+ const Eigen::Quaternionf &orientation)
+{
+ std::ostringstream oss;
+ generateHeaderBinaryCompressed (oss, cloud, origin, orientation);
+ return oss.str ();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
setLockingPermissions (file_name, file_lock);
#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ int fd = io::raw_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0)
{
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during open (%s)!\n", file_name.c_str());
setLockingPermissions (file_name, file_lock);
// Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, getpagesize () + cloud.data.size () - 1, SEEK_SET);
+ long result = io::raw_lseek (fd, getpagesize () + cloud.data.size () - 1, SEEK_SET);
if (result < 0)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during lseek ()!\n");
result = static_cast<int> (::write (fd, "", 1));
if (result != 1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during write ()!\n");
return (-1);
char *map = static_cast<char*> (mmap (0, static_cast<size_t> (data_idx + cloud.data.size ()), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) // MAP_FAILED
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during mmap ()!\n");
return (-1);
#ifdef _WIN32
UnmapViewOfFile (map);
#else
- if (munmap (map, static_cast<size_t> (data_idx + cloud.data.size ())) == -1)
+ if (::munmap (map, static_cast<size_t> (data_idx + cloud.data.size ())) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinary] Error during munmap ()!\n");
return (-1);
#ifdef _WIN32
CloseHandle(h_native_file);
#else
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
return (0);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int
-pcl::PCDWriter::writeBinaryCompressed (const std::string &file_name, const pcl::PCLPointCloud2 &cloud,
+pcl::PCDWriter::writeBinaryCompressed (std::ostream &os, const pcl::PCLPointCloud2 &cloud,
const Eigen::Vector4f &origin, const Eigen::Quaternionf &orientation)
{
if (cloud.data.empty ())
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Input point cloud has no data!\n");
return (-1);
}
- std::streamoff data_idx = 0;
- std::ostringstream oss;
- oss.imbue (std::locale::classic ());
- oss << generateHeaderBinaryCompressed (cloud, origin, orientation) << "DATA binary_compressed\n";
- oss.flush ();
- data_idx = oss.tellp ();
-
-#ifdef _WIN32
- HANDLE h_native_file = CreateFile (file_name.c_str (), GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
- if (h_native_file == INVALID_HANDLE_VALUE)
- {
- PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during CreateFile (%s)!\n", file_name.c_str ());
- return (-1);
- }
-#else
- int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
- if (fd < 0)
+ if (generateHeaderBinaryCompressed (os, cloud, origin, orientation))
{
- PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during open (%s)!\n", file_name.c_str());
return (-1);
}
-#endif
- // Mandatory lock file
- boost::interprocess::file_lock file_lock;
- setLockingPermissions (file_name, file_lock);
size_t fsize = 0;
size_t data_size = 0;
// Compute the size of data
data_size = cloud.width * cloud.height * fsize;
+ // If the data is too large the two 32 bit integers used to store the
+ // compressed and uncompressed size will overflow.
+ if (data_size * 3 / 2 > std::numeric_limits<uint32_t>::max ())
+ {
+ PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] The input data exceeds the maximum size for compressed version 0.7 pcds of %l bytes.\n",
+ static_cast<size_t> (std::numeric_limits<uint32_t>::max ()) * 2 / 3);
+ return (-2);
+ }
+
//////////////////////////////////////////////////////////////////////
// Empty array holding only the valid data
// data_size = nr_points * point_size
// = nr_points * (sizeof_field_1 + sizeof_field_2 + ... sizeof_field_n)
// = sizeof_field_1 * nr_points + sizeof_field_2 * nr_points + ... sizeof_field_n * nr_points
- char *only_valid_data = static_cast<char*> (malloc (data_size));
+ std::vector<char> only_valid_data (data_size);
// Convert the XYZRGBXYZRGB structure to XXYYZZRGBRGB to aid compression. For
// this, we need a vector of fields.size () (4 in this case), which points to
}
}
- char* temp_buf = static_cast<char*> (malloc (static_cast<size_t> (static_cast<float> (data_size) * 1.5f + 8.0f)));
+ std::vector<char> temp_buf (data_size * 3 / 2 + 8);
// Compress the valid data
- unsigned int compressed_size = pcl::lzfCompress (only_valid_data,
+ unsigned int compressed_size = pcl::lzfCompress (&only_valid_data.front (),
static_cast<unsigned int> (data_size),
&temp_buf[8],
- static_cast<unsigned int> (static_cast<float> (data_size) * 1.5f));
- unsigned int compressed_final_size = 0;
+ data_size * 3 / 2);
// Was the compression successful?
- if (compressed_size)
+ if (compressed_size == 0)
{
- char *header = &temp_buf[0];
- memcpy (&header[0], &compressed_size, sizeof (unsigned int));
- memcpy (&header[4], &data_size, sizeof (unsigned int));
- data_size = compressed_size + 8;
- compressed_final_size = static_cast<unsigned int> (data_size + data_idx);
+ return (-1);
}
- else
+
+ memcpy (&temp_buf[0], &compressed_size, 4);
+ memcpy (&temp_buf[4], &data_size, 4);
+ temp_buf.resize (compressed_size + 8);
+
+ os.imbue (std::locale::classic ());
+ os << "DATA binary_compressed\n";
+ std::copy (temp_buf.begin (), temp_buf.end (), std::ostream_iterator<char> (os));
+ os.flush ();
+
+ return (os ? 0 : -1);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+int
+pcl::PCDWriter::writeBinaryCompressed (const std::string &file_name, const pcl::PCLPointCloud2 &cloud,
+ const Eigen::Vector4f &origin, const Eigen::Quaternionf &orientation)
+{
+ // Format output
+ std::ostringstream oss;
+ int status = writeBinaryCompressed (oss, cloud, origin, orientation);
+ if (status)
{
-#ifndef _WIN32
- pcl_close (fd);
-#endif
- resetLockingPermissions (file_name, file_lock);
throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during compression!");
+ return status;
+ }
+ std::string ostr = oss.str ();
+
+#ifdef _WIN32
+ HANDLE h_native_file = CreateFile (file_name.c_str (), GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (h_native_file == INVALID_HANDLE_VALUE)
+ {
+ PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during CreateFile (%s)!\n", file_name.c_str ());
+ return (-1);
+ }
+#else
+ int fd = io::raw_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0)
+ {
+ PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during open (%s)!\n", file_name.c_str ());
return (-1);
}
+#endif
+ // Mandatory lock file
+ boost::interprocess::file_lock file_lock;
+ setLockingPermissions (file_name, file_lock);
#ifndef _WIN32
// Stretch the file size to the size of the data
- off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
+ size_t page_size = getpagesize ();
+ size_t size_pages = ostr.size () / page_size;
+ size_t partial_pages = (size_pages * page_size < ostr.size ()) ? 1 : 0;
+ long result = io::raw_lseek (fd, (size_pages + partial_pages) * page_size - 1, SEEK_SET);
if (result < 0)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] lseek errno: %d strerror: %s\n", errno, strerror (errno));
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during lseek ()!\n");
result = static_cast<int> (::write (fd, "", 1));
if (result != 1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during write ()!\n");
return (-1);
// Prepare the map
#ifdef _WIN32
- HANDLE fm = CreateFileMapping (h_native_file, NULL, PAGE_READWRITE, 0, compressed_final_size, NULL);
- char *map = static_cast<char*> (MapViewOfFile (fm, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, compressed_final_size));
+ HANDLE fm = CreateFileMapping (h_native_file, NULL, PAGE_READWRITE, 0, ostr.size (), NULL);
+ char *map = static_cast<char*> (MapViewOfFile (fm, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, ostr.size ()));
CloseHandle (fm);
#else
- char *map = static_cast<char*> (mmap (0, compressed_final_size, PROT_WRITE, MAP_SHARED, fd, 0));
+ char *map = static_cast<char*> (::mmap (0, ostr.size (), PROT_WRITE, MAP_SHARED, fd, 0));
if (map == reinterpret_cast<char*> (-1)) // MAP_FAILED
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during mmap ()!\n");
return (-1);
}
#endif
- // copy header
- memcpy (&map[0], oss.str ().c_str (), static_cast<size_t> (data_idx));
- // Copy the compressed data
- memcpy (&map[data_idx], temp_buf, data_size);
+ // copy header + compressed data
+ memcpy (map, ostr.data (), ostr.size ());
#ifndef _WIN32
// If the user set the synchronization flag on, call msync
if (map_synchronization_)
- msync (map, compressed_final_size, MS_SYNC);
+ msync (map, ostr.size (), MS_SYNC);
#endif
// Unmap the pages of memory
#ifdef _WIN32
UnmapViewOfFile (map);
#else
- if (munmap (map, (compressed_final_size)) == -1)
+ if (::munmap (map, ostr.size ()) == -1)
{
- pcl_close (fd);
+ io::raw_close (fd);
resetLockingPermissions (file_name, file_lock);
PCL_ERROR ("[pcl::PCDWriter::writeBinaryCompressed] Error during munmap ()!\n");
return (-1);
#ifdef _WIN32
CloseHandle (h_native_file);
#else
- pcl_close (fd);
+ io::raw_close (fd);
#endif
resetLockingPermissions (file_name, file_lock);
- free (only_valid_data);
- free (temp_buf);
return (0);
}
{
if (error_callback_)
{
- error_callback_ (line_number_, "parse error: unkonwn scalar type");
+ error_callback_ (line_number_, "parse error: unknown scalar type");
}
return false;
}
cloud_->width = static_cast<uint32_t> (count);
cloud_->height = 1;
}
- cloud_->is_dense = false;
+ cloud_->is_dense = true;
cloud_->point_step = 0;
cloud_->row_step = 0;
vertex_count_ = 0;
bool
pcl::PLYReader::endHeaderCallback ()
{
- cloud_->data.resize (cloud_->point_step * cloud_->width * cloud_->height);
- return (cloud_->data.size () == cloud_->point_step * cloud_->width * cloud_->height);
+ cloud_->data.resize (static_cast<size_t>(cloud_->point_step) * cloud_->width * cloud_->height);
+ return (true);
}
template<typename Scalar> void
template<typename Scalar> void
PLYReader::vertexScalarPropertyCallback (Scalar value)
{
+ if (!pcl_isfinite (value))
+ cloud_->is_dense = false;
+
memcpy (&cloud_->data[vertex_count_ * cloud_->point_step + vertex_offset_before_],
&value,
sizeof (Scalar));
template<typename ContentType> void
PLYReader::vertexListPropertyContentCallback (ContentType value)
{
+ if (!pcl_isfinite (value))
+ cloud_->is_dense = false;
+
memcpy (&cloud_->data[vertex_count_ * cloud_->point_step + vertex_offset_before_],
&value,
sizeof (ContentType));
void
pcl::PLYReader::vertexColorCallback (const std::string& color_name, pcl::io::ply::uint8 color)
{
- static int32_t r, g, b;
if ((color_name == "red") || (color_name == "diffuse_red"))
{
- r = int32_t (color);
+ r_ = int32_t (color);
rgb_offset_before_ = vertex_offset_before_;
}
if ((color_name == "green") || (color_name == "diffuse_green"))
{
- g = int32_t (color);
+ g_ = int32_t (color);
}
if ((color_name == "blue") || (color_name == "diffuse_blue"))
{
- b = int32_t (color);
- int32_t rgb = r << 16 | g << 8 | b;
+ b_ = int32_t (color);
+ int32_t rgb = r_ << 16 | g_ << 8 | b_;
memcpy (&cloud_->data[vertex_count_ * cloud_->point_step + rgb_offset_before_],
&rgb,
sizeof (pcl::io::ply::float32));
void
pcl::PLYReader::vertexAlphaCallback (pcl::io::ply::uint8 alpha)
{
- static uint32_t a, rgba;
- a = uint32_t (alpha);
+ a_ = uint32_t (alpha);
// get anscient rgb value and store it in rgba
- memcpy (&rgba,
+ memcpy (&rgba_,
&cloud_->data[vertex_count_ * cloud_->point_step + rgb_offset_before_],
sizeof (pcl::io::ply::float32));
// append alpha
- rgba = rgba | a << 24;
+ rgba_ = rgba_ | a_ << 24;
// put rgba back
memcpy (&cloud_->data[vertex_count_ * cloud_->point_step + rgb_offset_before_],
- &rgba,
+ &rgba_,
sizeof (uint32_t));
}
bool
pcl::PLYReader::parse (const std::string& istream_filename)
{
- pcl::io::ply::ply_parser::flags_type ply_parser_flags = 0;
- pcl::io::ply::ply_parser ply_parser (ply_parser_flags);
+ pcl::io::ply::ply_parser ply_parser;
ply_parser.info_callback (boost::bind (&pcl::PLYReader::infoCallback, this, boost::ref (istream_filename), _1, _2));
ply_parser.warning_callback (boost::bind (&pcl::PLYReader::warningCallback, this, boost::ref (istream_filename), _1, _2));
PCL_ERROR ("[pcl::io::savePNGFile] Unsupported image encoding \"%s\".\n", image.encoding.c_str ());
}
}
-
-void
-pcl::io::savePNGFile (const std::string& file_name, const pcl::PointCloud<pcl::PointXYZL>& cloud)
-{
- std::vector<unsigned short> data(cloud.width * cloud.height);
- for (size_t i = 0; i < cloud.points.size (); ++i)
- {
- data[i] = static_cast<unsigned short> (cloud.points[i].label);
- }
- saveShortPNGFile(file_name, &data[0], cloud.width, cloud.height,1);
-}
-
frequency_.event ();
fps_mutex_.unlock ();
- /* We preform the following steps to convert received data into point clouds:
+ /* We perform the following steps to convert received data into point clouds:
*
* 1. Push depth image to the depth buffer
* 2. Pull filtered depth image from the depth buffer
using boost::asio::ip::udp;
-#define VLP_MAX_NUM_LASERS 16
-#define VLP_DUAL_MODE 0x39
-
/////////////////////////////////////////////////////////////////////////////
pcl::VLPGrabber::VLPGrabber (const std::string& pcapFile) :
HDLGrabber ("", pcapFile)
{
+ initializeLaserMapping ();
loadVLP16Corrections ();
}
/////////////////////////////////////////////////////////////////////////////
pcl::VLPGrabber::VLPGrabber (const boost::asio::ip::address& ipAddress,
- const unsigned short int port) :
+ const uint16_t port) :
HDLGrabber (ipAddress, port)
{
+ initializeLaserMapping ();
loadVLP16Corrections ();
}
/////////////////////////////////////////////////////////////////////////////
void
-pcl::VLPGrabber::loadVLP16Corrections ()
+pcl::VLPGrabber::initializeLaserMapping ()
{
- double vlp16_vertical_corrections[] = { -15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15
+ for (uint8_t i = 0; i < VLP_MAX_NUM_LASERS / 2u; i++)
+ {
+ laser_rgb_mapping_[i * 2].b = static_cast<uint8_t> (i * 6 + 64);
+ laser_rgb_mapping_[i * 2 + 1].b = static_cast<uint8_t> ((i + 16) * 6 + 64);
+ }
+}
- };
- for (int i = 0; i < VLP_MAX_NUM_LASERS; i++)
+/////////////////////////////////////////////////////////////////////////////
+void
+pcl::VLPGrabber::loadVLP16Corrections ()
+{
+ double vlp16_vertical_corrections[] = { -15, 1, -13, 3, -11, 5, -9, 7, -7, 9, -5, 11, -3, 13, -1, 15 };
+ for (uint8_t i = 0; i < VLP_MAX_NUM_LASERS; i++)
{
HDLGrabber::laser_corrections_[i].azimuthCorrection = 0.0;
HDLGrabber::laser_corrections_[i].distanceCorrection = 0.0;
double interpolated_azimuth_delta;
- int index = 1;
+ uint8_t index = 1;
if (dataPacket->mode == VLP_DUAL_MODE)
{
index = 2;
}
if (dataPacket->firingData[index].rotationalPosition < dataPacket->firingData[0].rotationalPosition)
{
- interpolated_azimuth_delta = ( (dataPacket->firingData[index].rotationalPosition + 36000) - dataPacket->firingData[0].rotationalPosition) / 2.0;
+ interpolated_azimuth_delta = ((dataPacket->firingData[index].rotationalPosition + 36000) - dataPacket->firingData[0].rotationalPosition) / 2.0;
}
else
{
interpolated_azimuth_delta = (dataPacket->firingData[index].rotationalPosition - dataPacket->firingData[0].rotationalPosition) / 2.0;
}
- for (int i = 0; i < HDL_FIRING_PER_PKT; ++i)
+ for (uint8_t i = 0; i < HDL_FIRING_PER_PKT; ++i)
{
HDLFiringData firing_data = dataPacket->firingData[i];
- for (int j = 0; j < HDL_LASER_PER_FIRING; j++)
+ for (uint8_t j = 0; j < HDL_LASER_PER_FIRING; j++)
{
double current_azimuth = firing_data.rotationalPosition;
if (j >= VLP_MAX_NUM_LASERS)
{
if (current_sweep_xyz_->size () > 0)
{
- current_sweep_xyz_->is_dense = current_sweep_xyzi_->is_dense = false;
+ current_sweep_xyz_->is_dense = current_sweep_xyzrgba_->is_dense = current_sweep_xyzi_->is_dense = false;
current_sweep_xyz_->header.stamp = velodyne_time;
+ current_sweep_xyzrgba_->header.stamp = velodyne_time;
current_sweep_xyzi_->header.stamp = velodyne_time;
current_sweep_xyz_->header.seq = sweep_counter;
+ current_sweep_xyzrgba_->header.seq = sweep_counter;
current_sweep_xyzi_->header.seq = sweep_counter;
sweep_counter++;
HDLGrabber::fireCurrentSweep ();
}
current_sweep_xyz_.reset (new pcl::PointCloud<pcl::PointXYZ> ());
+ current_sweep_xyzrgba_.reset (new pcl::PointCloud<pcl::PointXYZRGBA> ());
current_sweep_xyzi_.reset (new pcl::PointCloud<pcl::PointXYZI> ());
}
PointXYZ xyz;
PointXYZI xyzi;
+ PointXYZRGBA xyzrgba;
PointXYZ dual_xyz;
PointXYZI dual_xyzi;
+ PointXYZRGBA dual_xyzrgba;
HDLGrabber::computeXYZI (xyzi, current_azimuth, firing_data.laserReturns[j], laser_corrections_[j % VLP_MAX_NUM_LASERS]);
- xyz.x = xyzi.x;
- xyz.y = xyzi.y;
- xyz.z = xyzi.z;
+ xyz.x = xyzrgba.x = xyzi.x;
+ xyz.y = xyzrgba.y = xyzi.y;
+ xyz.z = xyzrgba.z = xyzi.z;
+
+ xyzrgba.rgba = laser_rgb_mapping_[j % VLP_MAX_NUM_LASERS].rgba;
if (dataPacket->mode == VLP_DUAL_MODE)
{
HDLGrabber::computeXYZI (dual_xyzi, current_azimuth, dataPacket->firingData[i + 1].laserReturns[j], laser_corrections_[j % VLP_MAX_NUM_LASERS]);
- dual_xyz.x = dual_xyzi.x;
- dual_xyz.y = dual_xyzi.y;
- dual_xyz.z = dual_xyzi.z;
+ dual_xyz.x = dual_xyzrgba.x = dual_xyzi.x;
+ dual_xyz.y = dual_xyzrgba.y = dual_xyzi.y;
+ dual_xyz.z = dual_xyzrgba.z = dual_xyzi.z;
+ dual_xyzrgba.rgba = laser_rgb_mapping_[j % VLP_MAX_NUM_LASERS].rgba;
}
if (! (pcl_isnan (xyz.x) || pcl_isnan (xyz.y) || pcl_isnan (xyz.z)))
{
current_sweep_xyz_->push_back (xyz);
+ current_sweep_xyzrgba_->push_back (xyzrgba);
current_sweep_xyzi_->push_back (xyzi);
last_azimuth_ = current_azimuth;
}
if (dataPacket->mode == VLP_DUAL_MODE)
{
- if ( (dual_xyz.x != xyz.x || dual_xyz.y != xyz.y || dual_xyz.z != xyz.z)
+ if ((dual_xyz.x != xyz.x || dual_xyz.y != xyz.y || dual_xyz.z != xyz.z)
&& ! (pcl_isnan (dual_xyz.x) || pcl_isnan (dual_xyz.y) || pcl_isnan (dual_xyz.z)))
{
current_sweep_xyz_->push_back (dual_xyz);
+ current_sweep_xyzrgba_->push_back (dual_xyzrgba);
current_sweep_xyzi_->push_back (dual_xyzi);
}
}
return (std::string ("Velodyne LiDAR (VLP) Grabber"));
}
+/////////////////////////////////////////////////////////////////////////////
+void
+pcl::VLPGrabber::setLaserColorRGB (const pcl::RGB& color,
+ const uint8_t laserNumber)
+{
+ if (laserNumber >= VLP_MAX_NUM_LASERS)
+ return;
+
+ laser_rgb_mapping_[laserNumber] = color;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+uint8_t
+pcl::VLPGrabber::getMaximumNumberOfLasers () const
+{
+ return (VLP_MAX_NUM_LASERS);
+}
// Then the color information, if any
vtkUnsignedCharArray* poly_colors = NULL;
if (poly_data->GetPointData() != NULL)
+ {
poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("Colors"));
- // some applications do not save the name of scalars (including PCL's native vtk_io)
- if (!poly_colors && poly_data->GetPointData () != NULL)
- poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("scalars"));
+ // some applications do not save the name of scalars (including PCL's native vtk_io)
+ if (!poly_colors)
+ poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("scalars"));
+
+ if (!poly_colors)
+ poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("RGB"));
- if (!poly_colors && poly_data->GetPointData () != NULL)
- poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("RGB"));
+ if (!poly_colors)
+ poly_colors = vtkUnsignedCharArray::SafeDownCast (poly_data->GetPointData ()->GetScalars ("RGBA"));
+ }
- // TODO: currently only handles rgb values with 3 components
- if (poly_colors && (poly_colors->GetNumberOfComponents () == 3))
+ if (poly_colors && (poly_colors->GetNumberOfComponents () == 3 || poly_colors->GetNumberOfComponents () == 4))
{
pcl::PointCloud<pcl::RGB>::Ptr rgb_cloud (new pcl::PointCloud<pcl::RGB> ());
rgb_cloud->points.resize (nr_points);
rgb_cloud->height = 1;
rgb_cloud->is_dense = true;
- unsigned char point_color[3];
+ unsigned char point_color[4] = {0, 0, 0, 255};
for (vtkIdType i = 0; i < mesh_points->GetNumberOfPoints (); i++)
{
poly_colors->GetTupleValue (i, &point_color[0]);
- rgb_cloud->points[i].r = point_color[0];
- rgb_cloud->points[i].g = point_color[1];
- rgb_cloud->points[i].b = point_color[2];
+ // individual component copy due to different memory layout
+ (*rgb_cloud)[i].r = point_color[0];
+ (*rgb_cloud)[i].g = point_color[1];
+ (*rgb_cloud)[i].b = point_color[2];
+ (*rgb_cloud)[i].a = point_color[3];
}
pcl::PCLPointCloud2 rgb_cloud2;
* @param argv[in]
*/
void
-displayHelp (int argc,
+displayHelp (int,
char** argv)
{
PCL_INFO ("\nUsage: %s [OPTION] SOURCE DEST\n", argv[0]);
{
boost::mutex::scoped_lock io_lock (io_mutex);
- print_info ("Writing remaing %ld clouds in the buffer to disk...\n", buf_.getSize ());
+ print_info ("Writing remaining %ld clouds in the buffer to disk...\n", buf_.getSize ());
}
while (!buf_.isEmpty ())
writeToDisk (buf_.getFront ());
bool
ply_to_obj_converter::convert (std::istream&, const std::string& istream_filename, std::ostream& ostream, const std::string&)
{
- pcl::io::ply::ply_parser::flags_type ply_parser_flags = 0;
- pcl::io::ply::ply_parser ply_parser (ply_parser_flags);
+ pcl::io::ply::ply_parser ply_parser;
ply_parser.info_callback (boost::bind (&ply_to_obj_converter::info_callback, this, boost::ref (istream_filename), _1, _2));
ply_parser.warning_callback (boost::bind (&ply_to_obj_converter::warning_callback, this, boost::ref (istream_filename), _1, _2));
bool
ply_to_ply_converter::convert (const std::string &ifilename, std::istream&, std::ostream& ostream)
{
- pcl::io::ply::ply_parser::flags_type ply_parser_flags = 0;
-
- pcl::io::ply::ply_parser ply_parser(ply_parser_flags);
+ pcl::io::ply::ply_parser ply_parser;
ply_parser.info_callback(boost::bind(&ply_to_ply_converter::info_callback, this, boost::ref(ifilename), _1, _2));
ply_parser.warning_callback(boost::bind(&ply_to_ply_converter::warning_callback, this, boost::ref(ifilename), _1, _2));
bool
ply_to_raw_converter::convert (std::istream&, const std::string& istream_filename, std::ostream& ostream, const std::string&)
{
- pcl::io::ply::ply_parser::flags_type ply_parser_flags = 0;
- pcl::io::ply::ply_parser ply_parser (ply_parser_flags);
+ pcl::io::ply::ply_parser ply_parser;
ply_parser.info_callback (boost::bind (&ply_to_raw_converter::info_callback, this, boost::ref (istream_filename), _1, _2));
ply_parser.warning_callback (boost::bind (&ply_to_raw_converter::warning_callback, this, boost::ref (istream_filename), _1, _2));
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename Dist>
-pcl::KdTreeFLANN<PointT, Dist>::KdTreeFLANN (const KdTreeFLANN<PointT> &k)
+pcl::KdTreeFLANN<PointT, Dist>::KdTreeFLANN (const KdTreeFLANN<PointT, Dist> &k)
: pcl::KdTree<PointT> (false)
, flann_index_ (), cloud_ ()
, index_mapping_ (), identity_mapping_ (false)
typedef ::flann::Index<Dist> FLANNIndex;
// Boost shared pointers
- typedef boost::shared_ptr<KdTreeFLANN<PointT> > Ptr;
- typedef boost::shared_ptr<const KdTreeFLANN<PointT> > ConstPtr;
+ typedef boost::shared_ptr<KdTreeFLANN<PointT, Dist> > Ptr;
+ typedef boost::shared_ptr<const KdTreeFLANN<PointT, Dist> > ConstPtr;
/** \brief Default Constructor for KdTreeFLANN.
* \param[in] sorted set to true if the application that the tree will be used for requires sorted nearest neighbor indices (default). False otherwise.
/** \brief Copy constructor
* \param[in] k the tree to copy into this
*/
- KdTreeFLANN (const KdTreeFLANN<PointT> &k);
+ KdTreeFLANN (const KdTreeFLANN<PointT, Dist> &k);
/** \brief Copy operator
* \param[in] k the tree to copy into this
*/
- inline KdTreeFLANN<PointT>&
- operator = (const KdTreeFLANN<PointT>& k)
+ inline KdTreeFLANN<PointT, Dist>&
+ operator = (const KdTreeFLANN<PointT, Dist>& k)
{
KdTree<PointT>::operator=(k);
flann_index_ = k.flann_index_;
void
setSortedResults (bool sorted);
- inline Ptr makeShared () { return Ptr (new KdTreeFLANN<PointT> (*this)); }
+ inline Ptr makeShared () { return Ptr (new KdTreeFLANN<PointT, Dist> (*this)); }
/** \brief Destructor for KdTreeFLANN.
* Deletes all allocated data arrays and destroys the kd-tree structures.
src/susan.cpp
src/iss_3d.cpp
src/brisk_2d.cpp
+ src/trajkovic_2d.cpp
+ src/trajkovic_3d.cpp
)
set(incs
"include/pcl/${SUBSYS_NAME}/keypoint.h"
"include/pcl/${SUBSYS_NAME}/susan.h"
"include/pcl/${SUBSYS_NAME}/iss_3d.h"
"include/pcl/${SUBSYS_NAME}/brisk_2d.h"
+ "include/pcl/${SUBSYS_NAME}/trajkovic_2d.h"
+ "include/pcl/${SUBSYS_NAME}/trajkovic_3d.h"
)
set(impl_incs
"include/pcl/${SUBSYS_NAME}/impl/keypoint.hpp"
"include/pcl/${SUBSYS_NAME}/impl/susan.hpp"
"include/pcl/${SUBSYS_NAME}/impl/iss_3d.hpp"
"include/pcl/${SUBSYS_NAME}/impl/brisk_2d.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/trajkovic_2d.hpp"
+ "include/pcl/${SUBSYS_NAME}/impl/trajkovic_3d.hpp"
)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
nr_max_keypoints_ = nr_max_keypoints;
}
- /** \brief Get the maximum nuber of keypoints to return, as set by the user. */
+ /** \brief Get the maximum number of keypoints to return, as set by the user. */
inline unsigned int
getMaxKeypoints ()
{
nr_max_keypoints_ = nr_max_keypoints;
}
- /** \brief Get the maximum nuber of keypoints to return, as set by the user. */
+ /** \brief Get the maximum number of keypoints to return, as set by the user. */
inline unsigned int
getMaxKeypoints ()
{
bool refine_;
/// non maximas suppression
bool nonmax_;
- /// cornerness computation methode
+ /// cornerness computation method
ResponseMethod method_;
/// number of threads to be used
unsigned int threads_;
#ifdef _OPENMP
#pragma omp parallel for shared (output) num_threads (threads_)
#endif
- for (size_t i = 0; i < indices.size (); ++i)
+ for (int i = 0; i < static_cast<int>(indices.size ()); ++i)
{
- int idx = indices[i];
+ int idx = indices[static_cast<size_t>(i)];
if ((response_->points[idx] < second_threshold_) || occupency_map[idx])
continue;
namespace pcl
{
- /** \brief SUSANKeypoint implements a RGB-D extension of the SUSAN detector inluding normal
+ /** \brief SUSANKeypoint implements a RGB-D extension of the SUSAN detector including normal
* directions variation in top of intensity variation.
* It is different from Harris in that it exploits normals directly so it is faster.
* Original paper "SUSAN — A New Approach to Low Level Image Processing", Smith,
if(build)
set(incs
- "include/pcl/${SUBSYS_NAME}/dt/decision_forest.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_forest_evaluator.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_forest_trainer.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_tree.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_tree_evaluator.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_tree_trainer.h"
- "include/pcl/${SUBSYS_NAME}/dt/decision_tree_data_provider.h"
- "include/pcl/${SUBSYS_NAME}/ferns/fern.h"
- "include/pcl/${SUBSYS_NAME}/ferns/fern_evaluator.h"
- "include/pcl/${SUBSYS_NAME}/ferns/fern_trainer.h"
"include/pcl/${SUBSYS_NAME}/branch_estimator.h"
"include/pcl/${SUBSYS_NAME}/feature_handler.h"
"include/pcl/${SUBSYS_NAME}/multi_channel_2d_comparison_feature.h"
"include/pcl/${SUBSYS_NAME}/kmeans.h"
)
- set(impl_incs
+ set(dt_incs
+ "include/pcl/${SUBSYS_NAME}/dt/decision_forest.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_forest_evaluator.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_forest_trainer.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_tree.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_tree_evaluator.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_tree_trainer.h"
+ "include/pcl/${SUBSYS_NAME}/dt/decision_tree_data_provider.h"
+ )
+
+ set(ferns_incs
+ "include/pcl/${SUBSYS_NAME}/ferns/fern.h"
+ "include/pcl/${SUBSYS_NAME}/ferns/fern_evaluator.h"
+ "include/pcl/${SUBSYS_NAME}/ferns/fern_trainer.h"
+ )
+
+ set(dt_impl_incs
"include/pcl/${SUBSYS_NAME}/impl/dt/decision_forest_evaluator.hpp"
"include/pcl/${SUBSYS_NAME}/impl/dt/decision_forest_trainer.hpp"
"include/pcl/${SUBSYS_NAME}/impl/dt/decision_tree_evaluator.hpp"
"include/pcl/${SUBSYS_NAME}/impl/dt/decision_tree_trainer.hpp"
+ )
+
+ set(ferns_impl_incs
"include/pcl/${SUBSYS_NAME}/impl/ferns/fern_evaluator.hpp"
"include/pcl/${SUBSYS_NAME}/impl/ferns/fern_trainer.hpp"
+ )
+
+ set(svm_impl_incs
"include/pcl/${SUBSYS_NAME}/impl/svm/svm_wrapper.hpp"
- #"include/pcl/${SUBSYS_NAME}/impl/kmeans.hpp"
)
set(srcs
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}" "${SUBSYS_DEPS}" "" "" "" "")
# Install include files
PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}" ${incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/dt" ${dt_incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/ferns" ${ferns_incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl/dt" ${dt_impl_incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl/ferns" ${ferns_impl_incs})
+ PCL_ADD_INCLUDES("${SUBSYS_NAME}" "${SUBSYS_NAME}/impl/svm" ${svm_impl_incs})
endif(build)
/** \brief Generates evaluation code for the specified feature and writes it to the specified stream.
* \param[in] feature The feature for which code is generated.
- * \param[out] stream The destionation for the code.
+ * \param[out] stream The destination for the code.
*/
virtual void
generateCodeForEvaluation (const FeatureType & feature,
}
- // if cluster field name is set, check if field name is valied
+ // if cluster field name is set, check if field name is valid
else
{
user_index = pcl::getFieldIndex (point, cluster_field_name_.c_str (), fields);
* \param[in] data_set The data set corresponding to the supplied result data.
* \param[in] examples The examples used for extracting the supplied result data.
* \param[in] label_data The label data corresponding to the specified examples.
- * \param[in] results The results computed using the specifed examples.
+ * \param[in] results The results computed using the specified examples.
* \param[in] flags The flags corresponding to the results.
* \param[in] threshold The threshold for which the information gain is computed.
*/
/** \brief Generates code for computing the branch indices for the specified node and writes it to the specified stream.
* \param[in] node The node for which the branch index estimation code is generated.
- * \param[out] stream The destionation for the code.
+ * \param[out] stream The destination for the code.
*/
virtual void
generateCodeForBranchIndexComputation (NodeType & node,
/** \brief Generates code for computing the output for the specified node and writes it to the specified stream.
* \param[in] node The node for which the output estimation code is generated.
- * \param[out] stream The destionation for the code.
+ * \param[out] stream The destination for the code.
*/
virtual void
generateCodeForOutput (NodeType & node,
}
};
- /** \brief The structure initialize a model crated by the SVM (Support Vector Machines) classifier (pcl::SVMTrain)
+ /** \brief The structure initialize a model created by the SVM (Support Vector Machines) classifier (pcl::SVMTrain)
*/
struct SVMModel: svm_model
{
{
num_points_in_cluster = 0;
- // For earch PointId in this set
+ // For each PointId in this set
BOOST_FOREACH(SetPoints::value_type pid, clusters_to_points_[cid])
{
Point p = data_[pid];
//Point p = ps__.getPoint(pid);
for (i=0; i<num_dimensions_; i++)
- centroid[i] += p[i];
+ centroid[i] += p[i];
num_points_in_cluster++;
}
// if no point in the clusters, this goes to inf (correct!)
/*
}
- // if cluster field name is set, check if field name is valied
+ // if cluster field name is set, check if field name is valid
else
{
user_index = pcl::getFieldIndex (point, cluster_field_name_.c_str (), fields);
return f;
}
-// Platt's binary SVM Probablistic Output: an improvement from Lin et al.
+// Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
static void sigmoid_train (
int l, const double *dec_values, const double *labels,
double& A, double& B)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${impl_incs})
- target_link_libraries("${LIB_NAME}")
+ target_link_libraries("${LIB_NAME}" pcl_common)
target_include_directories(${LIB_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
PCL_MAKE_PKGCONFIG("${LIB_NAME}" "${SUBSYS_NAME}" "${SUBSYS_DESC}"
"${SUBSYS_DEPS}" "" "" "" "")
child_branch = static_cast<BranchNode*> (child_node);
branch_arg->setChildPtr(buffer_selector_, child_idx, child_node);
} else {
- // depth has changed.. child in preceeding buffer is a leaf node.
+ // depth has changed.. child in preceding buffer is a leaf node.
deleteBranchChild (*branch_arg, !buffer_selector_, child_idx);
child_branch = createBranchChild (*branch_arg, child_idx);
}
child_leaf = static_cast<LeafNode*> (child_node);
branch_arg->setChildPtr(buffer_selector_, child_idx, child_node);
} else {
- // depth has changed.. child in preceeding buffer is a leaf node.
+ // depth has changed.. child in preceding buffer is a leaf node.
deleteBranchChild (*branch_arg, !buffer_selector_, child_idx);
child_leaf = createLeafChild (*branch_arg, child_idx);
}
child_branch = static_cast<BranchNode*> (child_node);
branch_arg->setChildPtr(buffer_selector_, child_idx, child_node);
} else {
- // depth has changed.. child in preceeding buffer is a leaf node.
+ // depth has changed.. child in preceding buffer is a leaf node.
deleteBranchChild (*branch_arg, !buffer_selector_, child_idx);
child_branch = createBranchChild (*branch_arg, child_idx);
}
child_leaf = static_cast<LeafNode*> (child_node);
branch_arg->setChildPtr(buffer_selector_, child_idx, child_node);
} else {
- // depth has changed.. child in preceeding buffer is a leaf node.
+ // depth has changed.. child in preceding buffer is a leaf node.
deleteBranchChild (*branch_arg, !buffer_selector_, child_idx);
child_leaf = createLeafChild (*branch_arg, child_idx);
}
assert(max_voxel_index_arg>0);
// tree depth == bitlength of maxVoxels
- tree_depth = std::max ( (std::min (static_cast<unsigned int> (OctreeKey::maxDepth), static_cast<unsigned int> (std::ceil (Log2 (max_voxel_index_arg))))), 0u);
+ tree_depth = std::min (static_cast<unsigned int> (OctreeKey::maxDepth), static_cast<unsigned int> (std::ceil (Log2 (max_voxel_index_arg))));
// define depthMask_ by setting a single bit to 1 at bit position == tree depth
depth_mask_ = (1 << (tree_depth - 1));
#ifndef PCL_OCTREE_ITERATOR_HPP_
#define PCL_OCTREE_ITERATOR_HPP_
+#include <pcl/console/print.h>
+
namespace pcl
{
namespace octree
this->reset ();
}
- //////////////////////////////////////////////////////////////////////////////////////////////
- template<typename OctreeT>
- OctreeDepthFirstIterator<OctreeT>::~OctreeDepthFirstIterator ()
- {
- }
-
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename OctreeT>
void OctreeDepthFirstIterator<OctreeT>::reset ()
if ( (this->max_octree_depth_>=stack_entry.depth_) &&
(stack_entry.node_->getNodeType () == BRANCH_NODE) )
{
- unsigned char child_idx;
-
// current node is a branch node
BranchNode* current_branch =
static_cast<BranchNode*> (stack_entry.node_);
// add all children to stack
- for (child_idx = 0; child_idx < 8; ++child_idx)
+ for (int8_t i = 7; i >= 0; --i)
{
+ const unsigned char child_idx = (unsigned char) i;
// if child exist
-
if (this->octree_->branchHasChild(*current_branch, child_idx))
{
// add child to stack
this->reset ();
}
- //////////////////////////////////////////////////////////////////////////////////////////////
- template<typename OctreeT>
- OctreeBreadthFirstIterator<OctreeT>::~OctreeBreadthFirstIterator ()
- {
- }
-
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename OctreeT>
void OctreeBreadthFirstIterator<OctreeT>::reset ()
return (*this);
}
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeFixedDepthIterator<OctreeT>::OctreeFixedDepthIterator () :
+ OctreeBreadthFirstIterator<OctreeT> (0u), fixed_depth_ (0u)
+ {}
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeFixedDepthIterator<OctreeT>::OctreeFixedDepthIterator (OctreeT* octree_arg, unsigned int fixed_depth_arg) :
+ OctreeBreadthFirstIterator<OctreeT> (octree_arg, fixed_depth_arg), fixed_depth_ (fixed_depth_arg)
+ {
+ this->reset (fixed_depth_arg);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ void OctreeFixedDepthIterator<OctreeT>::reset (unsigned int fixed_depth_arg)
+ {
+ // Set the desired depth to walk through
+ fixed_depth_ = fixed_depth_arg;
+
+ if (!this->octree_)
+ {
+ return;
+ }
+
+ // If I'm nowhere, reset
+ // If I'm somewhere and I can't guarantee I'm before the first node, reset
+ if ((!this->current_state_) || (fixed_depth_ <= this->getCurrentOctreeDepth ()))
+ OctreeBreadthFirstIterator<OctreeT>::reset ();
+
+ if (this->octree_->getTreeDepth () < fixed_depth_)
+ {
+ PCL_WARN ("[pcl::octree::FixedDepthIterator] The requested fixed depth was bigger than the octree's depth.\n");
+ PCL_WARN ("[pcl::octree::FixedDepthIterator] fixed_depth = %d (instead of %d)\n", this->octree_->getTreeDepth (), fixed_depth_);
+ }
+
+ // By default for the parent class OctreeBreadthFirstIterator, if the
+ // depth argument is equal to 0, the iterator would run over every node.
+ // For the OctreeFixedDepthIterator, whatever the depth ask, set the
+ // max_octree_depth_ accordingly
+ this->max_octree_depth_ = std::min (fixed_depth_, this->octree_->getTreeDepth ());
+
+ // Restore previous state in case breath first iterator had child nodes already set up
+ if (FIFO_.size ())
+ this->current_state_ = &FIFO_.front ();
+
+ // Iterate all the way to the desired level
+ while (this->current_state_ && (this->getCurrentOctreeDepth () != fixed_depth_))
+ OctreeBreadthFirstIterator<OctreeT>::operator++ ();
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator (unsigned int max_depth_arg) :
+ OctreeBreadthFirstIterator<OctreeT> (max_depth_arg)
+ {
+ reset ();
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator (OctreeT* octree_arg, unsigned int max_depth_arg) :
+ OctreeBreadthFirstIterator<OctreeT> (octree_arg, max_depth_arg)
+ {
+ reset ();
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>::OctreeLeafNodeBreadthFirstIterator (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state,
+ const std::deque<IteratorState>& fifo)
+ : OctreeBreadthFirstIterator<OctreeT> (octree_arg,
+ max_depth_arg,
+ current_state,
+ fifo)
+ {}
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ void OctreeLeafNodeBreadthFirstIterator<OctreeT>::reset ()
+ {
+ OctreeBreadthFirstIterator<OctreeT>::reset ();
+ ++*this;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>&
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>::operator++ ()
+ {
+ do
+ {
+ OctreeBreadthFirstIterator<OctreeT>::operator++ ();
+ } while ((this->current_state_) && (this->current_state_->node_->getNodeType () != LEAF_NODE));
+
+ return (*this);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>
+ OctreeLeafNodeBreadthFirstIterator<OctreeT>::operator++ (int)
+ {
+ OctreeLeafNodeBreadthFirstIterator _Tmp = *this;
+ ++*this;
+ return (_Tmp);
+ }
}
}
template<typename PointT, typename LeafContainerT, typename BranchContainerT, typename OctreeT> bool
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::isVoxelOccupiedAtPoint (const PointT& point_arg) const
{
+ if (!isPointWithinBoundingBox (point_arg))
+ {
+ return false;
+ }
+
OctreeKey key;
// generate key for point
this->genOctreeKeyforPoint (point_arg, key);
// search for key in octree
- return (isPointWithinBoundingBox (point_arg) && this->existLeaf (key));
+ return (this->existLeaf (key));
}
//////////////////////////////////////////////////////////////////////////////////////////////
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::isVoxelOccupiedAtPoint (
const double point_x_arg, const double point_y_arg, const double point_z_arg) const
{
- OctreeKey key;
-
- // generate key for point
- this->genOctreeKeyforPoint (point_x_arg, point_y_arg, point_z_arg, key);
+ // create a new point with the argument coordinates
+ PointT point;
+ point.x = point_x_arg;
+ point.y = point_y_arg;
+ point.z = point_z_arg;
- return (this->existLeaf (key));
+ // search for voxel at point in octree
+ return (this->isVoxelOccupiedAtPoint (point));
}
//////////////////////////////////////////////////////////////////////////////////////////////
template<typename PointT, typename LeafContainerT, typename BranchContainerT, typename OctreeT> void
pcl::octree::OctreePointCloud<PointT, LeafContainerT, BranchContainerT, OctreeT>::deleteVoxelAtPoint (const PointT& point_arg)
{
+ if (!isPointWithinBoundingBox (point_arg))
+ {
+ return;
+ }
+
OctreeKey key;
// generate key for point
OctreePointCloud<PointT, LeafContainerT, BranchContainerT>::addPointsFromInputCloud ();
LeafContainerT *leaf_container;
- typename OctreeAdjacencyT::LeafNodeIterator leaf_itr;
+ typename OctreeAdjacencyT::LeafNodeDepthFirstIterator leaf_itr;
leaf_vector_.reserve (this->getLeafCount ());
- for ( leaf_itr = this->leaf_begin () ; leaf_itr != this->leaf_end (); ++leaf_itr)
+ for ( leaf_itr = this->leaf_depth_begin () ; leaf_itr != this->leaf_depth_end (); ++leaf_itr)
{
OctreeKey leaf_key = leaf_itr.getCurrentOctreeKey ();
leaf_container = &(leaf_itr.getLeafContainer ());
voxel_adjacency_graph.clear ();
//Add a vertex for each voxel, store ids in map
std::map <LeafContainerT*, VoxelID> leaf_vertex_id_map;
- for (typename OctreeAdjacencyT::LeafNodeIterator leaf_itr = this->leaf_begin () ; leaf_itr != this->leaf_end (); ++leaf_itr)
+ for (typename OctreeAdjacencyT::LeafNodeDepthFirstIterator leaf_itr = this->leaf_depth_begin () ; leaf_itr != this->leaf_depth_end (); ++leaf_itr)
{
OctreeKey leaf_key = leaf_itr.getCurrentOctreeKey ();
PointT centroid_point;
OctreeKey key;
key.x = key.y = key.z = 0;
- // initalize smallest point distance in search with high value
+ // initialize smallest point distance in search with high value
double smallest_dist = std::numeric_limits<double>::max ();
getKNearestNeighborRecursive (p_q, k, this->root_node_, key, 1, smallest_dist, point_candidates);
friend class OctreeIteratorBase<OctreeT> ;
friend class OctreeDepthFirstIterator<OctreeT> ;
friend class OctreeBreadthFirstIterator<OctreeT> ;
- friend class OctreeLeafNodeIterator<OctreeT> ;
+ friend class OctreeLeafNodeDepthFirstIterator<OctreeT> ;
+ friend class OctreeLeafNodeBreadthFirstIterator<OctreeT> ;
typedef BufferedBranchNode<BranchContainerT> BranchNode;
typedef OctreeLeafNode<LeafContainerT> LeafNode;
const Iterator end() {return Iterator();};
// Octree leaf node iterators
- typedef OctreeLeafNodeIterator<OctreeT> LeafNodeIterator;
- typedef const OctreeLeafNodeIterator<OctreeT> ConstLeafNodeIterator;
- LeafNodeIterator leaf_begin(unsigned int max_depth_arg = 0) {return LeafNodeIterator(this, max_depth_arg);};
- const LeafNodeIterator leaf_end() {return LeafNodeIterator();};
+ // The previous deprecated names
+ // LeafNodeIterator and ConstLeafNodeIterator are deprecated.
+ // Please use LeafNodeDepthFirstIterator and ConstLeafNodeDepthFirstIterator instead.
+ typedef OctreeLeafNodeDepthFirstIterator<OctreeT> LeafNodeIterator;
+ typedef const OctreeLeafNodeDepthFirstIterator<OctreeT> ConstLeafNodeIterator;
+
+ PCL_DEPRECATED ("Please use leaf_depth_begin () instead.")
+ LeafNodeIterator leaf_begin (unsigned int max_depth_arg = 0)
+ {
+ return LeafNodeIterator (this, max_depth_arg);
+ };
+
+ PCL_DEPRECATED ("Please use leaf_depth_end () instead.")
+ const LeafNodeIterator leaf_end ()
+ {
+ return LeafNodeIterator ();
+ };
+
+ // The currently valide names
+ typedef OctreeLeafNodeDepthFirstIterator<OctreeT> LeafNodeDepthFirstIterator;
+ typedef const OctreeLeafNodeDepthFirstIterator<OctreeT> ConstLeafNodeDepthFirstIterator;
+ LeafNodeDepthFirstIterator leaf_depth_begin (unsigned int max_depth_arg = 0)
+ {
+ return LeafNodeDepthFirstIterator (this, max_depth_arg);
+ };
+
+ const LeafNodeDepthFirstIterator leaf_depth_end ()
+ {
+ return LeafNodeDepthFirstIterator();
+ };
// Octree depth-first iterators
typedef OctreeDepthFirstIterator<OctreeT> DepthFirstIterator;
BreadthFirstIterator breadth_begin(unsigned int max_depth_arg = 0) {return BreadthFirstIterator(this, max_depth_arg);};
const BreadthFirstIterator breadth_end() {return BreadthFirstIterator();};
+ // Octree leaf node iterators
+ typedef OctreeLeafNodeBreadthFirstIterator<OctreeT> LeafNodeBreadthIterator;
+ typedef const OctreeLeafNodeBreadthFirstIterator<OctreeT> ConstLeafNodeBreadthIterator;
+
+ LeafNodeBreadthIterator leaf_breadth_begin (unsigned int max_depth_arg = 0u)
+ {
+ return LeafNodeBreadthIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const LeafNodeBreadthIterator leaf_breadth_end ()
+ {
+ return LeafNodeBreadthIterator (this, 0, NULL);
+ };
+
/** \brief Empty constructor. */
Octree2BufBase ();
return ret;
}
- /** \brief Check for leaf not existance in the octree
+ /** \brief Check if leaf doesn't exist in the octree
* \param key_arg: octree key addressing a leaf node.
* \return "true" if leaf node is found; "false" otherwise
* */
* \param key_arg: reference to an octree key
* \param binary_tree_in_it_arg iterator of binary input data
* \param binary_tree_in_it_end_arg
- * \param leaf_container_vector_it_arg: iterator pointing to leaf containter pointers to be added to a leaf node
- * \param leaf_container_vector_it_end_arg: iterator pointing to leaf containter pointers pointing to last object in input container.
+ * \param leaf_container_vector_it_arg: iterator pointing to leaf container pointers to be added to a leaf node
+ * \param leaf_container_vector_it_end_arg: iterator pointing to leaf container pointers pointing to last object in input container.
* \param branch_reset_arg: Reset pointer array of current branch
* \param do_XOR_decoding_arg: select if binary tree structure is based on current octree (false) of based on a XOR comparison between current and previous octree
**/
typename BranchContainerT = OctreeContainerEmpty >
class OctreeBase
{
-
public:
typedef OctreeBase<LeafContainerT, BranchContainerT> OctreeT;
typedef BranchContainerT BranchContainer;
typedef LeafContainerT LeafContainer;
+ protected:
+
+ ///////////////////////////////////////////////////////////////////////
+ // Members
+ ///////////////////////////////////////////////////////////////////////
+
+ /** \brief Amount of leaf nodes **/
+ std::size_t leaf_count_;
+
+ /** \brief Amount of branch nodes **/
+ std::size_t branch_count_;
+
+ /** \brief Pointer to root branch node of octree **/
+ BranchNode* root_node_;
+
+ /** \brief Depth mask based on octree depth **/
+ unsigned int depth_mask_;
+
+ /** \brief Octree depth */
+ unsigned int octree_depth_;
+
+ /** \brief Enable dynamic_depth **/
+ bool dynamic_depth_enabled_;
+
+ /** \brief key range */
+ OctreeKey max_key_;
+
+ public:
+
// iterators are friends
friend class OctreeIteratorBase<OctreeT> ;
friend class OctreeDepthFirstIterator<OctreeT> ;
friend class OctreeBreadthFirstIterator<OctreeT> ;
- friend class OctreeLeafNodeIterator<OctreeT> ;
+ friend class OctreeFixedDepthIterator<OctreeT> ;
+ friend class OctreeLeafNodeDepthFirstIterator<OctreeT> ;
+ friend class OctreeLeafNodeBreadthFirstIterator<OctreeT> ;
// Octree default iterators
typedef OctreeDepthFirstIterator<OctreeT> Iterator;
typedef const OctreeDepthFirstIterator<OctreeT> ConstIterator;
- Iterator begin(unsigned int max_depth_arg = 0) {return Iterator(this, max_depth_arg);};
- const Iterator end() {return Iterator();};
+
+ Iterator begin (unsigned int max_depth_arg = 0u)
+ {
+ return Iterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const Iterator end ()
+ {
+ return Iterator (this, 0, NULL);
+ };
// Octree leaf node iterators
- typedef OctreeLeafNodeIterator<OctreeT> LeafNodeIterator;
- typedef const OctreeLeafNodeIterator<OctreeT> ConstLeafNodeIterator;
- LeafNodeIterator leaf_begin(unsigned int max_depth_arg = 0) {return LeafNodeIterator(this, max_depth_arg);};
- const LeafNodeIterator leaf_end() {return LeafNodeIterator();};
+ // The previous deprecated names
+ // LeafNodeIterator and ConstLeafNodeIterator are deprecated.
+ // Please use LeafNodeDepthFirstIterator and ConstLeafNodeDepthFirstIterator instead.
+ typedef OctreeLeafNodeDepthFirstIterator<OctreeT> LeafNodeIterator;
+ typedef const OctreeLeafNodeDepthFirstIterator<OctreeT> ConstLeafNodeIterator;
+
+ PCL_DEPRECATED ("Please use leaf_depth_begin () instead.")
+ LeafNodeIterator leaf_begin (unsigned int max_depth_arg = 0u)
+ {
+ return LeafNodeIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ PCL_DEPRECATED ("Please use leaf_depth_end () instead.")
+ const LeafNodeIterator leaf_end ()
+ {
+ return LeafNodeIterator (this, 0, NULL);
+ };
+
+ // The currently valide names
+ typedef OctreeLeafNodeDepthFirstIterator<OctreeT> LeafNodeDepthFirstIterator;
+ typedef const OctreeLeafNodeDepthFirstIterator<OctreeT> ConstLeafNodeDepthFirstIterator;
+
+ LeafNodeDepthFirstIterator leaf_depth_begin (unsigned int max_depth_arg = 0u)
+ {
+ return LeafNodeDepthFirstIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const LeafNodeDepthFirstIterator leaf_depth_end ()
+ {
+ return LeafNodeDepthFirstIterator (this, 0, NULL);
+ };
// Octree depth-first iterators
typedef OctreeDepthFirstIterator<OctreeT> DepthFirstIterator;
typedef const OctreeDepthFirstIterator<OctreeT> ConstDepthFirstIterator;
- DepthFirstIterator depth_begin(unsigned int max_depth_arg = 0) {return DepthFirstIterator(this, max_depth_arg);};
- const DepthFirstIterator depth_end() {return DepthFirstIterator();};
+
+ DepthFirstIterator depth_begin (unsigned int max_depth_arg = 0u)
+ {
+ return DepthFirstIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const DepthFirstIterator depth_end ()
+ {
+ return DepthFirstIterator (this, 0, NULL);
+ };
// Octree breadth-first iterators
typedef OctreeBreadthFirstIterator<OctreeT> BreadthFirstIterator;
typedef const OctreeBreadthFirstIterator<OctreeT> ConstBreadthFirstIterator;
- BreadthFirstIterator breadth_begin(unsigned int max_depth_arg = 0) {return BreadthFirstIterator(this, max_depth_arg);};
- const BreadthFirstIterator breadth_end() {return BreadthFirstIterator();};
+ BreadthFirstIterator breadth_begin (unsigned int max_depth_arg = 0u)
+ {
+ return BreadthFirstIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const BreadthFirstIterator breadth_end ()
+ {
+ return BreadthFirstIterator (this, 0, NULL);
+ };
+
+ // Octree breadth iterators at a given depth
+ typedef OctreeFixedDepthIterator<OctreeT> FixedDepthIterator;
+ typedef const OctreeFixedDepthIterator<OctreeT> ConstFixedDepthIterator;
+
+ FixedDepthIterator fixed_depth_begin (unsigned int fixed_depth_arg = 0u)
+ {
+ return FixedDepthIterator (this, fixed_depth_arg);
+ };
+
+ const FixedDepthIterator fixed_depth_end ()
+ {
+ return FixedDepthIterator (this, 0, NULL);
+ };
+
+ // Octree leaf node iterators
+ typedef OctreeLeafNodeBreadthFirstIterator<OctreeT> LeafNodeBreadthFirstIterator;
+ typedef const OctreeLeafNodeBreadthFirstIterator<OctreeT> ConstLeafNodeBreadthFirstIterator;
+
+ LeafNodeBreadthFirstIterator leaf_breadth_begin (unsigned int max_depth_arg = 0u)
+ {
+ return LeafNodeBreadthFirstIterator (this, max_depth_arg? max_depth_arg : this->octree_depth_);
+ };
+
+ const LeafNodeBreadthFirstIterator leaf_breadth_end ()
+ {
+ return LeafNodeBreadthFirstIterator (this, 0, NULL);
+ };
/** \brief Empty constructor. */
OctreeBase ();
return result;
}
- /** \brief Check for existance of a leaf node in the octree
+ /** \brief Check for existence of a leaf node in the octree
* \param key_arg: octree key addressing a leaf node.
* \return "true" if leaf node is found; "false" otherwise
* */
{
return (true);
}
-
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Globals
- //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- /** \brief Amount of leaf nodes **/
- std::size_t leaf_count_;
-
- /** \brief Amount of branch nodes **/
- std::size_t branch_count_;
-
- /** \brief Pointer to root branch node of octree **/
- BranchNode* root_node_;
-
- /** \brief Depth mask based on octree depth **/
- unsigned int depth_mask_;
-
- /** \brief Octree depth */
- unsigned int octree_depth_;
-
- /** \brief Enable dynamic_depth **/
- bool dynamic_depth_enabled_;
-
- /** \brief key range */
- OctreeKey max_key_;
};
}
}
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
+ * Copyright (c) 2017-, Open Perception, Inc.
*
* All rights reserved.
*
struct IteratorState{
OctreeNode* node_;
OctreeKey key_;
- unsigned char depth_;
+ unsigned int depth_;
};
this->reset ();
}
- /** \brief Copy constructor.
- * \param[in] src the iterator to copy into this
- * \param[in] max_depth_arg Depth limitation during traversal
- */
- OctreeIteratorBase (const OctreeIteratorBase& src, unsigned int max_depth_arg = 0) :
- octree_ (src.octree_), current_state_(0), max_octree_depth_(max_depth_arg)
- {
- }
-
- /** \brief Copy operator.
- * \param[in] src the iterator to copy into this
- */
- inline OctreeIteratorBase&
- operator = (const OctreeIteratorBase& src)
- {
- octree_ = src.octree_;
- current_state_ = src.current_state_;
- max_octree_depth_ = src.max_octree_depth_;
- return (*this);
- }
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ *
+ * \warning For advanced users only.
+ */
+ explicit
+ OctreeIteratorBase (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state)
+ : octree_(octree_arg)
+ , current_state_ (current_state)
+ , max_octree_depth_ (max_depth_arg)
+ {}
/** \brief Empty deconstructor. */
virtual
*/
bool operator==(const OctreeIteratorBase& other) const
{
- return (( octree_ ==other.octree_) &&
- ( current_state_ == other.current_state_) &&
- ( max_octree_depth_ == other.max_octree_depth_) );
+ if (this == &other) // same object
+ return true;
+ if (octree_ != other.octree_) // refer to different octrees
+ return false;
+ if (!current_state_ && !other.current_state_) // both are end iterators
+ return true;
+ if (max_octree_depth_ == other.max_octree_depth_ &&
+ current_state_ && other.current_state_ && // null dereference protection
+ current_state_->key_ == other.current_state_->key_)
+ return true;
+ return false;
}
/** \brief Inequal comparison operator
*/
bool operator!=(const OctreeIteratorBase& other) const
{
- return (( octree_ !=other.octree_) &&
- ( current_state_ != other.current_state_) &&
- ( max_octree_depth_ != other.max_octree_depth_) );
+ return !operator== (other);
}
/** \brief Reset iterator */
explicit
OctreeDepthFirstIterator (OctreeT* octree_arg, unsigned int max_depth_arg = 0);
- /** \brief Empty deconstructor. */
- virtual
- ~OctreeDepthFirstIterator ();
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ *
+ * \warning For advanced users only.
+ */
+ explicit
+ OctreeDepthFirstIterator (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state,
+ const std::vector<IteratorState>& stack = std::vector<IteratorState> ())
+ : OctreeIteratorBase<OctreeT> (octree_arg, max_depth_arg, current_state)
+ , stack_ (stack)
+ {}
+
+ /** \brief Copy Constructor.
+ * \param[in] other Another OctreeDepthFirstIterator to copy from
+ */
+ OctreeDepthFirstIterator (const OctreeDepthFirstIterator& other)
+ : OctreeIteratorBase<OctreeT> (other)
+ , stack_ (other.stack_)
+ {
+ this->current_state_ = stack_.size ()? &stack_.back () : NULL;
+ }
- /** \brief Copy operator.
+ /** \brief Copy assignment
* \param[in] src the iterator to copy into this
*/
inline OctreeDepthFirstIterator&
if (stack_.size())
{
- this->current_state_ = &stack_.back();
+ this->current_state_ = &stack_.back ();
} else
{
this->current_state_ = 0;
template<typename OctreeT>
class OctreeBreadthFirstIterator : public OctreeIteratorBase<OctreeT>
{
+ public:
// public typedefs
typedef typename OctreeIteratorBase<OctreeT>::BranchNode BranchNode;
typedef typename OctreeIteratorBase<OctreeT>::LeafNode LeafNode;
-
- public:
/** \brief Empty constructor.
* \param[in] max_depth_arg Depth limitation during traversal
*/
explicit
OctreeBreadthFirstIterator (OctreeT* octree_arg, unsigned int max_depth_arg = 0);
- /** \brief Empty deconstructor. */
- virtual
- ~OctreeBreadthFirstIterator ();
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ *
+ * \warning For advanced users only.
+ */
+ explicit
+ OctreeBreadthFirstIterator (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state,
+ const std::deque<IteratorState>& fifo = std::deque<IteratorState> ())
+ : OctreeIteratorBase<OctreeT> (octree_arg, max_depth_arg, current_state)
+ , FIFO_ (fifo)
+ {}
+
+ /** \brief Copy Constructor.
+ * \param[in] other Another OctreeBreadthFirstIterator to copy from
+ */
+ OctreeBreadthFirstIterator (const OctreeBreadthFirstIterator& other)
+ : OctreeIteratorBase<OctreeT> (other)
+ , FIFO_ (other.FIFO_)
+ {
+ this->current_state_ = FIFO_.size ()? &FIFO_.front () : NULL;
+ }
/** \brief Copy operator.
* \param[in] src the iterator to copy into this
std::deque<IteratorState> FIFO_;
};
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /** \brief @b Octree iterator class
+ * \note Iterator over all existing nodes at a given depth. It walks across an octree
+ * in a breadth-first manner.
+ * \ingroup octree
+ * \author Fabien Rozar (fabien.rozar@gmail.com)
+ */
+ template<typename OctreeT>
+ class OctreeFixedDepthIterator : public OctreeBreadthFirstIterator<OctreeT>
+ {
+ public:
+
+ // public typedefs
+ using typename OctreeBreadthFirstIterator<OctreeT>::BranchNode;
+ using typename OctreeBreadthFirstIterator<OctreeT>::LeafNode;
+
+ /** \brief Empty constructor.
+ */
+ OctreeFixedDepthIterator ();
+
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] fixed_depth_arg Depth level during traversal
+ */
+ explicit
+ OctreeFixedDepthIterator (OctreeT* octree_arg, unsigned int fixed_depth_arg = 0);
+
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] fixed_depth_arg Depth level during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ * \param[in] fifo Internal container of octree node to go through
+ *
+ * \warning For advanced users only.
+ */
+ OctreeFixedDepthIterator (OctreeT* octree_arg,
+ unsigned int fixed_depth_arg,
+ IteratorState* current_state,
+ const std::deque<IteratorState>& fifo = std::deque<IteratorState> ())
+ : OctreeBreadthFirstIterator<OctreeT> (octree_arg, fixed_depth_arg, current_state, fifo)
+ , fixed_depth_ (fixed_depth_arg)
+ {}
+
+ /** \brief Copy Constructor.
+ * \param[in] other Another OctreeFixedDepthIterator to copy from
+ */
+ OctreeFixedDepthIterator (const OctreeFixedDepthIterator& other)
+ : OctreeBreadthFirstIterator<OctreeT> (other)
+ {
+ this->fixed_depth_ = other.fixed_depth_;
+ }
+
+ /** \brief Copy assignment.
+ * \param[in] src the iterator to copy into this
+ * \return pointer to the current octree node
+ */
+ inline OctreeFixedDepthIterator&
+ operator = (const OctreeFixedDepthIterator& src)
+ {
+ OctreeBreadthFirstIterator<OctreeT>::operator= (src);
+ this->fixed_depth_ = src.fixed_depth_;
+
+ return (*this);
+ }
+
+ /** \brief Reset the iterator to the first node at the depth given as parameter
+ * \param[in] fixed_depth_arg Depth level during traversal
+ */
+ void
+ reset (unsigned int fixed_depth_arg);
+
+ /** \brief Reset the iterator to the first node at the current depth
+ */
+ void
+ reset ()
+ {
+ this->reset (fixed_depth_);
+ }
+
+ protected:
+ using OctreeBreadthFirstIterator<OctreeT>::FIFO_;
+
+ /** \brief Given level of the node to be iterated */
+ unsigned int fixed_depth_;
+ };
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Octree leaf node iterator class
* \note This class implements a forward iterator for traversing the leaf nodes of an octree data structure.
*/
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename OctreeT>
- class OctreeLeafNodeIterator : public OctreeDepthFirstIterator<OctreeT>
+ class OctreeLeafNodeDepthFirstIterator : public OctreeDepthFirstIterator<OctreeT>
{
typedef typename OctreeDepthFirstIterator<OctreeT>::BranchNode BranchNode;
typedef typename OctreeDepthFirstIterator<OctreeT>::LeafNode LeafNode;
* \param[in] max_depth_arg Depth limitation during traversal
*/
explicit
- OctreeLeafNodeIterator (unsigned int max_depth_arg = 0) :
+ OctreeLeafNodeDepthFirstIterator (unsigned int max_depth_arg = 0) :
OctreeDepthFirstIterator<OctreeT> (max_depth_arg)
{
reset ();
* \param[in] max_depth_arg Depth limitation during traversal
*/
explicit
- OctreeLeafNodeIterator (OctreeT* octree_arg, unsigned int max_depth_arg = 0) :
+ OctreeLeafNodeDepthFirstIterator (OctreeT* octree_arg, unsigned int max_depth_arg = 0) :
OctreeDepthFirstIterator<OctreeT> (octree_arg, max_depth_arg)
{
reset ();
}
- /** \brief Empty deconstructor. */
- virtual
- ~OctreeLeafNodeIterator ()
- {
- }
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ *
+ * \warning For advanced users only.
+ */
+ explicit
+ OctreeLeafNodeDepthFirstIterator (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state,
+ const std::vector<IteratorState>& stack = std::vector<IteratorState> ())
+ : OctreeDepthFirstIterator<OctreeT> (octree_arg,
+ max_depth_arg,
+ current_state,
+ stack)
+ {}
/** \brief Reset the iterator to the root node of the octree
*/
/** \brief Preincrement operator.
* \note recursively step to next octree leaf node
*/
- inline OctreeLeafNodeIterator&
+ inline OctreeLeafNodeDepthFirstIterator&
operator++ ()
{
do
/** \brief postincrement operator.
* \note step to next octree node
*/
- inline OctreeLeafNodeIterator
+ inline OctreeLeafNodeDepthFirstIterator
operator++ (int)
{
- OctreeLeafNodeIterator _Tmp = *this;
+ OctreeLeafNodeDepthFirstIterator _Tmp = *this;
++*this;
return (_Tmp);
}
}
};
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ /** \brief Octree leaf node iterator class
+ * \note This class implements a forward iterator for traversing the leaf nodes of an octree data structure
+ * in the breadth first way.
+ * \ingroup octree
+ * \author Fabien Rozar (fabien.rozar@gmail.com)
+ */
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ template<typename OctreeT>
+ class OctreeLeafNodeBreadthFirstIterator : public OctreeBreadthFirstIterator<OctreeT>
+ {
+ typedef typename OctreeBreadthFirstIterator<OctreeT>::BranchNode BranchNode;
+ typedef typename OctreeBreadthFirstIterator<OctreeT>::LeafNode LeafNode;
+
+ public:
+ /** \brief Empty constructor.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ */
+ explicit
+ OctreeLeafNodeBreadthFirstIterator (unsigned int max_depth_arg = 0);
+
+ /** \brief Constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ */
+ explicit
+ OctreeLeafNodeBreadthFirstIterator (OctreeT* octree_arg, unsigned int max_depth_arg = 0);
+
+ /** \brief Copy constructor.
+ * \param[in] octree_arg Octree to be iterated. Initially the iterator is set to its root node.
+ * \param[in] max_depth_arg Depth limitation during traversal
+ * \param[in] current_state A pointer to the current iterator state
+ * \param[in] fifo Internal container of octree node to go through
+ *
+ * \warning For advanced users only.
+ */
+ explicit
+ OctreeLeafNodeBreadthFirstIterator (OctreeT* octree_arg,
+ unsigned int max_depth_arg,
+ IteratorState* current_state,
+ const std::deque<IteratorState>& fifo = std::deque<IteratorState> ());
+
+ /** \brief Reset the iterator to the first leaf in the breadth first way.
+ */
+ inline void
+ reset ();
+
+ /** \brief Preincrement operator.
+ * \note recursively step to next octree leaf node
+ */
+ inline OctreeLeafNodeBreadthFirstIterator&
+ operator++ ();
+
+
+ /** \brief Postincrement operator.
+ * \note step to next octree node
+ */
+ inline OctreeLeafNodeBreadthFirstIterator
+ operator++ (int);
+ };
+
}
}
return ((b.x == this->x) && (b.y == this->y) && (b.z == this->z));
}
+ /** \brief Inequal comparison operator
+ * \param[in] other OctreeIteratorBase to compare with
+ * \return "true" if the current and other iterators are different ; "false" otherwise.
+ */
+ bool operator!= (const OctreeKey& other) const
+ {
+ return !operator== (other);
+ }
+
/** \brief Operator<= for comparing octree keys with each other.
* \return "true" if key indices are not greater than the key indices of b ; "false" otherwise.
* */
}
/* \brief maximum depth that can be addressed */
- static const unsigned char maxDepth = static_cast<const unsigned char>(sizeof(uint32_t)*8);
+ static const unsigned char maxDepth = static_cast<unsigned char>(sizeof(uint32_t)*8);
// Indices addressing a voxel at (X, Y, Z)
class OctreePointCloud : public OctreeT
{
- // iterators are friends
- friend class OctreeIteratorBase<OctreeT> ;
- friend class OctreeDepthFirstIterator<OctreeT> ;
- friend class OctreeBreadthFirstIterator<OctreeT> ;
- friend class OctreeLeafNodeIterator<OctreeT> ;
-
public:
typedef OctreeT Base;
typedef typename OctreeT::LeafNode LeafNode;
typedef typename OctreeT::BranchNode BranchNode;
- // Octree default iterators
- typedef OctreeDepthFirstIterator<OctreeT> Iterator;
- typedef const OctreeDepthFirstIterator<OctreeT> ConstIterator;
-
- // Octree leaf node iterators
- typedef OctreeLeafNodeIterator<OctreeT> LeafNodeIterator;
- typedef const OctreeLeafNodeIterator<OctreeT> ConstLeafNodeIterator;
-
- // Octree depth-first iterators
- typedef OctreeDepthFirstIterator<OctreeT> DepthFirstIterator;
- typedef const OctreeDepthFirstIterator<OctreeT> ConstDepthFirstIterator;
-
- // Octree breadth-first iterators
- typedef OctreeBreadthFirstIterator<OctreeT> BreadthFirstIterator;
- typedef const OctreeBreadthFirstIterator<OctreeT> ConstBreadthFirstIterator;
-
/** \brief Octree pointcloud constructor.
* \param[in] resolution_arg octree resolution at lowest octree level
*/
typedef boost::shared_ptr<PointCloud> PointCloudPtr;
typedef boost::shared_ptr<const PointCloud> PointCloudConstPtr;
- // Iterators are friends
- friend class OctreeIteratorBase<OctreeAdjacencyT>;
- friend class OctreeDepthFirstIterator<OctreeAdjacencyT>;
- friend class OctreeBreadthFirstIterator<OctreeAdjacencyT>;
- friend class OctreeLeafNodeIterator<OctreeAdjacencyT>;
-
- // Octree default iterators
- typedef OctreeDepthFirstIterator<OctreeAdjacencyT> Iterator;
- typedef const OctreeDepthFirstIterator<OctreeAdjacencyT> ConstIterator;
-
- Iterator depth_begin (unsigned int max_depth_arg = 0) { return Iterator (this, max_depth_arg); }
- const Iterator depth_end () { return Iterator (); }
-
- // Octree leaf node iterators
- typedef OctreeLeafNodeIterator<OctreeAdjacencyT> LeafNodeIterator;
- typedef const OctreeLeafNodeIterator<OctreeAdjacencyT> ConstLeafNodeIterator;
-
- LeafNodeIterator leaf_begin (unsigned int max_depth_arg = 0) { return LeafNodeIterator (this, max_depth_arg); }
- const LeafNodeIterator leaf_end () { return LeafNodeIterator (); }
-
// BGL graph
typedef boost::adjacency_list<boost::setS, boost::setS, boost::undirectedS, PointT, float> VoxelAdjacencyList;
typedef typename VoxelAdjacencyList::vertex_descriptor VoxelID;
/** \brief Search for points within rectangular search area
+ * Points exactly on the edges of the search rectangle are included.
* \param[in] min_pt lower corner of search area
* \param[in] max_pt upper corner of search area
* \param[out] k_indices the resultant point indices
The following figure illustrates the voxel bounding boxes of an octree nodes at lowest tree level.
The octree voxels are surrounding every 3D point from the bunny's surface. The red dots represent the point data.
-This image is create with the octree_viewer (visualization/tools/octree_viewer).
+This image is created with the octree_viewer (visualization/tools/octree_viewer).
For examples how to use the <b>pcl_octree</b> library, please visit the <a href="http://www.pointclouds.org/documentation/tutorials/">pcl tutorial page</a>.
PCLAPI(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
PCLAPI(void) cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item);
-/* Remove/Detatch items from Arrays/Objects. */
+/* Remove/Detach items from Arrays/Objects. */
PCLAPI(cJSON *) cJSON_DetachItemFromArray(cJSON *array,int which);
PCLAPI(void) cJSON_DeleteItemFromArray(cJSON *array,int which);
PCLAPI(cJSON *) cJSON_DetachItemFromObject(cJSON *object,const char *string);
if (num_pts > 0)
{
//always sample at least one point
- sample_points = sample_points > 0 ? sample_points : 1;
+ sample_points = sample_points > 1 ? sample_points : 1;
}
else
{
random_sampler.setSample (static_cast<unsigned int> (sample_points));
pcl::ExtractIndices<pcl::PCLPointCloud2> extractor;
+ extractor.setInputCloud (tmp_blob);
pcl::IndicesPtr downsampled_cloud_indices (new std::vector<int> ());
random_sampler.filter (*downsampled_cloud_indices);
OutofcoreOctreeDiskContainer<PointT>::rand_gen_ (static_cast<unsigned int> (std::time(NULL)));
template<typename PointT>
- boost::uuids::random_generator OutofcoreOctreeDiskContainer<PointT>::uuid_gen_ (&rand_gen_);
+ boost::uuids::basic_random_generator<boost::mt19937> OutofcoreOctreeDiskContainer<PointT>::uuid_gen_ (&rand_gen_);
template<typename PointT>
const uint64_t OutofcoreOctreeDiskContainer<PointT>::READ_BLOCK_SIZE_ = static_cast<uint64_t> (2e12);
if ((start + count) > size ())
{
- PCL_ERROR ("[pcl::outofcore::OutofcoreOctreeDiskContainer::%s] Indicies out of range; start + count exceeds the size of the stored points\n", __FUNCTION__);
+ PCL_ERROR ("[pcl::outofcore::OutofcoreOctreeDiskContainer::%s] Indices out of range; start + count exceeds the size of the stored points\n", __FUNCTION__);
PCL_THROW_EXCEPTION (PCLException, "[pcl::outofcore::OutofcoreOctreeDiskContainer] Outofcore Octree Exception: Read indices exceed range");
}
*
* \param bb_min triple of x,y,z minima for bounding box
* \param bb_max triple of x,y,z maxima for bounding box
- * \param tree adress of the tree data structure that will hold this initial root node
+ * \param tree address of the tree data structure that will hold this initial root node
* \param rootname Root directory for location of on-disk octree storage; if directory
* doesn't exist, it is created; if "rootname" is an existing file,
*
/** \brief Generate a universally unique identifier (UUID)
*
- * A mutex lock happens to ensure uniquness
+ * A mutex lock happens to ensure uniqueness
*
*/
static void
static boost::mutex rng_mutex_;
static boost::mt19937 rand_gen_;
- static boost::uuids::random_generator uuid_gen_;
+ static boost::uuids::basic_random_generator<boost::mt19937> uuid_gen_;
};
} //namespace outofcore
public:
typedef typename OutofcoreAbstractNodeContainer<PointT>::AlignedPointTVector AlignedPointTVector;
- /** \brief empty contructor (with a path parameter?)
+ /** \brief empty constructor (with a path parameter?)
*/
OutofcoreOctreeRamContainer (const boost::filesystem::path&) : container_ () { }
void
OutofcoreOctreeBaseMetadata::serializeMetadataToDisk ()
{
+ if (LOD_num_points_.empty ())
+ return;
+
// Create JSON object
boost::shared_ptr<cJSON> idx (cJSON_CreateObject (), cJSON_Delete);
void
printHelp (int, char **argv)
{
- print_info ("This program is used to process pcd fiels into an outofcore data structure viewable by the");
+ print_info ("This program is used to process pcd files into an outofcore data structure viewable by the");
print_info ("pcl_outofcore_viewer\n\n");
print_info ("%s <options> <input_tree_dir> \n", argv[0]);
print_info ("\n");
void
printHelp (int, char **argv)
{
- print_info ("This program is used to process pcd fiels into an outofcore data structure viewable by the");
+ print_info ("This program is used to process pcd files into an outofcore data structure viewable by the");
print_info ("pcl_outofcore_viewer\n\n");
print_info ("%s <options> <input>.pcd <output_tree_dir>\n", argv[0]);
print_info ("\n");
/**
* \brief Set sensor orientation (vertical = true means portrait mode, vertical = false means landscape mode).
*
- * \param[in] vertical Set landscape/portait camera orientation (default = false).
+ * \param[in] vertical Set landscape/portrait camera orientation (default = false).
*/
void
setSensorPortraitOrientation (bool vertical);
Eigen::Vector3f p_current_eigen(current_point->x, current_point->y, current_point->z); // conversion to eigen
float t = p_current_eigen.dot(head_ground_coeffs) / normalize_factor; // height from the ground
maxima_projected.col(i).matrix () = p_current_eigen - head_ground_coeffs * t; // projection of the point on the groundplane
- subclusters_number_of_points(i) = 0; // intialize number of points
+ subclusters_number_of_points(i) = 0; // initialize number of points
}
// Associate cluster points to one of the maximum:
{
int i, n=25000, n2=n/2; float t, ni;
static float a[25000]; static bool init=false;
- if( init ) return a+n2; ni = 2.02f/(float) n;
+ if( init ) return a+n2;
+ ni = 2.02f/(float) n;
for( i=0; i<n; i++ ) {
t = i*ni - 1.01f;
t = t<-1 ? -1 : (t>1 ? 1 : t);
return (filtered_quantized_color_gradients_);
}
- /** \brief Returns a reference to the internally computed spreaded quantized map. */
+ /** \brief Returns a reference to the internally computed spread quantized map. */
inline QuantizedMap &
getSpreadedQuantizedMap ()
{
/** \brief Determines whether variable numbers of features are extracted or not. */
bool variable_feature_nr_;
- /** \brief Stores a smoothed verion of the input cloud. */
+ /** \brief Stores a smoothed version of the input cloud. */
pcl::PointCloud<pcl::RGB>::Ptr smoothed_input_;
/** \brief Defines which feature selection method is used. */
pcl::QuantizedMap quantized_color_gradients_;
/** \brief The map which holds the filtered quantized data. */
pcl::QuantizedMap filtered_quantized_color_gradients_;
- /** \brief The map which holds the spreaded quantized data. */
+ /** \brief The map which holds the spread quantized data. */
pcl::QuantizedMap spreaded_filtered_quantized_color_gradients_;
};
}
- /** \brief Computes the roll angle that aligns input to modle.
+ /** \brief Computes the roll angle that aligns input to model.
* \param[in] input_ftt CRH histogram of the input cloud
* \param[in] target_ftt CRH histogram of the target cloud
* \param[out] peaks Vector containing angles where the histograms correlate
#define FACE_DETECTOR_COMMON_H_
#include <pcl/features/integral_image2D.h>
+#include <Eigen/Core>
namespace pcl
{
//save pose head information
Eigen::Vector3f trans_;
Eigen::Vector3f rot_;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
class FeatureType
Eigen::Matrix3d covariance_trans_;
Eigen::Matrix3d covariance_rot_;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
void serialize(::std::ostream & stream) const
{
{
solution_[index] = val;
//update optimizer solution
- cost_ = opt_->evaluateSolution (solution_, index); //this will udpate the cost function in opt_
+ cost_ = opt_->evaluateSolution (solution_, index); //this will update the cost function in opt_
}
void setSolution(std::vector<bool> & sol)
{
void
verify();
+
+ void setResolutionOccupancyGrid(float r)
+ {
+ res_occupancy_grid_ = r;
+ }
void setRadiusNormals(float r)
{
#include <pcl/common/io.h>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-bool
-gcCorrespSorter (pcl::Correspondence i, pcl::Correspondence j)
-{
- return (i.distance < j.distance);
+inline bool
+gcCorrespSorter (pcl::Correspondence i, pcl::Correspondence j)
+{
+ return (i.distance < j.distance);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//}
this->deinitCompute ();
- return (true);
+ return (transformations.size ());
}
// initialize explained_by_RM
points_explained_by_rm_.resize (scene_cloud_downsampled_->points.size ());
- // initalize model
+ // initialize model
for (size_t m = 0; m < visible_models_.size (); m++)
{
boost::shared_ptr < RecognitionModel > recog_model (new RecognitionModel);
explained_by_RM_.resize (scene_cloud_downsampled_->points.size ());
points_explained_by_rm_.resize (scene_cloud_downsampled_->points.size ());
- // initalize model
+ // initialize model
for (size_t m = 0; m < complete_models_.size (); m++)
{
boost::shared_ptr < RecognitionModel > recog_model (new RecognitionModel);
}
else
{
- mask_[m] = false; // the model didnt survive the sequential check...
+ mask_[m] = false; // the model didn't survive the sequential check...
}
}
}
return (false);
// We only support regular files for now.
- // Addional file types in TAR include: hard links, symbolic links, device/special files, block devices,
+ // Additional file types in TAR include: hard links, symbolic links, device/special files, block devices,
// directories, and named pipes.
if (header.file_type[0] != '0' && header.file_type[0] != '\0')
return (false);
void
setTrainingClasses (const std::vector<unsigned int>& training_classes);
- /** \brief This method returns the coresponding cloud of normals for every training point cloud. */
+ /** \brief This method returns the corresponding cloud of normals for every training point cloud. */
std::vector<typename pcl::PointCloud<NormalT>::Ptr>
getTrainingNormals ();
/** \brief This method allows to set the value of sigma used for calculating the learned weights for every single class.
* \param[in] training_sigmas new sigmas for every class. If you want these values to be computed automatically,
* just pass the empty array. The automatic regime calculates the maximum distance between the objects points and takes 10% of
- * this value as recomended in the article. If there are several objects of the same class,
+ * this value as recommended in the article. If there are several objects of the same class,
* then it computes the average maximum distance and takes 10%. Note that each class has its own sigma value.
*/
void
* and returns the list of votes.
* \param[in] model trained model which will be used for searching the objects
* \param[in] in_cloud input cloud that need to be investigated
- * \param[in] in_normals cloud of normals coresponding to the input cloud
+ * \param[in] in_normals cloud of normals corresponding to the input cloud
* \param[in] in_class_of_interest class which we are looking for
*/
boost::shared_ptr<pcl::features::ISMVoteList<PointT> >
void
generateRandomCenter (const std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f> >& boxes, Eigen::VectorXf& center);
- /** \brief Computes the square distance beetween two vectors.
+ /** \brief Computes the square distance between two vectors.
* \param[in] vec_1 first vector
* \param[in] vec_2 second vector
*/
void
detect (std::vector<typename pcl::LineRGBD<PointXYZT, PointRGBT>::Detection> & detections);
- /** \brief Applies the detection process in a semi-scale-invariant manner. This is done by acutally
+ /** \brief Applies the detection process in a semi-scale-invariant manner. This is done by actually
* scaling the template to different sizes.
*/
void
/** \brief Point clouds corresponding to the templates. */
pcl::PointCloud<pcl::PointXYZRGBA>::CloudVectorType template_point_clouds_;
- /** \brief Bounding boxes corresonding to the templates. */
+ /** \brief Bounding boxes corresponding to the templates. */
std::vector<pcl::BoundingBoxXYZ> bounding_boxes_;
/** \brief Object IDs corresponding to the templates. */
std::vector<size_t> object_ids_;
virtual QuantizedMap &
getQuantizedMap () = 0;
- /** \brief Returns a reference to the internally computed spreaded quantized map. */
+ /** \brief Returns a reference to the internally computed spread quantized map. */
virtual QuantizedMap &
getSpreadedQuantizedMap () = 0;
template<typename T> void
expandBoundingBoxToContainPoint (T bbox[6], const T p[3])
{
- if ( p[0] < bbox[0] ) bbox[0] = p[0];
+ if ( p[0] < bbox[0] ) bbox[0] = p[0];
else if ( p[0] > bbox[1] ) bbox[1] = p[0];
- if ( p[1] < bbox[2] ) bbox[2] = p[1];
+ if ( p[1] < bbox[2] ) bbox[2] = p[1];
else if ( p[1] > bbox[3] ) bbox[3] = p[1];
- if ( p[2] < bbox[4] ) bbox[4] = p[2];
+ if ( p[2] < bbox[4] ) bbox[4] = p[2];
else if ( p[2] > bbox[5] ) bbox[5] = p[2];
}
T *voxels_;
int num_of_voxels_[3], num_of_voxels_xy_plane_, total_num_of_voxels_;
REAL bounds_[6];
- REAL spacing_[3]; // spacing betwen the voxel in x, y and z direction = voxel size in x, y and z direction
+ REAL spacing_[3]; // spacing between the voxel in x, y and z direction = voxel size in x, y and z direction
REAL min_center_[3]; // the center of the voxel with integer coordinates (0, 0, 0)
};
} // namespace recognition
return (filtered_quantized_surface_normals_);
}
- /** \brief Returns a reference to the internal spreaded quantized map. */
+ /** \brief Returns a reference to the internal spread quantized map. */
inline QuantizedMap &
getSpreadedQuantizedMap ()
{
pcl::QuantizedMap quantized_surface_normals_;
/** \brief Filtered quantized surface normals. */
pcl::QuantizedMap filtered_quantized_surface_normals_;
- /** \brief Spreaded quantized surface normals. */
+ /** \brief Spread quantized surface normals. */
pcl::QuantizedMap spreaded_quantized_surface_normals_;
/** \brief Map containing surface normal orientations. */
te.wsize_ = w_size_;
te.trans_ = center_point.getVector3fMap () - patch_center_point.getVector3fMap ();
- te.trans_ *= 1000.f; //transform it to milimiters
+ te.trans_ *= 1000.f; //transform it to millimeters
te.rot_ = ea;
te.rot_ *= 57.2957795f; //transform it to degrees
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_normal_shooting.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_backprojection.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_estimation_organized_projection.hpp"
- "include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_distance.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_median_distance.hpp"
"include/pcl/${SUBSYS_NAME}/impl/correspondence_rejection_surface_normal.hpp"
/** \brief Empty destructor */
virtual ~CorrespondenceEstimationBase () {}
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceEstimationBase::setInputCloud] setInputCloud is deprecated. Please use setInputSource instead.")
- void
- setInputCloud (const PointCloudSourceConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceEstimationBase::getInputCloud] getInputCloud is deprecated. Please use getInputSource instead.")
- PointCloudSourceConstPtr const
- getInputCloud ();
-
/** \brief Provide a pointer to the input source
* (e.g., the point cloud that we want to align to the target)
*
inline const std::string&
getClassName () const { return (corr_name_); }
- /** \brief Internal computation initalization. */
+ /** \brief Internal computation initialization. */
bool
initCompute ();
- /** \brief Internal computation initalization for reciprocal correspondences. */
+ /** \brief Internal computation initialization for reciprocal correspondences. */
bool
initComputeReciprocal ();
using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_reciprocal_;
using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
- /** \brief Internal computation initalization. */
+ /** \brief Internal computation initialization. */
bool
initCompute ();
using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::tree_reciprocal_;
using CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::target_;
- /** \brief Internal computation initalization. */
+ /** \brief Internal computation initialization. */
bool
initCompute ();
/** \brief Empty destructor */
virtual ~DataContainer () {}
- /** \brief Provide a source point cloud dataset (must contain XYZ
- * data!), used to compute the correspondence distance.
- * \param[in] cloud a cloud containing XYZ data
- */
- PCL_DEPRECATED ("[pcl::registration::DataContainer::setInputCloud] setInputCloud is deprecated. Please use setInputSource instead.")
- void
- setInputCloud (const PointCloudConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- PCL_DEPRECATED ("[pcl::registration::DataContainer::getInputCloud] getInputCloud is deprecated. Please use getInputSource instead.")
- PointCloudConstPtr const
- getInputCloud ();
-
/** \brief Provide a source point cloud dataset (must contain XYZ
* data!), used to compute the correspondence distance.
* \param[in] cloud a cloud containing XYZ data
}
/** \brief Get the correspondence score for a given pair of correspondent points based on
- * the angle betweeen the normals. The normmals for the in put and target clouds must be
+ * the angle between the normals. The normmals for the in put and target clouds must be
* set before using this function
* \param[in] corr Correspondent points
*/
}
}
-#include <pcl/registration/impl/correspondence_rejection.hpp>
-
#endif /* PCL_REGISTRATION_CORRESPONDENCE_REJECTION_H_ */
getRemainingCorrespondences (const pcl::Correspondences& original_correspondences,
pcl::Correspondences& remaining_correspondences);
- /** \brief Provide a source point cloud dataset (must contain XYZ data!)
- * \param[in] cloud a cloud containing XYZ data
- */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceRejectorSampleConsensus::setInputCloud] setInputCloud is deprecated. Please use setInputSource instead.")
- virtual void
- setInputCloud (const PointCloudConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceRejectorSampleConsensus::getInputCloud] getInputCloud is deprecated. Please use getInputSource instead.")
- PointCloudConstPtr const
- getInputCloud ();
-
/** \brief Provide a source point cloud dataset (must contain XYZ data!)
* \param[in] cloud a cloud containing XYZ data
*/
inline PointCloudConstPtr const
getInputSource () { return (input_); }
- /** \brief Provide a target point cloud dataset (must contain XYZ data!)
- * \param[in] cloud a cloud containing XYZ data
- */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceRejectorSampleConsensus::setTargetCloud] setTargetCloud is deprecated. Please use setInputTarget instead.")
- virtual void
- setTargetCloud (const PointCloudConstPtr &cloud);
-
/** \brief Provide a target point cloud dataset (must contain XYZ data!)
* \param[in] cloud a cloud containing XYZ data
*/
inline double
getInlierThreshold () { return inlier_threshold_; };
- /** \brief Set the maximum number of iterations.
- * \param[in] max_iterations Maximum number if iterations to run
- */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceRejectorSampleConsensus::setMaxIterations] setMaxIterations is deprecated. Please use setMaximumIterations instead.")
- void
- setMaxIterations (int max_iterations);
-
/** \brief Set the maximum number of iterations.
* \param[in] max_iterations Maximum number if iterations to run
*/
inline void
setMaximumIterations (int max_iterations) { max_iterations_ = std::max (max_iterations, 0); }
- /** \brief Get the maximum number of iterations.
- * \return max_iterations Maximum number if iterations to run
- */
- PCL_DEPRECATED ("[pcl::registration::CorrespondenceRejectorSampleConsensus::getMaxIterations] getMaxIterations is deprecated. Please use getMaximumIterations instead.")
- int
- getMaxIterations ();
-
/** \brief Get the maximum number of iterations.
* \return max_iterations Maximum number if iterations to run
*/
* \author Dirk Holz
* \ingroup registration
*/
- struct sortCorrespondencesByQueryIndex : public std::binary_function<pcl::Correspondence, pcl::Correspondence, bool>
+ struct sortCorrespondencesByQueryIndex
{
+ typedef pcl::Correspondence first_argument_type;
+ typedef pcl::Correspondence second_argument_type;
+ typedef bool result_type;
bool
operator()( pcl::Correspondence a, pcl::Correspondence b)
{
* \author Dirk Holz
* \ingroup registration
*/
- struct sortCorrespondencesByMatchIndex : public std::binary_function<pcl::Correspondence, pcl::Correspondence, bool>
+ struct sortCorrespondencesByMatchIndex
{
+ typedef pcl::Correspondence first_argument_type;
+ typedef pcl::Correspondence second_argument_type;
+ typedef bool result_type;
bool
operator()( pcl::Correspondence a, pcl::Correspondence b)
{
* \author Dirk Holz
* \ingroup registration
*/
- struct sortCorrespondencesByDistance : public std::binary_function<pcl::Correspondence, pcl::Correspondence, bool>
+ struct sortCorrespondencesByDistance
{
+ typedef pcl::Correspondence first_argument_type;
+ typedef pcl::Correspondence second_argument_type;
+ typedef bool result_type;
bool
operator()( pcl::Correspondence a, pcl::Correspondence b)
{
* \author Dirk Holz
* \ingroup registration
*/
- struct sortCorrespondencesByQueryIndexAndDistance : public std::binary_function<pcl::Correspondence, pcl::Correspondence, bool>
+ struct sortCorrespondencesByQueryIndexAndDistance
{
+ typedef pcl::Correspondence first_argument_type;
+ typedef pcl::Correspondence second_argument_type;
+ typedef bool result_type;
inline bool
operator()( pcl::Correspondence a, pcl::Correspondence b)
{
* \author Dirk Holz
* \ingroup registration
*/
- struct sortCorrespondencesByMatchIndexAndDistance : public std::binary_function<pcl::Correspondence, pcl::Correspondence, bool>
+ struct sortCorrespondencesByMatchIndexAndDistance
{
+ typedef pcl::Correspondence first_argument_type;
+ typedef pcl::Correspondence second_argument_type;
+ typedef bool result_type;
inline bool
operator()( pcl::Correspondence a, pcl::Correspondence b)
{
* \param[in] correspondences list of correspondences
* \param[out] mean the mean descriptor distance of correspondences
* \param[out] stddev the standard deviation of descriptor distances.
- * \note The sample varaiance is used to determine the standard deviation
+ * \note The sample variance is used to determine the standard deviation
*/
inline void
getCorDistMeanStd (const pcl::Correspondences& correspondences, double &mean, double &stddev);
VertexT v_start, v_end;
Eigen::Matrix4f relative_transformation;
InformationT information_matrix;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
PoseMeasurement (const VertexT& v_s, const VertexT& v_e, const Eigen::Matrix4f& tr, const InformationT& mtx)
: v_start (v_s), v_end (v_e), relative_transformation (tr), information_matrix (mtx) {}
} ;
/** \class NotEnoughPointsException
- * \brief An exception that is thrown when the number of correspondants is not equal
+ * \brief An exception that is thrown when the number of correspondents is not equal
* to the minimum required
*/
class PCL_EXPORTS NotEnoughPointsException : public PCLException
boost::bind (&GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS,
this, _1, _2, _3, _4, _5);
}
-
- /** \brief Provide a pointer to the input dataset
- * \param cloud the const boost shared pointer to a PointCloud message
- */
- PCL_DEPRECATED ("[pcl::registration::GeneralizedIterativeClosestPoint::setInputCloud] setInputCloud is deprecated. Please use setInputSource instead.")
- void
- setInputCloud (const PointCloudSourceConstPtr &cloud);
/** \brief Provide a pointer to the input dataset
* \param cloud the const boost shared pointer to a PointCloud message
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
void
int k_correspondences_;
/** \brief The epsilon constant for gicp paper; this is NOT the convergence
- * tolerence
+ * tolerance
* default: 0.001
*/
double gicp_epsilon_;
int max_inner_iterations_;
/** \brief compute points covariances matrices according to the K nearest
- * neighbors. K is set via setCorrespondenceRandomness() methode.
+ * neighbors. K is set via setCorrespondenceRandomness() method.
* \param cloud pointer to point cloud
* \param tree KD tree performer for nearest neighbors search
* \param[out] cloud_covariances covariances matrices for each point in the cloud
namespace registration
{
/** \brief @b GraphHandler class is a wrapper for a general SLAM graph
- * The actual graph class must fulfil the following boost::graph concepts:
+ * The actual graph class must fulfill the following boost::graph concepts:
* - BidirectionalGraph
* - AdjacencyGraph
* - VertexAndEdgeListGraph
/** \brief Estimated squared metric overlap between source and target.
* \note Internally calculated using the estimated overlap and the extent of the source cloud.
* It is used to derive the minimum sampling distance of the base points as well as to calculated
- * the number of trys to reliable find a correct mach.
+ * the number of tries to reliably find a correct match.
*/
float max_base_diameter_sqr_;
using Registration<PointSource, PointTarget, Scalar>::final_transformation_;
using Registration<PointSource, PointTarget, Scalar>::transformation_;
using Registration<PointSource, PointTarget, Scalar>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget, Scalar>::transformation_rotation_epsilon_;
using Registration<PointSource, PointTarget, Scalar>::converged_;
using Registration<PointSource, PointTarget, Scalar>::corr_dist_threshold_;
using Registration<PointSource, PointTarget, Scalar>::inlier_threshold_;
#include <pcl/common/io.h>
#include <pcl/common/copy_point.h>
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> void
-pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::setInputCloud (const typename pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::PointCloudSourceConstPtr &cloud)
-{
- setInputSource (cloud);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> typename pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::PointCloudSourceConstPtr const
-pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::getInputCloud ()
-{
- return (getInputSource ());
-}
-
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::CorrespondenceEstimationBase<PointSource, PointTarget, Scalar>::setInputTarget (
+++ /dev/null
-/*
- * Software License Agreement (BSD License)
- *
- * Point Cloud Library (PCL) - www.pointclouds.org
- * Copyright (c) 2012-, Open Perception, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of the copyright holder(s) nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef PCL_REGISTRATION_CORRESPONDENCE_REJECTION_IMPL_HPP_
-#define PCL_REGISTRATION_CORRESPONDENCE_REJECTION_IMPL_HPP_
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename NormalT> void
-pcl::registration::DataContainer<PointT, NormalT>::setInputCloud (const typename pcl::registration::DataContainer<PointT, NormalT>::PointCloudConstPtr &cloud)
-{
- //input_ = cloud;
- setInputSource (cloud);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT, typename NormalT> typename pcl::registration::DataContainer<PointT, NormalT>::PointCloudConstPtr const
-pcl::registration::DataContainer<PointT, NormalT>::getInputCloud ()
-{
- return (getInputSource ());
- //return (input_);
-}
-
-#endif // PCL_REGISTRATION_CORRESPONDENCE_REJECTION_IMPL_HPP_
-
#include <boost/unordered_map.hpp>
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::setInputCloud (
- const typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::PointCloudConstPtr &cloud)
-{
- setInputSource (cloud);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::PointCloudConstPtr const
-pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::getInputCloud ()
-{
- return (getInputSource ());
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::setTargetCloud (
- const typename pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::PointCloudConstPtr &cloud)
-{
- setInputTarget (cloud);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> void
-pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::setMaxIterations (
- int max_iterations)
-{
- setMaximumIterations (max_iterations);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointT> int
-pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::getMaxIterations ()
-{
- return (getMaximumIterations ());
-}
-
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::registration::CorrespondenceRejectorSampleConsensus<PointT>::getRemainingCorrespondences (
{
if (failure_after_max_iter_)
return (false);
- else
- {
- convergence_state_ = CONVERGENCE_CRITERIA_ITERATIONS;
- return (true);
- }
- return (failure_after_max_iter_ ? false : true);
+
+ convergence_state_ = CONVERGENCE_CRITERIA_ITERATIONS;
+ return (true);
}
// 2. The epsilon (difference) between the previous transformation and the current estimated transformation
#include <pcl/registration/boost.h>
#include <pcl/registration/exceptions.h>
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget> void
-pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::setInputCloud (
- const typename pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::PointCloudSourceConstPtr &cloud)
-{
- setInputSource (cloud);
-}
-
////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget>
template<typename PointT> void
Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
// Estimate the distance (cost function)
- // The last coordiante is still guaranteed to be set to 1.0
+ // The last coordinate is still guaranteed to be set to 1.0
Eigen::Vector3d res(pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
Eigen::Vector3d temp (gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
//increment= res'*temp/num_matches = temp'*M*temp/num_matches (we postpone 1/num_matches after the loop closes)
Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
- // The last coordiante is still guaranteed to be set to 1.0
+ // The last coordinate is still guaranteed to be set to 1.0
Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
// temp = M*res
Eigen::Vector3d temp (gicp_->mahalanobis ((*gicp_->tmp_idx_src_)[i]) * res);
// The last coordinate, p_tgt[3] is guaranteed to be set to 1.0 in registration.hpp
Vector4fMapConst p_tgt = gicp_->tmp_tgt_->points[(*gicp_->tmp_idx_tgt_)[i]].getVector4fMap ();
Eigen::Vector4f pp (transformation_matrix * p_src);
- // The last coordiante is still guaranteed to be set to 1.0
+ // The last coordinate is still guaranteed to be set to 1.0
Eigen::Vector3d res (pp[0] - p_tgt[0], pp[1] - p_tgt[1], pp[2] - p_tgt[2]);
// temp = M*res
Eigen::Vector3d temp (gicp_->mahalanobis((*gicp_->tmp_idx_src_)[i]) * res);
// select four coplanar point base
if (selectBase (base_indices, ratio) == 0)
{
- // calculate candidate pair correspondences using diagonal lenghts of base
+ // calculate candidate pair correspondences using diagonal lengths of base
pcl::Correspondences pairs_a, pairs_b;
if (bruteForceCorrespondences (base_indices[0], base_indices[1], pairs_a) == 0 &&
bruteForceCorrespondences (base_indices[2], base_indices[3], pairs_b) == 0)
}
- // determine best match over all trys
+ // determine best match over all tries
finalCompute (all_candidates);
// apply the final transformation
delta_ *= mean_dist;
}
- // heuristic determination of number of trials to have high probabilty of finding a good solution
+ // heuristic determination of number of trials to have high probability of finding a good solution
if (max_iterations_ == 0)
{
float first_est = std::log (small_error_) / std::log (1.0 - std::pow ((double) approx_overlap_, (double) min_iterations));
Eigen::Vector4f centre_pt;
float nearest_to_plane = FLT_MAX;
- // repeat base search until valid quadruple was found or ransac_iterations_ number of trys were unsuccessfull
+ // repeat base search until valid quadruple was found or ransac_iterations_ number of tries were unsuccessful
for (int i = 0; i < ransac_iterations_; i++)
{
// random select an appropriate point triple
}
}
- // check if at least one point fullfilled the conditions
+ // check if at least one point fulfilled the conditions
if (nearest_to_plane != FLT_MAX)
{
// order points to build largest quadrangle and calcuate intersection ratios of diagonals
}
}
- // return unsuccessfull if no quadruple was selected
+ // return unsuccessful if no quadruple was selected
return (-1);
}
}
}
- // return unsuccessfull if no match was found
+ // return unsuccessful if no match was found
return (matches.size () > 0 ? 0 : -1);
}
{
float dist_sqr_1 = pcl::squaredEuclideanDistance (target_->points[*it_base], centre_pt_base);
float best_diff_sqr = FLT_MAX;
- int best_index;
+ int best_index = -1;
for (it_match = copy.begin (); it_match != it_match_e; it_match++)
{
break;
}
- // check current costs and return unsuccessfull if larger than previous ones
+ // check current costs and return unsuccessful if larger than previous ones
inlier_score_temp /= static_cast <float> (nr_points);
float fitness_score_temp = 1.f - inlier_score_temp;
pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::finalCompute (
const std::vector <MatchingCandidates > &candidates)
{
- // get best fitness_score over all trys
+ // get best fitness_score over all tries
int nr_candidates = static_cast <int> (candidates.size ());
int best_index = -1;
float best_score = FLT_MAX;
// generate a subset of indices of size ransac_iterations_ on which to evaluate candidates on
std::size_t nr_indices = indices_->size ();
- if (nr_indices < ransac_iterations_)
+ if (nr_indices < size_t (ransac_iterations_))
indices_validation_ = indices_;
else
for (int i = 0; i < ransac_iterations_; i++)
scale += lambda_;
}
- // calculate the fitness and return unsuccessfull if smaller than previous ones
+ // calculate the fitness and return unsuccessful if smaller than previous ones
float fitness_score_temp = (score_a + lambda_ * score_b) / scale;
if (fitness_score_temp > fitness_score)
return (-1);
for (it_curr = it->begin (), it_curr_e = it->end (); it_curr != it_curr_e; it_curr++)
candidates_.push_back (*it_curr);
- // sort acoording to score value
+ // sort according to score value
std::sort (candidates_.begin (), candidates_.end (), by_score ());
// return here if no score was valid, i.e. all scores are FLT_MAX
// Estimate the transform from the samples to their corresponding points
transformation_estimation_->estimateRigidTransformation (*input_, sample_indices, *target_, corresponding_indices, transformation_);
- // Tranform the data and compute the error
+ // Transform the data and compute the error
transformPointCloud (*input_, input_transformed, transformation_);
error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
convergence_criteria_->setMaximumIterations (max_iterations_);
convergence_criteria_->setRelativeMSE (euclidean_fitness_epsilon_);
convergence_criteria_->setTranslationThreshold (transformation_epsilon_);
- convergence_criteria_->setRotationThreshold (1.0 - transformation_epsilon_);
+ if (transformation_rotation_epsilon_ > 0)
+ convergence_criteria_->setRotationThreshold (transformation_rotation_epsilon_);
+ else
+ convergence_criteria_->setRotationThreshold (1.0 - transformation_epsilon_);
// Repeat until convergence
do
// Estimate the transform
transformation_estimation_->estimateRigidTransformation (*input_transformed, *target_, *correspondences_, transformation_);
- // Tranform the data
+ // Transform the data
transformCloud (*input_transformed, *input_transformed, transformation_);
// Obtain the final transformation
// Estimate the transform jointly, on a combined correspondence set
transformation_estimation_->estimateRigidTransformation (*inputs_transformed_combined, *targets_combined, *correspondences_, transformation_);
- // Tranform the combined data
+ // Transform the combined data
this->transformCloud (*inputs_transformed_combined, *inputs_transformed_combined, transformation_);
// And all its components
for (size_t i = 0; i < sources_.size (); i++)
double gauss_c1, gauss_c2, gauss_d3;
- // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009]
+ // Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10.0 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
double gauss_c1, gauss_c2, gauss_d3;
- // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009]
+ // Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
if (update_visualizer_ != 0)
update_visualizer_ (output, std::vector<int>(), *target_, std::vector<int>() );
- if (nr_iterations_ > max_iterations_ ||
- (nr_iterations_ && (std::fabs (delta_p_norm) < transformation_epsilon_)))
- {
- converged_ = true;
- }
+ double cos_angle = 0.5 * (transformation_.coeff (0, 0) + transformation_.coeff (1, 1) + transformation_.coeff (2, 2) - 1);
+ double translation_sqr = transformation_.coeff (0, 3) * transformation_.coeff (0, 3) +
+ transformation_.coeff (1, 3) * transformation_.coeff (1, 3) +
+ transformation_.coeff (2, 3) * transformation_.coeff (2, 3);
nr_iterations_++;
+ if (nr_iterations_ >= max_iterations_ ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ <= 0) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ <= 0)))
+ {
+ converged_ = true;
+ }
}
// Store transformation probability. The realtive differences within each scan registration are accurate
Eigen::Vector3d cov_dxd_pi;
// e^(-d_2/2 * (x_k - mu_k)^T Sigma_k^-1 (x_k - mu_k)) Equation 6.9 [Magnusson 2009]
double e_x_cov_x = exp (-gauss_d2_ * x_trans.dot (c_inv * x_trans) / 2);
- // Calculate probability of transtormed points existance, Equation 6.9 [Magnusson 2009]
+ // Calculate probability of transformed points existence, Equation 6.9 [Magnusson 2009]
double score_inc = -gauss_d1_ * e_x_cov_x;
e_x_cov_x = gauss_d2_ * e_x_cov_x;
// Iterate until max number of iterations, interval convergance or a value satisfies the sufficient decrease, Equation 1.1, and curvature condition, Equation 1.2 [More, Thuente 1994]
while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 /*Sufficient Decrease*/ && d_phi_t <= -nu * d_phi_0 /*Curvature Condition*/))
{
- // Use auxilary function if interval I is not closed
+ // Use auxiliary function if interval I is not closed
if (open_interval)
{
a_t = trialValueSelectionMT (a_l, f_l, g_l,
//std::cout << "eps=" << fabs ((transformation - previous_transformation_).sum ()) << std::endl;
- if (nr_iterations_ > max_iterations_ ||
- (transformation - previous_transformation_).array ().abs ().sum () < transformation_epsilon_)
+ Eigen::Matrix4f transformation_delta = transformation.inverse() * previous_transformation_;
+ double cos_angle = 0.5 * (transformation_delta.coeff (0, 0) + transformation_delta.coeff (1, 1) + transformation_delta.coeff (2, 2) - 1);
+ double translation_sqr = transformation_delta.coeff (0, 3) * transformation_delta.coeff (0, 3) +
+ transformation_delta.coeff (1, 3) * transformation_delta.coeff (1, 3) +
+ transformation_delta.coeff (2, 3) * transformation_delta.coeff (2, 3);
+
+ if (nr_iterations_ >= max_iterations_ ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ <= 0) && (transformation_rotation_epsilon_ > 0 && cos_angle >= transformation_rotation_epsilon_)) ||
+ ((transformation_epsilon_ > 0 && translation_sqr <= transformation_epsilon_) && (transformation_rotation_epsilon_ <= 0)))
+ {
converged_ = true;
+ }
}
final_transformation_ = transformation;
output = intm_cloud;
*
*/
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> void
-pcl::Registration<PointSource, PointTarget, Scalar>::setInputCloud (
- const typename pcl::Registration<PointSource, PointTarget, Scalar>::PointCloudSourceConstPtr &cloud)
-{
- setInputSource (cloud);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointSource, typename PointTarget, typename Scalar> typename pcl::Registration<PointSource, PointTarget, Scalar>::PointCloudSourceConstPtr const
-pcl::Registration<PointSource, PointTarget, Scalar>::getInputCloud ()
-{
- return (getInputSource ());
-}
-
///////////////////////////////////////////////////////////////////////////////////////////
template <typename PointSource, typename PointTarget, typename Scalar> inline void
pcl::Registration<PointSource, PointTarget, Scalar>::setInputTarget (const PointCloudTargetConstPtr &cloud)
using Registration<PointSource, PointTarget>::final_transformation_;
using Registration<PointSource, PointTarget>::transformation_;
using Registration<PointSource, PointTarget>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
using Registration<PointSource, PointTarget>::converged_;
using Registration<PointSource, PointTarget>::corr_dist_threshold_;
using Registration<PointSource, PointTarget>::inlier_threshold_;
using Registration<PointSource, PointTarget>::update_visualizer_;
/** \brief Estimate the transformation and returns the transformed source (input) as output.
- * \param[out] output the resultant input transfomed point cloud dataset
+ * \param[out] output the resultant input transformed point cloud dataset
*/
virtual void
computeTransformation (PointCloudSource &output)
}
/** \brief Estimate the transformation and returns the transformed source (input) as output.
- * \param[out] output the resultant input transfomed point cloud dataset
+ * \param[out] output the resultant input transformed point cloud dataset
* \param[in] guess the initial gross estimation of the transformation
*/
virtual void
PointCloudSource &trans_cloud);
/** \brief Update interval of possible step lengths for More-Thuente method, \f$ I \f$ in More-Thuente (1994)
- * \note Updating Algorithm until some value satifies \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
+ * \note Updating Algorithm until some value satisfies \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
* and Modified Updating Algorithm from then on [More, Thuente 1994].
* \param[in,out] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in Moore-Thuente (1994)
* \param[in,out] f_l value at first endpoint, \f$ f_l \f$ in Moore-Thuente (1994), \f$ \psi(\alpha_l) \f$ for Update Algorithm and \f$ \phi(\alpha_l) \f$ for Modified Update Algorithm
/** \brief Select new trial value for More-Thuente method.
* \note Trial Value Selection [More, Thuente 1994], \f$ \psi(\alpha_k) \f$ is used for \f$ f_k \f$ and \f$ g_k \f$
- * until some value satifies the test \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
+ * until some value satisfies the test \f$ \psi(\alpha_k) \leq 0 \f$ and \f$ \phi'(\alpha_k) \geq 0 \f$
* then \f$ \phi(\alpha_k) \f$ is used from then on.
* \note Interpolation Minimizer equations from Optimization Theory and Methods: Nonlinear Programming By Wenyu Sun, Ya-xiang Yuan (89-100).
* \param[in] a_l first endpoint of interval \f$ I \f$, \f$ \alpha_l \f$ in Moore-Thuente (1994)
double a_u, double f_u, double g_u,
double a_t, double f_t, double g_t);
- /** \brief Auxilary function used to determin endpoints of More-Thuente interval.
+ /** \brief Auxiliary function used to determine endpoints of More-Thuente interval.
* \note \f$ \psi(\alpha) \f$ in Equation 1.6 (Moore, Thuente 1994)
* \param[in] a the step length, \f$ \alpha \f$ in More-Thuente (1994)
* \param[in] f_a function value at step length a, \f$ \phi(\alpha) \f$ in More-Thuente (1994)
* \param[in] f_0 initial function value, \f$ \phi(0) \f$ in Moore-Thuente (1994)
* \param[in] g_0 initial function gradiant, \f$ \phi'(0) \f$ in More-Thuente (1994)
* \param[in] mu the step length, constant \f$ \mu \f$ in Equation 1.1 [More, Thuente 1994]
- * \return sufficent decrease value
+ * \return sufficient decrease value
*/
inline double
auxilaryFunction_PsiMT (double a, double f_a, double f_0, double g_0, double mu = 1.e-4)
return (f_a - f_0 - mu * g_0 * a);
}
- /** \brief Auxilary function derivative used to determin endpoints of More-Thuente interval.
+ /** \brief Auxiliary function derivative used to determine endpoints of More-Thuente interval.
* \note \f$ \psi'(\alpha) \f$, derivative of Equation 1.6 (Moore, Thuente 1994)
* \param[in] g_a function gradient at step length a, \f$ \phi'(\alpha) \f$ in More-Thuente (1994)
* \param[in] g_0 initial function gradiant, \f$ \phi'(0) \f$ in More-Thuente (1994)
* \param[in] mu the step length, constant \f$ \mu \f$ in Equation 1.1 [More, Thuente 1994]
- * \return sufficent decrease derivative
+ * \return sufficient decrease derivative
*/
inline double
auxilaryFunction_dPsiMT (double g_a, double g_0, double mu = 1.e-4)
using Registration<PointSource, PointTarget>::nr_iterations_;
using Registration<PointSource, PointTarget>::max_iterations_;
using Registration<PointSource, PointTarget>::transformation_epsilon_;
+ using Registration<PointSource, PointTarget>::transformation_rotation_epsilon_;
using Registration<PointSource, PointTarget>::transformation_;
- using Registration<PointSource, PointTarget>::previous_transformation_;
+ using Registration<PointSource, PointTarget>::previous_transformation_;
using Registration<PointSource, PointTarget>::final_transformation_;
using Registration<PointSource, PointTarget>::update_visualizer_;
using Registration<PointSource, PointTarget>::indices_;
isComputed () { return is_computed_; }
/** \brief Static method for comparing two pyramid histograms that returns a floating point value between 0 and 1,
- * representing the similiarity between the feature sets on which the two pyramid histograms are based.
+ * representing the similarity between the feature sets on which the two pyramid histograms are based.
* \param pyramid_a Pointer to the first pyramid to be compared (needs to be computed already).
* \param pyramid_b Pointer to the second pyramid to be compared (needs to be computed already).
*/
, transformation_ (Matrix4::Identity ())
, previous_transformation_ (Matrix4::Identity ())
, transformation_epsilon_ (0.0)
+ , transformation_rotation_epsilon_(0.0)
, euclidean_fitness_epsilon_ (-std::numeric_limits<double>::max ())
, corr_dist_threshold_ (std::sqrt (std::numeric_limits<double>::max ()))
, inlier_threshold_ (0.05)
, source_cloud_updated_ (true)
, force_no_recompute_ (false)
, force_no_recompute_reciprocal_ (false)
- , update_visualizer_ (NULL)
, point_representation_ ()
{
}
void
setCorrespondenceEstimation (const CorrespondenceEstimationPtr &ce) { correspondence_estimation_ = ce; }
- /** \brief Provide a pointer to the input source
- * (e.g., the point cloud that we want to align to the target)
- *
- * \param[in] cloud the input point cloud source
- */
- PCL_DEPRECATED ("[pcl::registration::Registration::setInputCloud] setInputCloud is deprecated. Please use setInputSource instead.")
- void
- setInputCloud (const PointCloudSourceConstPtr &cloud);
-
- /** \brief Get a pointer to the input point cloud dataset target. */
- PCL_DEPRECATED ("[pcl::registration::Registration::getInputCloud] getInputCloud is deprecated. Please use getInputSource instead.")
- PointCloudSourceConstPtr const
- getInputCloud ();
-
/** \brief Provide a pointer to the input source
* (e.g., the point cloud that we want to align to the target)
*
inline double
getMaxCorrespondenceDistance () { return (corr_dist_threshold_); }
- /** \brief Set the transformation epsilon (maximum allowable difference between two consecutive
+ /** \brief Set the transformation epsilon (maximum allowable translation squared difference between two consecutive
* transformations) in order for an optimization to be considered as having converged to the final
* solution.
* \param[in] epsilon the transformation epsilon in order for an optimization to be considered as having
inline void
setTransformationEpsilon (double epsilon) { transformation_epsilon_ = epsilon; }
- /** \brief Get the transformation epsilon (maximum allowable difference between two consecutive
+ /** \brief Get the transformation epsilon (maximum allowable translation squared difference between two consecutive
* transformations) as set by the user.
*/
inline double
getTransformationEpsilon () { return (transformation_epsilon_); }
+ /** \brief Set the transformation rotation epsilon (maximum allowable rotation difference between two consecutive
+ * transformations) in order for an optimization to be considered as having converged to the final
+ * solution.
+ * \param[in] epsilon the transformation rotation epsilon in order for an optimization to be considered as having
+ * converged to the final solution (epsilon is the cos(angle) in a axis-angle representation).
+ */
+ inline void
+ setTransformationRotationEpsilon (double epsilon) { transformation_rotation_epsilon_ = epsilon; }
+
+ /** \brief Get the transformation rotation epsilon (maximum allowable difference between two consecutive
+ * transformations) as set by the user (epsilon is the cos(angle) in a axis-angle representation).
+ */
+ inline double
+ getTransformationRotationEpsilon () { return (transformation_rotation_epsilon_); }
+
/** \brief Set the maximum allowed Euclidean error between two consecutive steps in the ICP loop, before
* the algorithm is considered to have converged.
* The error is estimated as the sum of the differences between correspondences in an Euclidean sense,
* \param[in] epsilon the maximum allowed distance error before the algorithm will be considered to have
* converged
*/
-
inline void
setEuclideanFitnessEpsilon (double epsilon) { euclidean_fitness_epsilon_ = epsilon; }
/** \brief Call the registration algorithm which estimates the transformation and returns the transformed source
* (input) as \a output.
- * \param[out] output the resultant input transfomed point cloud dataset
+ * \param[out] output the resultant input transformed point cloud dataset
*/
inline void
align (PointCloudSource &output);
/** \brief Call the registration algorithm which estimates the transformation and returns the transformed source
* (input) as \a output.
- * \param[out] output the resultant input transfomed point cloud dataset
+ * \param[out] output the resultant input transformed point cloud dataset
* \param[in] guess the initial gross estimation of the transformation
*/
inline void
inline const std::string&
getClassName () const { return (reg_name_); }
- /** \brief Internal computation initalization. */
+ /** \brief Internal computation initialization. */
bool
initCompute ();
*/
double transformation_epsilon_;
+ /** \brief The maximum rotation difference between two consecutive transformations in order to consider convergence
+ * (user defined).
+ */
+ double transformation_rotation_epsilon_;
+
/** \brief The maximum allowed Euclidean error between two consecutive steps in the ICP loop, before the
* algorithm is considered to have converged. The error is estimated as the sum of the differences between
* correspondences in an Euclidean sense, divided by the number of correspondences.
double euclidean_fitness_epsilon_;
/** \brief The maximum distance threshold between two correspondent points in source <-> target. If the
- * distance is larger than this threshold, the points will be ignored in the alignement process.
+ * distance is larger than this threshold, the points will be ignored in the alignment process.
*/
double corr_dist_threshold_;
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
virtual void
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
virtual void
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
virtual void
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
inline void
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
inline void
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from
* \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
/** Base functor all the models that need non linear optimization must
* define their own one and implement operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
- * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) dependening on the choosen _Scalar
+ * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) depending on the chosen _Scalar
*/
template<typename _Scalar, int NX=Eigen::Dynamic, int NY=Eigen::Dynamic>
struct Functor
typedef Eigen::Matrix<_Scalar,ValuesAtCompileTime,1> ValueType;
typedef Eigen::Matrix<_Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
- /** \brief Empty Construtor. */
+ /** \brief Empty Constructor. */
Functor () : m_data_points_ (ValuesAtCompileTime) {}
/** \brief Constructor
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
inline void
ConstCloudIterator<PointTarget>& target_it,
Matrix4 &transformation_matrix) const;
- /** \brief Construct a 4 by 4 tranformation matrix from the provided rotation and translation.
+ /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and translation.
* \param[in] alpha the rotation about the x-axis
* \param[in] beta the rotation about the y-axis
* \param[in] gamma the rotation about the z-axis
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
inline void
typename std::vector<Scalar>::const_iterator& weights_it,
Matrix4 &transformation_matrix) const;
- /** \brief Construct a 4 by 4 tranformation matrix from the provided rotation and translation.
+ /** \brief Construct a 4 by 4 transformation matrix from the provided rotation and translation.
* \param[in] alpha the rotation about the x-axis
* \param[in] beta the rotation about the y-axis
* \param[in] gamma the rotation about the z-axis
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from
* \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
* \note Uses the weights given by setWeights.
/** Base functor all the models that need non linear optimization must
* define their own one and implement operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
- * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) dependening on the choosen _Scalar
+ * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) depending on the chosen _Scalar
*/
template<typename _Scalar, int NX=Eigen::Dynamic, int NY=Eigen::Dynamic>
struct Functor
typedef Eigen::Matrix<_Scalar,ValuesAtCompileTime,1> ValueType;
typedef Eigen::Matrix<_Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
- /** \brief Empty Construtor. */
+ /** \brief Empty Constructor. */
Functor () : m_data_points_ (ValuesAtCompileTime) {}
/** \brief Constructor
* \param[in] cloud_src the source point cloud dataset
* \param[in] indices_src the vector of indices describing the points of interest in \a cloud_src
* \param[in] cloud_tgt the target point cloud dataset
- * \param[in] indices_tgt the vector of indices describing the correspondences of the interst points from \a indices_src
+ * \param[in] indices_tgt the vector of indices describing the correspondences of the interest points from \a indices_src
* \param[out] transformation_matrix the resultant transformation matrix
*/
inline void
/** \brief Comparator function for deciding which score is better after running the
* validation on multiple transforms. Pure virtual.
*
- * \note For example, for Euclidean distances smaller is better, for inliers the oposite.
+ * \note For example, for Euclidean distances smaller is better, for inliers the opposite.
*
* \param[in] score1 the first value
* \param[in] score2 the second value
PCL_DEBUG("[pcl::%s::computeTransformation] Convergence failed\n",
getClassName ().c_str ());
}
- //for some reason the static equivalent methode raises an error
+ //for some reason the static equivalent method raises an error
// final_transformation_.block<3,3> (0,0) = (transformation_.block<3,3> (0,0)) * (guess.block<3,3> (0,0));
// final_transformation_.block <3, 1> (0, 3) = transformation_.block <3, 1> (0, 3) + guess.rightCols<1>.block <3, 1> (0, 3);
final_transformation_.topLeftCorner (3, 3) =
int n_inliers_count = 0;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Iterate
int n_inliers_count = 0;
size_t indices_size;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Iterate
int n_inliers_count = 0;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Iterate
int n_inliers_count = 0;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Iterate
int n_inliers_count = 0;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Number of samples to try randomly
int n_inliers_count = 0;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Number of samples to try randomly
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelCircle2D<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCircle2D<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 3 samples
if (samples.size () != 3)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SampleConsensusModelCircle2D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+pcl::SampleConsensusModelCircle2D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelCircle2D<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelCircle2D<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
optimized_coefficients = model_coefficients;
return;
}
- tmp_inliers_ = &inliers;
-
- OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
+ OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, float> lm (num_diff);
int info = lm.minimize (optimized_coefficients);
template <typename PointT> void
pcl::SampleConsensusModelCircle2D<PointT>::projectPoints (
const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points, bool copy_data_fields)
+ PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != 3)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelCircle2D<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 3)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelCircle2D<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCircle2D<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelCircle3D<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCircle3D<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 3 samples
if (samples.size () != 3)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SampleConsensusModelCircle3D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+pcl::SampleConsensusModelCircle3D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelCircle3D<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelCircle3D<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const
{
optimized_coefficients = model_coefficients;
return;
}
- tmp_inliers_ = &inliers;
-
- OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
+ OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
Eigen::VectorXd coeff;
template <typename PointT> void
pcl::SampleConsensusModelCircle3D<PointT>::projectPoints (
const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points, bool copy_data_fields)
+ PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != 7)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelCircle3D<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold)
+ const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 7)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelCircle3D<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCircle3D<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
pcl::SampleConsensusModelCone<PointT, PointNT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 3 samples
if (samples.size () != 3)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCone<PointT, PointNT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
// Calculate the cones perfect normals
Eigen::Vector4f cone_normal = sinf (opening_angle) * height + cosf (opening_angle) * dir;
- // Aproximate the distance from the point to the cone as the difference between
+ // Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
double d_euclid = fabs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
// Calculate the cones perfect normals
Eigen::Vector4f cone_normal = sinf (opening_angle) * height + cosf (opening_angle) * pp_pt_dir;
- // Aproximate the distance from the point to the cone as the difference between
+ // Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
double d_euclid = fabs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> int
pcl::SampleConsensusModelCone<PointT, PointNT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
// Calculate the cones perfect normals
Eigen::Vector4f cone_normal = sinf (opening_angle) * height + cosf (opening_angle) * pp_pt_dir;
- // Aproximate the distance from the point to the cone as the difference between
+ // Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
double d_euclid = fabs (pointToAxisDistance (pt, model_coefficients) - actual_cone_radius);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCone<PointT, PointNT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
optimized_coefficients = model_coefficients;
return;
}
- tmp_inliers_ = &inliers;
-
- OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
+ OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor > num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, float> lm (num_diff);
int info = lm.minimize (optimized_coefficients);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCone<PointT, PointNT>::projectPoints (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != 7)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
pcl::SampleConsensusModelCone<PointT, PointNT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 7)
Eigen::Vector4f height = apex - pt_proj;
double actual_cone_radius = tan (openning_angle) * height.norm ();
- // Aproximate the distance from the point to the cone as the difference between
+ // Approximate the distance from the point to the cone as the difference between
// dist(point,cone_axis) and actual cone radius
if (fabs (static_cast<double>(pointToAxisDistance (pt, model_coefficients) - actual_cone_radius)) > threshold)
return (false);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> double
pcl::SampleConsensusModelCone<PointT, PointNT>::pointToAxisDistance (
- const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients)
+ const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients) const
{
Eigen::Vector4f apex (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0);
Eigen::Vector4f axis_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
-pcl::SampleConsensusModelCone<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCone<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
pcl::SampleConsensusModelCylinder<PointT, PointNT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 2 samples
if (samples.size () != 2)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCylinder<PointT, PointNT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
// Iterate through the 3d points and calculate the distances from them to the sphere
for (size_t i = 0; i < indices_->size (); ++i)
{
- // Aproximate the distance from the point to the cylinder as the difference between
+ // Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
// @note need to revise this.
Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0);
// Iterate through the 3d points and calculate the distances from them to the sphere
for (size_t i = 0; i < indices_->size (); ++i)
{
- // Aproximate the distance from the point to the cylinder as the difference between
+ // Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0);
Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> int
pcl::SampleConsensusModelCylinder<PointT, PointNT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
// Iterate through the 3d points and calculate the distances from them to the sphere
for (size_t i = 0; i < indices_->size (); ++i)
{
- // Aproximate the distance from the point to the cylinder as the difference between
+ // Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
Eigen::Vector4f pt (input_->points[(*indices_)[i]].x, input_->points[(*indices_)[i]].y, input_->points[(*indices_)[i]].z, 0);
Eigen::Vector4f n (normals_->points[(*indices_)[i]].normal[0], normals_->points[(*indices_)[i]].normal[1], normals_->points[(*indices_)[i]].normal[2], 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCylinder<PointT, PointNT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
optimized_coefficients = model_coefficients;
return;
}
- tmp_inliers_ = &inliers;
-
- OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
+ OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor > num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, float> lm (num_diff);
int info = lm.minimize (optimized_coefficients);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCylinder<PointT, PointNT>::projectPoints (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != 7)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
pcl::SampleConsensusModelCylinder<PointT, PointNT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 7)
for (std::set<int>::const_iterator it = indices.begin (); it != indices.end (); ++it)
{
- // Aproximate the distance from the point to the cylinder as the difference between
+ // Approximate the distance from the point to the cylinder as the difference between
// dist(point,cylinder_axis) and cylinder radius
// @note need to revise this.
Eigen::Vector4f pt (input_->points[*it].x, input_->points[*it].y, input_->points[*it].z, 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> double
pcl::SampleConsensusModelCylinder<PointT, PointNT>::pointToLineDistance (
- const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients)
+ const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients) const
{
Eigen::Vector4f line_pt (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0);
Eigen::Vector4f line_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelCylinder<PointT, PointNT>::projectPointToCylinder (
- const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients, Eigen::Vector4f &pt_proj)
+ const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients, Eigen::Vector4f &pt_proj) const
{
Eigen::Vector4f line_pt (model_coefficients[0], model_coefficients[1], model_coefficients[2], 0);
Eigen::Vector4f line_dir (model_coefficients[3], model_coefficients[4], model_coefficients[5], 0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
-pcl::SampleConsensusModelCylinder<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelCylinder<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
(input_->points[samples[0]].z != input_->points[samples[1]].z))
return (true);
- return (true);
+ return (false);
}
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelLine<PointT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 2 samples
if (samples.size () != 2)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelLine<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelLine<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelLine<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelLine<PointT>::projectPoints (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelLine<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
-pcl::SampleConsensusModelNormalParallelPlane<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelNormalParallelPlane<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> int
pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
if (!normals_)
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelNormalPlane<PointT, PointNT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
if (!normals_)
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> int
pcl::SampleConsensusModelNormalSphere<PointT, PointNT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
if (!normals_)
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> void
pcl::SampleConsensusModelNormalSphere<PointT, PointNT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
if (!normals_)
{
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointT, typename PointNT> bool
-pcl::SampleConsensusModelNormalSphere<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelNormalSphere<PointT, PointNT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelParallelLine<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelParallelLine<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelParallelLine<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelParallelLine<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelParallelPlane<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelParallelPlane<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelParallelPlane<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelParallelPlane<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelPerpendicularPlane<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPerpendicularPlane<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelPerpendicularPlane<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelPerpendicularPlane<PointT>::isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelPlane<PointT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 3 samples
if (samples.size () != sample_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != model_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelPlane<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != model_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != model_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelPlane<PointT>::projectPoints (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != model_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelPlane<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (model_coefficients.size () != model_size_)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
-pcl::SampleConsensusModelRegistration<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+pcl::SampleConsensusModelRegistration<PointT>::computeModelCoefficients (const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
if (!target_)
{
std::vector<int> indices_tgt (3);
for (int i = 0; i < 3; ++i)
- indices_tgt[i] = correspondences_[samples[i]];
+ indices_tgt[i] = correspondences_.at (samples[i]);
estimateRigidTransformationSVD (*input_, samples, *target_, indices_tgt, model_coefficients);
return (true);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SampleConsensusModelRegistration<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+pcl::SampleConsensusModelRegistration<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
if (indices_->size () != indices_tgt_->size ())
{
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelRegistration<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
if (indices_->size () != indices_tgt_->size ())
{
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SampleConsensusModelRegistration<PointT>::optimizeModelCoefficients (const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+pcl::SampleConsensusModelRegistration<PointT>::optimizeModelCoefficients (const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
if (indices_->size () != indices_tgt_->size ())
{
for (size_t i = 0; i < inliers.size (); ++i)
{
indices_src[i] = inliers[i];
- indices_tgt[i] = correspondences_[indices_src[i]];
+ indices_tgt[i] = correspondences_.at (indices_src[i]);
}
estimateRigidTransformationSVD (*input_, indices_src, *target_, indices_tgt, optimized_coefficients);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelRegistration<PointT>::estimateRigidTransformationSVD (
- const pcl::PointCloud<PointT> &cloud_src,
- const std::vector<int> &indices_src,
+ const pcl::PointCloud<PointT> &cloud_src,
+ const std::vector<int> &indices_src,
const pcl::PointCloud<PointT> &cloud_tgt,
- const std::vector<int> &indices_tgt,
- Eigen::VectorXf &transform)
+ const std::vector<int> &indices_tgt,
+ Eigen::VectorXf &transform) const
{
transform.resize (16);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
-pcl::SampleConsensusModelRegistration2D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+pcl::SampleConsensusModelRegistration2D<PointT>::getDistancesToModel (const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
PCL_INFO ("[pcl::SampleConsensusModelRegistration2D<PointT>::getDistancesToModel]\n");
if (indices_->size () != indices_tgt_->size ())
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelRegistration2D<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
if (indices_->size () != indices_tgt_->size ())
{
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelSphere<PointT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 4 samples
if (samples.size () != 4)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelSphere<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelSphere<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Check if the model is valid given the user constraints
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelSphere<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
optimized_coefficients = model_coefficients;
return;
}
- tmp_inliers_ = &inliers;
-
- OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
+ OptimizationFunctor functor (this, inliers);
Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, float> lm (num_diff);
int info = lm.minimize (optimized_coefficients);
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelSphere<PointT>::projectPoints (
- const std::vector<int> &, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool)
+ const std::vector<int> &, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 4)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelSphere<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid model coefficients
if (model_coefficients.size () != 4)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelStick<PointT>::computeModelCoefficients (
- const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
+ const std::vector<int> &samples, Eigen::VectorXf &model_coefficients) const
{
// Need 2 samples
if (samples.size () != 2)
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelStick<PointT>::getDistancesToModel (
- const Eigen::VectorXf &model_coefficients, std::vector<double> &distances)
+ const Eigen::VectorXf &model_coefficients, std::vector<double> &distances) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
///////////////////////////////////////////////////////////////////////////
template <typename PointT> int
pcl::SampleConsensusModelStick<PointT>::countWithinDistance (
- const Eigen::VectorXf &model_coefficients, const double threshold)
+ const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelStick<PointT>::optimizeModelCoefficients (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, Eigen::VectorXf &optimized_coefficients) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::SampleConsensusModelStick<PointT>::projectPoints (
- const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields)
+ const std::vector<int> &inliers, const Eigen::VectorXf &model_coefficients, PointCloud &projected_points, bool copy_data_fields) const
{
// Needs a valid model coefficients
if (!isModelValid (model_coefficients))
//////////////////////////////////////////////////////////////////////////
template <typename PointT> bool
pcl::SampleConsensusModelStick<PointT>::doSamplesVerifyModel (
- const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold)
+ const std::set<int> &indices, const Eigen::VectorXf &model_coefficients, const double threshold) const
{
// Needs a valid set of model coefficients
if (!isModelValid (model_coefficients))
#ifndef PCL_SAMPLE_CONSENSUS_MODEL_TYPES_H_
#define PCL_SAMPLE_CONSENSUS_MODEL_TYPES_H_
-#include <map>
-
namespace pcl
{
enum SacModel
};
}
-typedef std::map<pcl::SacModel, unsigned int>::value_type SampleSizeModel;
-// Warning: sample_size_pairs is deprecated and is kept only to prevent breaking existing user code.
-// Starting from PCL 1.8.0 model sample size is a protected member of the SampleConsensusModel class.
-const static SampleSizeModel sample_size_pairs[] = {SampleSizeModel (pcl::SACMODEL_PLANE, 3),
- SampleSizeModel (pcl::SACMODEL_LINE, 2),
- SampleSizeModel (pcl::SACMODEL_CIRCLE2D, 3),
- SampleSizeModel (pcl::SACMODEL_CIRCLE3D, 3),
- SampleSizeModel (pcl::SACMODEL_SPHERE, 4),
- SampleSizeModel (pcl::SACMODEL_CYLINDER, 2),
- SampleSizeModel (pcl::SACMODEL_CONE, 3),
- //SampleSizeModel (pcl::SACMODEL_TORUS, 2),
- SampleSizeModel (pcl::SACMODEL_PARALLEL_LINE, 2),
- SampleSizeModel (pcl::SACMODEL_PERPENDICULAR_PLANE, 3),
- //SampleSizeModel (pcl::PARALLEL_LINES, 2),
- SampleSizeModel (pcl::SACMODEL_NORMAL_PLANE, 3),
- SampleSizeModel (pcl::SACMODEL_NORMAL_SPHERE, 4),
- SampleSizeModel (pcl::SACMODEL_REGISTRATION, 3),
- SampleSizeModel (pcl::SACMODEL_REGISTRATION_2D, 3),
- SampleSizeModel (pcl::SACMODEL_PARALLEL_PLANE, 3),
- SampleSizeModel (pcl::SACMODEL_NORMAL_PARALLEL_PLANE, 3),
- SampleSizeModel (pcl::SACMODEL_STICK, 2)};
-
-namespace pcl
-{
- const static std::map<pcl::SacModel, unsigned int>
- PCL_DEPRECATED("This map is deprecated and is kept only to prevent breaking "
- "existing user code. Starting from PCL 1.8.0 model sample size "
- "is a protected member of the SampleConsensusModel class")
- SAC_SAMPLE_SIZE (sample_size_pairs, sample_size_pairs + sizeof (sample_size_pairs) / sizeof (SampleSizeModel));
-}
-
#endif //#ifndef PCL_SAMPLE_CONSENSUS_MODEL_TYPES_H_
/** \brief Constructor for base SAC.
* \param[in] model a Sample Consensus model
- * \param[in] threshold distance to model threshol
+ * \param[in] threshold distance to model threshold
* \param[in] random if true set the random seed to the current time, else set to 12345 (default: false)
*/
SampleConsensus (const SampleConsensusModelPtr &model,
* for creating a valid model
* \param[out] model_coefficients the computed model coefficients
*/
- virtual bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients) = 0;
+ virtual bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const = 0;
/** \brief Recompute the model coefficients using the given inlier set
* and return them to the user. Pure virtual.
* \param[in] model_coefficients the initial guess for the model coefficients
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- virtual void
- optimizeModelCoefficients (const std::vector<int> &inliers,
+ virtual void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients) = 0;
+ Eigen::VectorXf &optimized_coefficients) const = 0;
/** \brief Compute all distances from the cloud data to a given model. Pure virtual.
*
* \param[in] model_coefficients the coefficients of a model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- virtual void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances) = 0;
+ virtual void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const = 0;
/** \brief Select all the points which respect the given model
* coefficients as inliers. Pure virtual.
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold) = 0;
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const = 0;
/** \brief Create a new point cloud with inliers projected onto the model. Pure virtual.
* \param[in] inliers the data inliers that we want to project on the model
* projected_points cloud to be an exact copy of the input dataset minus
* the point projections on the plane model
*/
- virtual void
- projectPoints (const std::vector<int> &inliers,
+ virtual void
+ projectPoints (const std::vector<int> &inliers,
const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true) = 0;
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const = 0;
/** \brief Verify whether a subset of indices verifies a given set of
* model coefficients. Pure virtual.
* determining the inliers from the outliers
*/
virtual bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold) = 0;
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const = 0;
/** \brief Provide a pointer to the input dataset
* \param[in] cloud the const boost shared pointer to a PointCloud message
* \param[out] max_radius the resultant maximum radius model
*/
inline void
- getRadiusLimits (double &min_radius, double &max_radius)
+ getRadiusLimits (double &min_radius, double &max_radius) const
{
min_radius = radius_min_;
max_radius = radius_max_;
}
-
+
/** \brief Set the maximum distance allowed when drawing random samples
* \param[in] radius the maximum distance (L2 norm)
* \param search
* \param[out] radius the maximum distance (L2 norm)
*/
inline void
- getSamplesMaxDist (double &radius)
+ getSamplesMaxDist (double &radius) const
{
radius = samples_radius_;
}
friend class ProgressiveSampleConsensus<PointT>;
/** \brief Compute the variance of the errors to the model.
- * \param[in] error_sqr_dists a vector holding the distances
- */
+ * \param[in] error_sqr_dists a vector holding the distances
+ */
inline double
- computeVariance (const std::vector<double> &error_sqr_dists)
+ computeVariance (const std::vector<double> &error_sqr_dists) const
{
std::vector<double> dists (error_sqr_dists);
const size_t medIdx = dists.size () >> 1;
* selectWithinDistance must be called).
*/
inline double
- computeVariance ()
+ computeVariance () const
{
if (error_sqr_dists_.empty ())
{
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients)
+ isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (model_coefficients.size () != model_size_)
{
/** Base functor all the models that need non linear optimization must
* define their own one and implement operator() (const Eigen::VectorXd& x, Eigen::VectorXd& fvec)
- * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) dependening on the choosen _Scalar
+ * or operator() (const Eigen::VectorXf& x, Eigen::VectorXf& fvec) depending on the chosen _Scalar
*/
template<typename _Scalar, int NX=Eigen::Dynamic, int NY=Eigen::Dynamic>
struct Functor
* \param[in] random if true set the random seed to the current time, else set to 12345 (default: false)
*/
SampleConsensusModelCircle2D (const PointCloudConstPtr &cloud, bool random = false)
- : SampleConsensusModel<PointT> (cloud, random), tmp_inliers_ ()
+ : SampleConsensusModel<PointT> (cloud, random)
{
model_name_ = "SampleConsensusModelCircle2D";
sample_size_ = 3;
SampleConsensusModelCircle2D (const PointCloudConstPtr &cloud,
const std::vector<int> &indices,
bool random = false)
- : SampleConsensusModel<PointT> (cloud, indices, random), tmp_inliers_ ()
+ : SampleConsensusModel<PointT> (cloud, indices, random)
{
model_name_ = "SampleConsensusModelCircle2D";
sample_size_ = 3;
* \param[in] source the model to copy into this
*/
SampleConsensusModelCircle2D (const SampleConsensusModelCircle2D &source) :
- SampleConsensusModel<PointT> (), tmp_inliers_ ()
+ SampleConsensusModel<PointT> ()
{
*this = source;
model_name_ = "SampleConsensusModelCircle2D";
operator = (const SampleConsensusModelCircle2D &source)
{
SampleConsensusModel<PointT>::operator=(source);
- tmp_inliers_ = source.tmp_inliers_;
return (*this);
}
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given 2D circle model.
* \param[in] model_coefficients the coefficients of a 2D circle model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Compute all distances from the cloud data to a given 2D circle model.
* \param[in] model_coefficients the coefficients of a 2D circle model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the 2d circle coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the 2d circle model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the optimization
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the 2d circle model.
* \param[in] inliers the data inliers that we want to project on the 2d circle model
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given 2d circle model coefficients.
* \param[in] indices the data indices that need to be tested against the 2d circle model
* \param[in] model_coefficients the 2d circle model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_CIRCLE2D). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief Check if a sample of indices results in a good sample of points indices.
* \param[in] samples the resultant index samples
isSampleGood(const std::vector<int> &samples) const;
private:
- /** \brief Temporary pointer to a list of given indices for optimizeModelCoefficients () */
- const std::vector<int> *tmp_inliers_;
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic ignored "-Weffc++"
struct OptimizationFunctor : pcl::Functor<float>
{
/** \brief Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
+ * \param[in] indices the indices of data points to evaluate
* \param[in] estimator pointer to the estimator object
- * \param[in] distance distance computation function pointer
*/
- OptimizationFunctor (int m_data_points, pcl::SampleConsensusModelCircle2D<PointT> *model) :
- pcl::Functor<float>(m_data_points), model_ (model) {}
+ OptimizationFunctor (const pcl::SampleConsensusModelCircle2D<PointT> *model, const std::vector<int>& indices) :
+ pcl::Functor<float> (indices.size ()), model_ (model), indices_ (indices) {}
/** Cost function to be minimized
* \param[in] x the variables array
for (int i = 0; i < values (); ++i)
{
// Compute the difference between the center of the circle and the datapoint X_i
- float xt = model_->input_->points[(*model_->tmp_inliers_)[i]].x - x[0];
- float yt = model_->input_->points[(*model_->tmp_inliers_)[i]].y - x[1];
-
+ float xt = model_->input_->points[indices_[i]].x - x[0];
+ float yt = model_->input_->points[indices_[i]].y - x[1];
+
// g = sqrt ((x-a)^2 + (y-b)^2) - R
fvec[i] = std::sqrt (xt * xt + yt * yt) - x[2];
}
return (0);
}
- pcl::SampleConsensusModelCircle2D<PointT> *model_;
+ const pcl::SampleConsensusModelCircle2D<PointT> *model_;
+ const std::vector<int> &indices_;
};
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic warning "-Weffc++"
* \param[in] source the model to copy into this
*/
SampleConsensusModelCircle3D (const SampleConsensusModelCircle3D &source) :
- SampleConsensusModel<PointT> (), tmp_inliers_ ()
+ SampleConsensusModel<PointT> ()
{
*this = source;
model_name_ = "SampleConsensusModelCircle3D";
operator = (const SampleConsensusModelCircle3D &source)
{
SampleConsensusModel<PointT>::operator=(source);
- tmp_inliers_ = source.tmp_inliers_;
return (*this);
}
*/
bool
computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given 3D circle model.
* \param[in] model_coefficients the coefficients of a 2D circle model that we need to compute distances to
*/
void
getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ std::vector<double> &distances) const;
/** \brief Compute all distances from the cloud data to a given 3D circle model.
* \param[in] model_coefficients the coefficients of a 3D circle model that we need to compute distances to
*/
virtual int
countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Recompute the 3d circle coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the 3d circle model after refinement (e.g. after SVD)
void
optimizeModelCoefficients (const std::vector<int> &inliers,
const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the 3d circle model.
* \param[in] inliers the data inliers that we want to project on the 3d circle model
projectPoints (const std::vector<int> &inliers,
const Eigen::VectorXf &model_coefficients,
PointCloud &projected_points,
- bool copy_data_fields = true);
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given 3d circle model coefficients.
* \param[in] indices the data indices that need to be tested against the 3d circle model
bool
doSamplesVerifyModel (const std::set<int> &indices,
const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_CIRCLE3D). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief Check if a sample of indices results in a good sample of points indices.
* \param[in] samples the resultant index samples
isSampleGood(const std::vector<int> &samples) const;
private:
- /** \brief Temporary pointer to a list of given indices for optimizeModelCoefficients () */
- const std::vector<int> *tmp_inliers_;
-
/** \brief Functor for the optimization function */
struct OptimizationFunctor : pcl::Functor<double>
{
/** Functor constructor
- * \param[in] m_data_points the number of functions
- * \param[in] estimator pointer to the estimator object
- * \param[in] distance distance computation function pointer
- */
- OptimizationFunctor (int m_data_points, pcl::SampleConsensusModelCircle3D<PointT> *model) :
- pcl::Functor<double> (m_data_points), model_ (model) {}
+ * \param[in] indices the indices of data points to evaluate
+ * \param[in] estimator pointer to the estimator object
+ */
+ OptimizationFunctor (const pcl::SampleConsensusModelCircle3D<PointT> *model, const std::vector<int>& indices) :
+ pcl::Functor<double> (indices.size ()), model_ (model), indices_ (indices) {}
/** Cost function to be minimized
* \param[in] x the variables array
{
// what i have:
// P : Sample Point
- Eigen::Vector3d P (model_->input_->points[(*model_->tmp_inliers_)[i]].x, model_->input_->points[(*model_->tmp_inliers_)[i]].y, model_->input_->points[(*model_->tmp_inliers_)[i]].z);
+ Eigen::Vector3d P (model_->input_->points[indices_[i]].x, model_->input_->points[indices_[i]].y, model_->input_->points[indices_[i]].z);
// C : Circle Center
Eigen::Vector3d C (x[0], x[1], x[2]);
// N : Circle (Plane) Normal
return (0);
}
- pcl::SampleConsensusModelCircle3D<PointT> *model_;
+ const pcl::SampleConsensusModelCircle3D<PointT> *model_;
+ const std::vector<int> &indices_;
};
};
}
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/pcl_macros.h>
#include <pcl/common/common.h>
#include <pcl/common/distances.h>
#include <limits.h>
, eps_angle_ (0)
, min_angle_ (-std::numeric_limits<double>::max ())
, max_angle_ (std::numeric_limits<double>::max ())
- , tmp_inliers_ ()
{
model_name_ = "SampleConsensusModelCone";
sample_size_ = 3;
, eps_angle_ (0)
, min_angle_ (-std::numeric_limits<double>::max ())
, max_angle_ (std::numeric_limits<double>::max ())
- , tmp_inliers_ ()
{
model_name_ = "SampleConsensusModelCone";
sample_size_ = 3;
SampleConsensusModelCone (const SampleConsensusModelCone &source) :
SampleConsensusModel<PointT> (),
SampleConsensusModelFromNormals<PointT, PointNT> (),
- axis_ (), eps_angle_ (), min_angle_ (), max_angle_ (), tmp_inliers_ ()
+ axis_ (), eps_angle_ (), min_angle_ (), max_angle_ ()
{
*this = source;
model_name_ = "SampleConsensusModelCone";
eps_angle_ = source.eps_angle_;
min_angle_ = source.min_angle_;
max_angle_ = source.max_angle_;
- tmp_inliers_ = source.tmp_inliers_;
return (*this);
}
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given cone model.
* \param[in] model_coefficients the coefficients of a cone model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a cone model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the cone coefficients using the given inlier set and return them to the user.
* \param[in] model_coefficients the initial guess for the optimization
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the cone model.
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given cone model coefficients.
* \param[in] indices the data indices that need to be tested against the cone model
* \param[in] model_coefficients the cone model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_CONE). */
inline pcl::SacModel
* \param[in] pt a point
* \param[in] model_coefficients the line coefficients (a point on the line, line direction)
*/
- double
- pointToAxisDistance (const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients);
-
- /** \brief Get a string representation of the name of this class. */
- PCL_DEPRECATED ("[pcl::SampleConsensusModelCone::getName] getName is deprecated. Please use getClassName instead.")
- std::string
- getName () const { return (model_name_); }
+ double
+ pointToAxisDistance (const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients) const;
- protected:
/** \brief Check whether a model is valid given the user constraints.
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief Check if a sample of indices results in a good sample of points
* indices. Pure virtual.
double min_angle_;
double max_angle_;
- /** \brief temporary pointer to a list of given indices for optimizeModelCoefficients () */
- const std::vector<int> *tmp_inliers_;
-
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic ignored "-Weffc++"
#endif
struct OptimizationFunctor : pcl::Functor<float>
{
/** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
+ * \param[in] indices the indices of data points to evaluate
* \param[in] estimator pointer to the estimator object
- * \param[in] distance distance computation function pointer
*/
- OptimizationFunctor (int m_data_points, pcl::SampleConsensusModelCone<PointT, PointNT> *model) :
- pcl::Functor<float> (m_data_points), model_ (model) {}
+ OptimizationFunctor (const pcl::SampleConsensusModelCone<PointT, PointNT> *model, const std::vector<int>& indices) :
+ pcl::Functor<float> (indices.size ()), model_ (model), indices_ (indices) {}
/** Cost function to be minimized
* \param[in] x variables array
for (int i = 0; i < values (); ++i)
{
// dist = f - r
- Eigen::Vector4f pt (model_->input_->points[(*model_->tmp_inliers_)[i]].x,
- model_->input_->points[(*model_->tmp_inliers_)[i]].y,
- model_->input_->points[(*model_->tmp_inliers_)[i]].z, 0);
+ Eigen::Vector4f pt (model_->input_->points[indices_[i]].x,
+ model_->input_->points[indices_[i]].y,
+ model_->input_->points[indices_[i]].z, 0);
// Calculate the point's projection on the cone axis
float k = (pt.dot (axis_dir) - apexdotdir) * dirdotdir;
return (0);
}
- pcl::SampleConsensusModelCone<PointT, PointNT> *model_;
+ const pcl::SampleConsensusModelCone<PointT, PointNT> *model_;
+ const std::vector<int> &indices_;
};
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic warning "-Weffc++"
#include <pcl/sample_consensus/sac_model.h>
#include <pcl/sample_consensus/model_types.h>
-#include <pcl/pcl_macros.h>
#include <pcl/common/common.h>
#include <pcl/common/distances.h>
, SampleConsensusModelFromNormals<PointT, PointNT> ()
, axis_ (Eigen::Vector3f::Zero ())
, eps_angle_ (0)
- , tmp_inliers_ ()
{
model_name_ = "SampleConsensusModelCylinder";
sample_size_ = 2;
, SampleConsensusModelFromNormals<PointT, PointNT> ()
, axis_ (Eigen::Vector3f::Zero ())
, eps_angle_ (0)
- , tmp_inliers_ ()
{
model_name_ = "SampleConsensusModelCylinder";
sample_size_ = 2;
SampleConsensusModel<PointT> (),
SampleConsensusModelFromNormals<PointT, PointNT> (),
axis_ (Eigen::Vector3f::Zero ()),
- eps_angle_ (0),
- tmp_inliers_ ()
+ eps_angle_ (0)
{
*this = source;
model_name_ = "SampleConsensusModelCylinder";
SampleConsensusModelFromNormals<PointT, PointNT>::operator=(source);
axis_ = source.axis_;
eps_angle_ = source.eps_angle_;
- tmp_inliers_ = source.tmp_inliers_;
return (*this);
}
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given cylinder model.
* \param[in] model_coefficients the coefficients of a cylinder model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a cylinder model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the cylinder coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the cylinder model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the optimization
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the cylinder model.
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given cylinder model coefficients.
* \param[in] indices the data indices that need to be tested against the cylinder model
* \param[in] model_coefficients the cylinder model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_CYLINDER). */
inline pcl::SacModel
* \param[in] pt a point
* \param[in] model_coefficients the line coefficients (a point on the line, line direction)
*/
- double
- pointToLineDistance (const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients);
+ double
+ pointToLineDistance (const Eigen::Vector4f &pt, const Eigen::VectorXf &model_coefficients) const;
/** \brief Project a point onto a line given by a point and a direction vector
* \param[in] pt the input point to project
* \param[out] pt_proj the resultant projected point
*/
inline void
- projectPointToLine (const Eigen::Vector4f &pt,
- const Eigen::Vector4f &line_pt,
+ projectPointToLine (const Eigen::Vector4f &pt,
+ const Eigen::Vector4f &line_pt,
const Eigen::Vector4f &line_dir,
- Eigen::Vector4f &pt_proj)
+ Eigen::Vector4f &pt_proj) const
{
float k = (pt.dot (line_dir) - line_pt.dot (line_dir)) / line_dir.dot (line_dir);
// Calculate the projection of the point on the line
* \param[in] model_coefficients the coefficients of the cylinder (point_on_axis, axis_direction, cylinder_radius_R)
* \param[out] pt_proj the resultant projected point
*/
- void
- projectPointToCylinder (const Eigen::Vector4f &pt,
- const Eigen::VectorXf &model_coefficients,
- Eigen::Vector4f &pt_proj);
-
- /** \brief Get a string representation of the name of this class. */
- PCL_DEPRECATED ("[pcl::SampleConsensusModelCylinder::getName] getName is deprecated. Please use getClassName instead.")
- std::string
- getName () const { return (model_name_); }
+ void
+ projectPointToCylinder (const Eigen::Vector4f &pt,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::Vector4f &pt_proj) const;
- protected:
/** \brief Check whether a model is valid given the user constraints.
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief Check if a sample of indices results in a good sample of points
* indices. Pure virtual.
/** \brief The maximum allowed difference between the plane normal and the given axis. */
double eps_angle_;
- /** \brief temporary pointer to a list of given indices for optimizeModelCoefficients () */
- const std::vector<int> *tmp_inliers_;
-
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic ignored "-Weffc++"
#endif
struct OptimizationFunctor : pcl::Functor<float>
{
/** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
+ * \param[in] indices the indices of data points to evaluate
* \param[in] estimator pointer to the estimator object
- * \param[in] distance distance computation function pointer
*/
- OptimizationFunctor (int m_data_points, pcl::SampleConsensusModelCylinder<PointT, PointNT> *model) :
- pcl::Functor<float> (m_data_points), model_ (model) {}
+ OptimizationFunctor (const pcl::SampleConsensusModelCylinder<PointT, PointNT> *model, const std::vector<int>& indices) :
+ pcl::Functor<float> (indices.size ()), model_ (model), indices_ (indices) {}
/** Cost function to be minimized
* \param[in] x variables array
for (int i = 0; i < values (); ++i)
{
// dist = f - r
- Eigen::Vector4f pt (model_->input_->points[(*model_->tmp_inliers_)[i]].x,
- model_->input_->points[(*model_->tmp_inliers_)[i]].y,
- model_->input_->points[(*model_->tmp_inliers_)[i]].z, 0);
+ Eigen::Vector4f pt (model_->input_->points[indices_[i]].x,
+ model_->input_->points[indices_[i]].y,
+ model_->input_->points[indices_[i]].z, 0);
fvec[i] = static_cast<float> (pcl::sqrPointToLineDistance (pt, line_pt, line_dir) - x[6]*x[6]);
}
return (0);
}
- pcl::SampleConsensusModelCylinder<PointT, PointNT> *model_;
+ const pcl::SampleConsensusModelCylinder<PointT, PointNT> *model_;
+ const std::vector<int> &indices_;
};
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic warning "-Weffc++"
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all squared distances from the cloud data to a given line model.
* \param[in] model_coefficients the coefficients of a line model that we need to compute distances to
* \param[out] distances the resultant estimated squared distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a line model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the line coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the line model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the model coefficients
* \param[out] optimized_coefficients the resultant recomputed coefficients after optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the line model.
* \param[in] inliers the data inliers that we want to project on the line model
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given line model coefficients.
* \param[in] indices the data indices that need to be tested against the line model
* \param[in] model_coefficients the line model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_LINE). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
private:
/** \brief The axis along which we need to search for a plane perpendicular to. */
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Compute all distances from the cloud data to a given plane model.
* \param[in] model_coefficients the coefficients of a plane model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Return an unique id for this model (SACMODEL_NORMAL_PLANE). */
inline pcl::SacModel
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Compute all distances from the cloud data to a given sphere model.
* \param[in] model_coefficients the coefficients of a sphere model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Return an unique id for this model (SACMODEL_NORMAL_SPHERE). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
};
}
*/
virtual int
countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Compute all squared distances from the cloud data to a given line model.
* \param[in] model_coefficients the coefficients of a line model that we need to compute distances to
*/
void
getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ std::vector<double> &distances) const;
/** \brief Return an unique id for this model (SACMODEL_PARALLEL_LINE). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief The axis along which we need to search for a line. */
Eigen::Vector3f axis_;
*/
virtual int
countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Compute all distances from the cloud data to a given plane model.
* \param[in] model_coefficients the coefficients of a plane model that we need to compute distances to
*/
void
getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ std::vector<double> &distances) const;
/** \brief Return an unique id for this model (SACMODEL_PARALLEL_PLANE). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief The axis along which we need to search for a plane perpendicular to. */
Eigen::Vector3f axis_;
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Compute all distances from the cloud data to a given plane model.
* \param[in] model_coefficients the coefficients of a plane model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Return an unique id for this model (SACMODEL_PERPENDICULAR_PLANE). */
inline pcl::SacModel
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients);
+ isModelValid (const Eigen::VectorXf &model_coefficients) const;
/** \brief The axis along which we need to search for a plane perpendicular to. */
Eigen::Vector3f axis_;
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given plane model.
* \param[in] model_coefficients the coefficients of a plane model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a plane model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the plane coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the plane model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the model coefficients
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the plane model.
* \param[in] inliers the data inliers that we want to project on the plane model
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given plane model coefficients.
* \param[in] indices the data indices that need to be tested against the plane model
* \param[in] model_coefficients the plane model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_PLANE). */
inline pcl::SacModel
*/
bool
computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the transformed points to their correspondences
* \param[in] model_coefficients the 4x4 transformation matrix
*/
void
getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the 4x4 transformation matrix
*/
virtual int
countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Recompute the 4x4 transformation using the given inlier set
* \param[in] inliers the data inliers found as supporting the model
void
optimizeModelCoefficients (const std::vector<int> &inliers,
const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ Eigen::VectorXf &optimized_coefficients) const;
void
projectPoints (const std::vector<int> &,
const Eigen::VectorXf &,
- PointCloud &, bool = true)
+ PointCloud &, bool = true) const
{
};
bool
doSamplesVerifyModel (const std::set<int> &,
const Eigen::VectorXf &,
- const double)
+ const double) const
{
return (false);
}
const std::vector<int> &indices_src,
const pcl::PointCloud<PointT> &cloud_tgt,
const std::vector<int> &indices_tgt,
- Eigen::VectorXf &transform);
+ Eigen::VectorXf &transform) const;
/** \brief Compute mappings between original indices of the input_/target_ clouds. */
void
*/
void
getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the 4x4 transformation matrix
*/
virtual int
countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ const double threshold) const;
/** \brief Set the camera projection matrix.
* \param[in] projection_matrix the camera projection matrix
* \param[in] random if true set the random seed to the current time, else set to 12345 (default: false)
*/
SampleConsensusModelSphere (const PointCloudConstPtr &cloud,
- bool random = false)
- : SampleConsensusModel<PointT> (cloud, random), tmp_inliers_ ()
+ bool random = false)
+ : SampleConsensusModel<PointT> (cloud, random)
{
model_name_ = "SampleConsensusModelSphere";
sample_size_ = 4;
* \param[in] indices a vector of point indices to be used from \a cloud
* \param[in] random if true set the random seed to the current time, else set to 12345 (default: false)
*/
- SampleConsensusModelSphere (const PointCloudConstPtr &cloud,
+ SampleConsensusModelSphere (const PointCloudConstPtr &cloud,
const std::vector<int> &indices,
- bool random = false)
- : SampleConsensusModel<PointT> (cloud, indices, random), tmp_inliers_ ()
+ bool random = false)
+ : SampleConsensusModel<PointT> (cloud, indices, random)
{
model_name_ = "SampleConsensusModelSphere";
sample_size_ = 4;
* \param[in] source the model to copy into this
*/
SampleConsensusModelSphere (const SampleConsensusModelSphere &source) :
- SampleConsensusModel<PointT> (), tmp_inliers_ ()
+ SampleConsensusModel<PointT> ()
{
*this = source;
model_name_ = "SampleConsensusModelSphere";
operator = (const SampleConsensusModelSphere &source)
{
SampleConsensusModel<PointT>::operator=(source);
- tmp_inliers_ = source.tmp_inliers_;
return (*this);
}
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all distances from the cloud data to a given sphere model.
* \param[in] model_coefficients the coefficients of a sphere model that we need to compute distances to
* \param[out] distances the resultant estimated distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a sphere model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the sphere coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the sphere model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the optimization
* \param[out] optimized_coefficients the resultant recomputed coefficients after non-linear optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the sphere model.
* \param[in] inliers the data inliers that we want to project on the sphere model
* \param[in] copy_data_fields set to true if we need to copy the other data fields
* \todo implement this.
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given sphere model coefficients.
* \param[in] indices the data indices that need to be tested against the sphere model
* \param[in] model_coefficients the sphere model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_SPHERE). */
inline pcl::SacModel getModelType () const { return (SACMODEL_SPHERE); }
* \param[in] model_coefficients the set of model coefficients
*/
virtual bool
- isModelValid (const Eigen::VectorXf &model_coefficients)
+ isModelValid (const Eigen::VectorXf &model_coefficients) const
{
if (!SampleConsensusModel<PointT>::isModelValid (model_coefficients))
return (false);
isSampleGood(const std::vector<int> &samples) const;
private:
- /** \brief Temporary pointer to a list of given indices for optimizeModelCoefficients () */
- const std::vector<int> *tmp_inliers_;
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic ignored "-Weffc++"
struct OptimizationFunctor : pcl::Functor<float>
{
/** Functor constructor
- * \param[in] m_data_points the number of data points to evaluate
+ * \param[in] indices the indices of data points to evaluate
* \param[in] estimator pointer to the estimator object
- * \param[in] distance distance computation function pointer
*/
- OptimizationFunctor (int m_data_points, pcl::SampleConsensusModelSphere<PointT> *model) :
- pcl::Functor<float>(m_data_points), model_ (model) {}
+ OptimizationFunctor (const pcl::SampleConsensusModelSphere<PointT> *model, const std::vector<int>& indices) :
+ pcl::Functor<float> (indices.size ()), model_ (model), indices_ (indices) {}
/** Cost function to be minimized
* \param[in] x the variables array
for (int i = 0; i < values (); ++i)
{
// Compute the difference between the center of the sphere and the datapoint X_i
- cen_t[0] = model_->input_->points[(*model_->tmp_inliers_)[i]].x - x[0];
- cen_t[1] = model_->input_->points[(*model_->tmp_inliers_)[i]].y - x[1];
- cen_t[2] = model_->input_->points[(*model_->tmp_inliers_)[i]].z - x[2];
-
+ cen_t[0] = model_->input_->points[indices_[i]].x - x[0];
+ cen_t[1] = model_->input_->points[indices_[i]].y - x[1];
+ cen_t[2] = model_->input_->points[indices_[i]].z - x[2];
+
// g = sqrt ((x-a)^2 + (y-b)^2 + (z-c)^2) - R
fvec[i] = std::sqrt (cen_t.dot (cen_t)) - x[3];
}
return (0);
}
-
- pcl::SampleConsensusModelSphere<PointT> *model_;
+
+ const pcl::SampleConsensusModelSphere<PointT> *model_;
+ const std::vector<int> &indices_;
};
#if defined BUILD_Maintainer && defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ > 3
#pragma GCC diagnostic warning "-Weffc++"
* \param[in] samples the point indices found as possible good candidates for creating a valid model
* \param[out] model_coefficients the resultant model coefficients
*/
- bool
- computeModelCoefficients (const std::vector<int> &samples,
- Eigen::VectorXf &model_coefficients);
+ bool
+ computeModelCoefficients (const std::vector<int> &samples,
+ Eigen::VectorXf &model_coefficients) const;
/** \brief Compute all squared distances from the cloud data to a given stick model.
* \param[in] model_coefficients the coefficients of a stick model that we need to compute distances to
* \param[out] distances the resultant estimated squared distances
*/
- void
- getDistancesToModel (const Eigen::VectorXf &model_coefficients,
- std::vector<double> &distances);
+ void
+ getDistancesToModel (const Eigen::VectorXf &model_coefficients,
+ std::vector<double> &distances) const;
/** \brief Select all the points which respect the given model coefficients as inliers.
* \param[in] model_coefficients the coefficients of a stick model that we need to compute distances to
* \return the resultant number of inliers
*/
virtual int
- countWithinDistance (const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ countWithinDistance (const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Recompute the stick coefficients using the given inlier set and return them to the user.
* @note: these are the coefficients of the stick model after refinement (e.g. after SVD)
* \param[in] model_coefficients the initial guess for the model coefficients
* \param[out] optimized_coefficients the resultant recomputed coefficients after optimization
*/
- void
- optimizeModelCoefficients (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- Eigen::VectorXf &optimized_coefficients);
+ void
+ optimizeModelCoefficients (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ Eigen::VectorXf &optimized_coefficients) const;
/** \brief Create a new point cloud with inliers projected onto the stick model.
* \param[in] inliers the data inliers that we want to project on the stick model
* \param[out] projected_points the resultant projected points
* \param[in] copy_data_fields set to true if we need to copy the other data fields
*/
- void
- projectPoints (const std::vector<int> &inliers,
- const Eigen::VectorXf &model_coefficients,
- PointCloud &projected_points,
- bool copy_data_fields = true);
+ void
+ projectPoints (const std::vector<int> &inliers,
+ const Eigen::VectorXf &model_coefficients,
+ PointCloud &projected_points,
+ bool copy_data_fields = true) const;
/** \brief Verify whether a subset of indices verifies the given stick model coefficients.
* \param[in] indices the data indices that need to be tested against the plane model
* \param[in] model_coefficients the plane model coefficients
* \param[in] threshold a maximum admissible distance threshold for determining the inliers from the outliers
*/
- bool
- doSamplesVerifyModel (const std::set<int> &indices,
- const Eigen::VectorXf &model_coefficients,
- const double threshold);
+ bool
+ doSamplesVerifyModel (const std::set<int> &indices,
+ const Eigen::VectorXf &model_coefficients,
+ const double threshold) const;
/** \brief Return an unique id for this model (SACMODEL_STICK). */
inline pcl::SacModel
{
point_representation_ = point_representation;
dim_ = point_representation->getNumberOfDimensions ();
- if (input_) // re-create the tree, since point_represenation might change things such as the scaling of the point clouds.
+ if (input_) // re-create the tree, since point_representation might change things such as the scaling of the point clouds.
setInputCloud (input_, indices_);
}
testPoint (query, k, results, yBegin * input_->width + xBegin);
else // point lys
{
- // find the box that touches the image border -> dont waste time evaluating boxes that are completely outside the image!
+ // find the box that touches the image border -> don't waste time evaluating boxes that are completely outside the image!
int dist = std::numeric_limits<int>::max ();
if (xBegin < 0)
/** \brief test if point given by index is among the k NN in results to the query point.
* \param[in] query query point
* \param[in] k number of maximum nn interested in
- * \param[in] queue priority queue with k NN
+ * \param[in,out] queue priority queue with k NN
* \param[in] index index on point to be tested
* \return whether the top element changed or not.
*/
float dist_z = point.z - query.z;
float squared_distance = dist_x * dist_x + dist_y * dist_y + dist_z * dist_z;
if (queue.size () < k)
+ {
queue.push (Entry (index, squared_distance));
+ return queue.size () == k;
+ }
else if (queue.top ().distance > squared_distance)
{
queue.pop ();
protected:
PointCloudConstPtr input_;
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
}
{
}
+ /** \brief Provide a pointer to the search object.
+ * \param[in] tree a pointer to the spatial search object.
+ */
+ inline void
+ setSearchMethod (const SearcherPtr &tree)
+ {
+ searcher_ = tree;
+ }
+
+ /** \brief Get a pointer to the search method used.
+ */
+ inline const SearcherPtr&
+ getSearchMethod () const
+ {
+ return searcher_;
+ }
+
/** \brief Set the condition that needs to hold for neighboring points to be considered part of the same cluster.
* \details Any two points within a certain distance from one another will need to evaluate this condition in order to be made part of the same cluster.
* The distance can be set using setClusterTolerance().
/** \brief Use clean cutting */
bool use_clean_cutting_;
- /** \brief Interations for RANSAC */
+ /** \brief Iterations for RANSAC */
uint32_t ransac_itrs_;
#include <pcl/segmentation/boost.h>
#include <pcl/segmentation/comparator.h>
+#include <pcl/point_types.h>
namespace pcl
{
- /** \brief EuclideanClusterComparator is a comparator used for finding clusters supported by planar surfaces.
- * This needs to be run as a second pass after extracting planar surfaces, using MultiPlaneSegmentation for example.
+ namespace experimental
+ {
+ template<typename PointT, typename PointLT = pcl::Label>
+ class EuclideanClusterComparator : public ::pcl::Comparator<PointT>
+ {
+ protected:
+
+ using pcl::Comparator<PointT>::input_;
+
+ public:
+ using typename Comparator<PointT>::PointCloud;
+ using typename Comparator<PointT>::PointCloudConstPtr;
+
+ typedef typename pcl::PointCloud<PointLT> PointCloudL;
+ typedef typename PointCloudL::Ptr PointCloudLPtr;
+ typedef typename PointCloudL::ConstPtr PointCloudLConstPtr;
+
+ typedef boost::shared_ptr<EuclideanClusterComparator<PointT, PointLT> > Ptr;
+ typedef boost::shared_ptr<const EuclideanClusterComparator<PointT, PointLT> > ConstPtr;
+
+ typedef std::set<uint32_t> ExcludeLabelSet;
+ typedef boost::shared_ptr<ExcludeLabelSet> ExcludeLabelSetPtr;
+ typedef boost::shared_ptr<const ExcludeLabelSet> ExcludeLabelSetConstPtr;
+
+ /** \brief Default constructor for EuclideanClusterComparator. */
+ EuclideanClusterComparator ()
+ : distance_threshold_ (0.005f)
+ , depth_dependent_ ()
+ , z_axis_ ()
+ {}
+
+ virtual void
+ setInputCloud (const PointCloudConstPtr& cloud)
+ {
+ input_ = cloud;
+ Eigen::Matrix3f rot = input_->sensor_orientation_.toRotationMatrix ();
+ z_axis_ = rot.col (2);
+ }
+
+ /** \brief Set the tolerance in meters for difference in perpendicular distance (d component of plane equation) to the plane between neighboring points, to be considered part of the same plane.
+ * \param[in] distance_threshold the tolerance in meters
+ * \param depth_dependent
+ */
+ inline void
+ setDistanceThreshold (float distance_threshold, bool depth_dependent)
+ {
+ distance_threshold_ = distance_threshold;
+ depth_dependent_ = depth_dependent;
+ }
+
+ /** \brief Get the distance threshold in meters (d component of plane equation) between neighboring points, to be considered part of the same plane. */
+ inline float
+ getDistanceThreshold () const
+ {
+ return (distance_threshold_);
+ }
+
+ /** \brief Set label cloud
+ * \param[in] labels The label cloud
+ */
+ void
+ setLabels (const PointCloudLPtr& labels)
+ {
+ labels_ = labels;
+ }
+
+ const ExcludeLabelSetConstPtr&
+ getExcludeLabels () const
+ {
+ return exclude_labels_;
+ }
+
+ /** \brief Set labels in the label cloud to exclude.
+ * \param exclude_labels a vector of bools corresponding to whether or not a given label should be considered
+ */
+ void
+ setExcludeLabels (const ExcludeLabelSetConstPtr &exclude_labels)
+ {
+ exclude_labels_ = exclude_labels;
+ }
+
+ /** \brief Compare points at two indices by their euclidean distance
+ * \param idx1 The first index for the comparison
+ * \param idx2 The second index for the comparison
+ */
+ virtual bool
+ compare (int idx1, int idx2) const
+ {
+ if (labels_ && exclude_labels_)
+ {
+ assert (labels_->size () == input_->size ());
+ const uint32_t &label1 = (*labels_)[idx1].label;
+ const uint32_t &label2 = (*labels_)[idx2].label;
+
+ const std::set<uint32_t>::const_iterator it1 = exclude_labels_->find (label1);
+ if (it1 == exclude_labels_->end ())
+ return false;
+
+ const std::set<uint32_t>::const_iterator it2 = exclude_labels_->find (label2);
+ if (it2 == exclude_labels_->end ())
+ return false;
+ }
+
+ float dist_threshold = distance_threshold_;
+ if (depth_dependent_)
+ {
+ Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
+ float z = vec.dot (z_axis_);
+ dist_threshold *= z * z;
+ }
+
+ const float dist = ((*input_)[idx1].getVector3fMap ()
+ - (*input_)[idx2].getVector3fMap ()).norm ();
+ return (dist < dist_threshold);
+ }
+
+ protected:
+
+
+ /** \brief Set of labels with similar size as the input point cloud,
+ * aggregating points into groups based on a similar label identifier.
+ *
+ * It needs to be set in conjunction with the \ref exclude_labels_
+ * member in order to provided a masking functionality.
+ */
+ PointCloudLPtr labels_;
+
+ /** \brief Specifies which labels should be excluded com being clustered.
+ *
+ * If a label is not specified, it's assumed by default that it's
+ * intended be excluded
+ */
+ ExcludeLabelSetConstPtr exclude_labels_;
+
+ float distance_threshold_;
+
+ bool depth_dependent_;
+
+ Eigen::Vector3f z_axis_;
+ };
+ } // namespace experimental
+
+
+ /** \brief EuclideanClusterComparator is a comparator used for finding clusters based on euclidian distance.
*
* \author Alex Trevor
*/
- template<typename PointT, typename PointNT, typename PointLT>
- class EuclideanClusterComparator: public Comparator<PointT>
+ template<typename PointT, typename PointNT, typename PointLT = deprecated::T>
+ class EuclideanClusterComparator : public experimental::EuclideanClusterComparator<PointT, PointLT>
{
+ protected:
+
+ using experimental::EuclideanClusterComparator<PointT, PointLT>::exclude_labels_;
+
public:
- typedef typename Comparator<PointT>::PointCloud PointCloud;
- typedef typename Comparator<PointT>::PointCloudConstPtr PointCloudConstPtr;
-
+
typedef typename pcl::PointCloud<PointNT> PointCloudN;
typedef typename PointCloudN::Ptr PointCloudNPtr;
typedef typename PointCloudN::ConstPtr PointCloudNConstPtr;
-
- typedef typename pcl::PointCloud<PointLT> PointCloudL;
- typedef typename PointCloudL::Ptr PointCloudLPtr;
- typedef typename PointCloudL::ConstPtr PointCloudLConstPtr;
typedef boost::shared_ptr<EuclideanClusterComparator<PointT, PointNT, PointLT> > Ptr;
typedef boost::shared_ptr<const EuclideanClusterComparator<PointT, PointNT, PointLT> > ConstPtr;
- using pcl::Comparator<PointT>::input_;
-
- /** \brief Empty constructor for EuclideanClusterComparator. */
+ using experimental::EuclideanClusterComparator<PointT, PointLT>::setExcludeLabels;
+
+ /** \brief Default constructor for EuclideanClusterComparator. */
+ PCL_DEPRECATED ("Remove PointNT from template parameters.")
EuclideanClusterComparator ()
: normals_ ()
, angular_threshold_ (0.0f)
- , distance_threshold_ (0.005f)
- , depth_dependent_ ()
- , z_axis_ ()
- {
- }
-
- /** \brief Destructor for EuclideanClusterComparator. */
- virtual
- ~EuclideanClusterComparator ()
- {
- }
+ {}
- virtual void
- setInputCloud (const PointCloudConstPtr& cloud)
- {
- input_ = cloud;
- Eigen::Matrix3f rot = input_->sensor_orientation_.toRotationMatrix ();
- z_axis_ = rot.col (2);
- }
-
/** \brief Provide a pointer to the input normals.
- * \param[in] normals the input normal cloud
- */
+ * \param[in] normals the input normal cloud
+ */
inline void
- setInputNormals (const PointCloudNConstPtr &normals)
- {
- normals_ = normals;
- }
+ PCL_DEPRECATED ("EuclideadClusterComparator never actually used normals and angular threshold, "
+ "this function has no effect on the behavior of the comparator. Therefore it is "
+ "deprecated and will be removed in future releases.")
+ setInputNormals (const PointCloudNConstPtr& normals) { normals_ = normals; }
/** \brief Get the input normals. */
inline PointCloudNConstPtr
- getInputNormals () const
- {
- return (normals_);
- }
+ PCL_DEPRECATED ("EuclideadClusterComparator never actually used normals and angular threshold, "
+ "this function has no effect on the behavior of the comparator. Therefore it is "
+ "deprecated and will be removed in future releases.")
+ getInputNormals () const { return (normals_); }
/** \brief Set the tolerance in radians for difference in normal direction between neighboring points, to be considered part of the same plane.
* \param[in] angular_threshold the tolerance in radians
*/
- virtual inline void
- setAngularThreshold (float angular_threshold)
- {
- angular_threshold_ = cosf (angular_threshold);
- }
-
- /** \brief Get the angular threshold in radians for difference in normal direction between neighboring points, to be considered part of the same plane. */
- inline float
- getAngularThreshold () const
- {
- return (acos (angular_threshold_) );
- }
-
- /** \brief Set the tolerance in meters for difference in perpendicular distance (d component of plane equation) to the plane between neighboring points, to be considered part of the same plane.
- * \param[in] distance_threshold the tolerance in meters
- * \param depth_dependent
- */
inline void
- setDistanceThreshold (float distance_threshold, bool depth_dependent)
+ PCL_DEPRECATED ("EuclideadClusterComparator never actually used normals and angular threshold, "
+ "this function has no effect on the behavior of the comparator. Therefore it is "
+ "deprecated and will be removed in future releases.")
+ setAngularThreshold (float angular_threshold)
{
- distance_threshold_ = distance_threshold;
- depth_dependent_ = depth_dependent;
+ angular_threshold_ = std::cos (angular_threshold);
}
- /** \brief Get the distance threshold in meters (d component of plane equation) between neighboring points, to be considered part of the same plane. */
+ /** \brief Get the angular threshold in radians for difference in normal direction between neighboring points, to be considered part of the same plane. */
inline float
- getDistanceThreshold () const
- {
- return (distance_threshold_);
- }
-
- /** \brief Set label cloud
- * \param[in] labels The label cloud
- */
- void
- setLabels (PointCloudLPtr& labels)
- {
- labels_ = labels;
- }
+ PCL_DEPRECATED ("EuclideadClusterComparator never actually used normals and angular threshold, "
+ "this function has no effect on the behavior of the comparator. Therefore it is "
+ "deprecated and will be removed in future releases.")
+ getAngularThreshold () const { return (std::acos (angular_threshold_) ); }
/** \brief Set labels in the label cloud to exclude.
- * \param exclude_labels a vector of bools corresponding to whether or not a given label should be considered
+ * \param[in] exclude_labels a vector of bools corresponding to whether or not a given label should be considered
*/
void
- setExcludeLabels (std::vector<bool>& exclude_labels)
+ PCL_DEPRECATED ("Use setExcludeLabels (const ExcludeLabelSetConstPtr &) instead")
+ setExcludeLabels (const std::vector<bool>& exclude_labels)
{
- exclude_labels_ = boost::make_shared<std::vector<bool> >(exclude_labels);
+ exclude_labels_ = boost::make_shared<std::set<uint32_t> > ();
+ for (uint32_t i = 0; i < exclude_labels.size (); ++i)
+ if (exclude_labels[i])
+ exclude_labels_->insert (i);
}
- /** \brief Compare points at two indices by their plane equations. True if the angle between the normals is less than the angular threshold,
- * and the difference between the d component of the normals is less than distance threshold, else false
- * \param idx1 The first index for the comparison
- * \param idx2 The second index for the comparison
- */
- virtual bool
- compare (int idx1, int idx2) const
- {
- int label1 = labels_->points[idx1].label;
- int label2 = labels_->points[idx2].label;
-
- if (label1 == -1 || label2 == -1)
- return false;
-
- if ( (*exclude_labels_)[label1] || (*exclude_labels_)[label2])
- return false;
-
- float dist_threshold = distance_threshold_;
- if (depth_dependent_)
- {
- Eigen::Vector3f vec = input_->points[idx1].getVector3fMap ();
- float z = vec.dot (z_axis_);
- dist_threshold *= z * z;
- }
-
- float dx = input_->points[idx1].x - input_->points[idx2].x;
- float dy = input_->points[idx1].y - input_->points[idx2].y;
- float dz = input_->points[idx1].z - input_->points[idx2].z;
- float dist = std::sqrt (dx*dx + dy*dy + dz*dz);
-
- return (dist < dist_threshold);
- }
-
protected:
+
PointCloudNConstPtr normals_;
- PointCloudLPtr labels_;
- boost::shared_ptr<std::vector<bool> > exclude_labels_;
float angular_threshold_;
- float distance_threshold_;
- bool depth_dependent_;
- Eigen::Vector3f z_axis_;
};
+
+ template<typename PointT, typename PointLT>
+ class EuclideanClusterComparator<PointT, PointLT, deprecated::T>
+ : public experimental::EuclideanClusterComparator<PointT, PointLT> {};
}
#endif // PCL_SEGMENTATION_PLANE_COEFFICIENT_COMPARATOR_H_
namespace grabcut
{
/** boost implementation of Boykov and Kolmogorov's maxflow algorithm doesn't support
- * negative flows which makes it inappropriate for this conext.
+ * negative flows which makes it inappropriate for this context.
* This implementation of Boykov and Kolmogorov's maxflow algorithm by Stephen Gould
* <stephen.gould@anu.edu.au> in DARWIN under BSD does the trick however solwer than original
* implementation.
colorDistance (const Color& c1, const Color& c2);
/// User supplied Trimap values
enum TrimapValue { TrimapUnknown = -1, TrimapForeground, TrimapBackground };
- /// Grabcut derived hard segementation values
+ /// Grabcut derived hard segmentation values
enum SegmentationValue { SegmentationForeground = 0, SegmentationBackground };
/// Gaussian structure
struct Gaussian
Eigen::Matrix3f inverse;
/// weighting of this gaussian in the GMM.
float pi;
- /// heighest eigenvalue of covariance matrix
+ /// highest eigenvalue of covariance matrix
float eigenvalue;
- /// eigenvector corresponding to the heighest eigenvector
+ /// eigenvector corresponding to the highest eigenvector
Eigen::Vector3f eigenvector;
};
uint32_t count_;
/// small value to add to covariance matrix diagonal to avoid singular values
float epsilon_;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
/** Build the initial GMMs using the Orchard and Bouman color clustering algorithm */
, nb_neighbours_ (9)
, initialized_ (false)
{}
- /// Desctructor
+ /// Destructor
virtual ~GrabCut () {};
// /// Set input cloud
void
const std::vector<float>&
getPlaneCoeffD () const
{
- return (plane_coeff_d_);
+ return (*plane_coeff_d_);
}
/** \brief Set the tolerance in radians for difference in normal direction between neighboring points, to be considered part of the same plane.
// Correct edge relations using extended convexity definition if k>0
applyKconvexity (k_factor_);
- // Determine wether to use cutting planes
+ // Determine whether to use cutting planes
doGrouping ();
grouping_data_valid_ = true;
boost::tie (edge_itr, edge_itr_end) = boost::edges (sv_adjacency_list_);
for (next_edge = edge_itr; edge_itr != edge_itr_end; edge_itr = next_edge)
{
- next_edge++; // next_edge iterator is neccessary, because removing an edge invalidates the iterator to the current edge
+ next_edge++; // next_edge iterator is necessary, because removing an edge invalidates the iterator to the current edge
uint32_t source_sv_label = sv_adjacency_list_[boost::source (*edge_itr, sv_adjacency_list_)];
uint32_t target_sv_label = sv_adjacency_list_[boost::target (*edge_itr, sv_adjacency_list_)];
{
Eigen::Vector3f plane_normal (model_coefficients[0], model_coefficients[1], model_coefficients[2]);
// Cut the connections.
- // We only interate through the points which are within the support (when we are local, otherwise all points in the segment).
- // We also just acutally cut when the edge goes through the plane. This is why we check the planedistance
+ // We only iterate through the points which are within the support (when we are local, otherwise all points in the segment).
+ // We also just actually cut when the edge goes through the plane. This is why we check the planedistance
std::vector<pcl::PointIndices> cluster_indices;
pcl::EuclideanClusterExtraction<WeightSACPointType> euclidean_clusterer;
pcl::search::KdTree<WeightSACPointType>::Ptr tree (new pcl::search::KdTree<WeightSACPointType>);
}
if (cut_support_indices.size () == 0)
{
-// std::cout << "Could not find planes which exceed required minumum score (threshold " << min_cut_score_ << "), not cutting" << std::endl;
+// std::cout << "Could not find planes which exceed required minimum score (threshold " << min_cut_score_ << "), not cutting" << std::endl;
continue;
}
}
Eigen::VectorXf model_coefficients;
unsigned skipped_count = 0;
- // supress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
+ // suppress infinite loops by just allowing 10 x maximum allowed iterations for invalid model parameters!
const unsigned max_skip = max_iterations_ * 10;
// Iterate
// After filtered Segments are deleted, compute completely new adjacency map
// NOTE Recomputing the adjacency of every segment in every iteration is an easy but inefficient solution.
- // Because the number of segments in an average scene is usually well below 1000, the time spend for noise filtering is still neglible in most cases
+ // Because the number of segments in an average scene is usually well below 1000, the time spend for noise filtering is still negligible in most cases
computeSegmentAdjacency ();
} // End while (Filtering)
}
sv_label_to_seg_label_map_[sv_label] = segment_label;
seg_label_to_sv_list_map_[segment_label].insert (sv_label);
- // Iterate through all neighbors of this supervoxel and check wether they should be merged with the current supervoxel
+ // Iterate through all neighbors of this supervoxel and check whether they should be merged with the current supervoxel
std::pair<OutEdgeIterator, OutEdgeIterator> out_edge_iterator_range;
out_edge_iterator_range = boost::out_edges (query_point_id, sv_adjacency_list_); // adjacent vertices to node (*itr) in graph sv_adjacency_list_
for (OutEdgeIterator out_Edge_itr = out_edge_iterator_range.first; out_Edge_itr != out_edge_iterator_range.second; ++out_Edge_itr)
// Check all edges in the graph for k-convexity
for (next_edge = edge_itr; edge_itr != edge_itr_end; edge_itr = next_edge)
{
- next_edge++; // next_edge iterator is neccessary, because removing an edge invalidates the iterator to the current edge
+ next_edge++; // next_edge iterator is necessary, because removing an edge invalidates the iterator to the current edge
is_convex = sv_adjacency_list_[*edge_itr].is_convex;
for (next_edge = edge_itr; edge_itr != edge_itr_end; edge_itr = next_edge)
{
- next_edge++; // next_edge iterator is neccessary, because removing an edge invalidates the iterator to the current edge
+ next_edge++; // next_edge iterator is necessary, because removing an edge invalidates the iterator to the current edge
uint32_t source_sv_label = adjacency_list_arg[boost::source (*edge_itr, adjacency_list_arg)];
uint32_t target_sv_label = adjacency_list_arg[boost::target (*edge_itr, adjacency_list_arg)];
Neighbor( 0, 1, labels->width ),
Neighbor(-1, 1, labels->width - 1)};
- // find one pixel with other label in the neighborhood -> assume thats the one we came from
+ // find one pixel with other label in the neighborhood -> assume that's the one we came from
int direction = -1;
int x;
int y;
PointCloudLPtr& labels,
std::vector<pcl::PointIndices>& label_indices)
{
- //List of lables to grow, and index of model corresponding to each label
+ //List of labels to grow, and index of model corresponding to each label
std::vector<bool> grow_labels;
std::vector<int> label_to_model;
grow_labels.resize (label_indices.size (), false);
distances.resize (clusters_.size (), max_dist);
int number_of_points = num_pts_in_segment_[index];
- //loop throug every point in this segment and check neighbours
+ //loop through every point in this segment and check neighbours
for (int i_point = 0; i_point < number_of_points; i_point++)
{
int point_index = clusters_[index].indices[i_point];
int number_of_neighbours = static_cast<int> (point_neighbours_[point_index].size ());
- //loop throug every neighbour of the current point, find out to which segment it belongs
+ //loop through every neighbour of the current point, find out to which segment it belongs
//and if it belongs to neighbouring segment and is close enough then remember segment and its distance
for (int i_nghbr = 0; i_nghbr < number_of_neighbours; i_nghbr++)
{
//////////////////////////////////////////////////////////////////////////
template <typename PointT> void
pcl::getPointCloudDifference (
- const pcl::PointCloud<PointT> &src,
- const pcl::PointCloud<PointT> &,
- double threshold, const boost::shared_ptr<pcl::search::Search<PointT> > &tree,
+ const pcl::PointCloud<PointT> &src,
+ double threshold,
+ const boost::shared_ptr<pcl::search::Search<PointT> > &tree,
pcl::PointCloud<PointT> &output)
{
// We're interested in a single nearest neighbor only
std::vector<int> nn_indices (1);
std::vector<float> nn_distances (1);
- // The src indices that do not have a neighbor in tgt
+ // The input cloud indices that do not have a neighbor in the target cloud
std::vector<int> src_indices;
// Iterate through the source data set
for (int i = 0; i < static_cast<int> (src.points.size ()); ++i)
{
+ // Ignore invalid points in the inpout cloud
if (!isFinite (src.points[i]))
continue;
// Search for the closest point in the target data set (number of neighbors to find = 1)
PCL_WARN ("No neighbor found for point %lu (%f %f %f)!\n", i, src.points[i].x, src.points[i].y, src.points[i].z);
continue;
}
-
+ // Add points without a corresponding point in the target cloud to the output cloud
if (nn_distances[0] > threshold)
src_indices.push_back (i);
}
-
- // Allocate enough space and copy the basics
- output.points.resize (src_indices.size ());
- output.header = src.header;
- output.width = static_cast<uint32_t> (src_indices.size ());
- output.height = 1;
- //if (src.is_dense)
- output.is_dense = true;
- //else
- // It's not necessarily true that is_dense is false if cloud_in.is_dense is false
- // To verify this, we would need to iterate over all points and check for NaNs
- //output.is_dense = false;
// Copy all the data fields from the input cloud to the output one
copyPointCloud (src, src_indices, output);
+
+ // Output is always dense, as invalid points in the input cloud are ignored
+ output.is_dense = true;
}
//////////////////////////////////////////////////////////////////////////
// Send the input dataset to the spatial locator
tree_->setInputCloud (target_);
- getPointCloudDifference (*input_, *target_, distance_threshold_, tree_, output);
+ getPointCloudDifference (*input_, distance_threshold_, tree_, output);
deinitCompute ();
}
#define PCL_INSTANTIATE_SegmentDifferences(T) template class PCL_EXPORTS pcl::SegmentDifferences<T>;
-#define PCL_INSTANTIATE_getPointCloudDifference(T) template PCL_EXPORTS void pcl::getPointCloudDifference<T>(const pcl::PointCloud<T> &, const pcl::PointCloud<T> &, double, const boost::shared_ptr<pcl::search::Search<T> > &, pcl::PointCloud<T> &);
+#define PCL_INSTANTIATE_getPointCloudDifference(T) template PCL_EXPORTS void pcl::getPointCloudDifference<T>(const pcl::PointCloud<T> &, double, const boost::shared_ptr<pcl::search::Search<T> > &, pcl::PointCloud<T> &);
#endif // PCL_SEGMENTATION_IMPL_SEGMENT_DIFFERENCES_H_
-
/** \brief Edge Properties stored in the adjacency graph.*/
struct EdgeProperties
{
- /** \brief Desribes the difference of normals of the two supervoxels being connected*/
+ /** \brief Describes the difference of normals of the two supervoxels being connected*/
float normal_difference;
- /** \brief Desribes if a connection is convex or concave*/
+ /** \brief Describes if a connection is convex or concave*/
bool is_convex;
/** \brief Describes if a connection is valid for the segment growing. Usually convex connections are and concave connection are not. Due to k-concavity a convex connection can be invalidated*/
/** \brief Normal Threshold in degrees [0,180] used for merging */
float concavity_tolerance_threshold_;
- /** \brief Marks if valid grouping data (\ref sv_adjacency_list_, \ref sv_label_to_seg_label_map_, \ref processed_) is avaiable */
+ /** \brief Marks if valid grouping data (\ref sv_adjacency_list_, \ref sv_label_to_seg_label_map_, \ref processed_) is available */
bool grouping_data_valid_;
/** \brief Marks if supervoxels have been set by calling \ref setInputSupervoxels */
segment (pcl::PointCloud<PointLT>& labels, std::vector<pcl::PointIndices>& label_indices) const;
/** \brief Find the boundary points / contour of a connected component
- * \param[in] start_idx the first (lowest) index of the connected component for which a boundary shoudl be returned
+ * \param[in] start_idx the first (lowest) index of the connected component for which a boundary should be returned
* \param[in] labels the Label cloud produced by segmentation
* \param[out] boundary_indices the indices of the boundary points for the label corresponding to start_idx
*/
const std::vector<float>&
getPlaneCoeffD () const
{
- return (plane_coeff_d_);
+ return (*plane_coeff_d_);
}
/** \brief Set the tolerance in radians for difference in normal direction between neighboring points, to be considered part of the same plane.
getNormalDistanceWeight () const { return (distance_weight_); }
/** \brief Set the minimum opning angle for a cone model.
- * \param min_angle the opening angle which we need minumum to validate a cone model.
+ * \param min_angle the opening angle which we need minimum to validate a cone model.
* \param max_angle the opening angle which we need maximum to validate a cone model.
*/
inline void
max_angle_ = max_angle;
}
- /** \brief Get the opening angle which we need minumum to validate a cone model. */
+ /** \brief Get the opening angle which we need minimum to validate a cone model. */
inline void
getMinMaxOpeningAngle (double &min_angle, double &max_angle)
{
////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Obtain the difference between two aligned point clouds as another point cloud, given a distance threshold.
* \param src the input point cloud source
- * \param tgt the input point cloud target we need to obtain the difference against
* \param threshold the distance threshold (tolerance) for point correspondences. (e.g., check if f a point p1 from
* src has a correspondence > threshold than a point p2 from tgt)
- * \param tree the spatial locator (e.g., kd-tree) used for nearest neighbors searching built over \a tgt
+ * \param tree the spatial locator (e.g., kd-tree) used for nearest neighbors searching built over the target cloud
* \param output the resultant output point cloud difference
* \ingroup segmentation
*/
template <typename PointT>
void getPointCloudDifference (
- const pcl::PointCloud<PointT> &src, const pcl::PointCloud<PointT> &tgt,
- double threshold, const boost::shared_ptr<pcl::search::Search<PointT> > &tree,
+ const pcl::PointCloud<PointT> &src,
+ double threshold,
+ const boost::shared_ptr<pcl::search::Search<PointT> > &tree,
pcl::PointCloud<PointT> &output);
+ template <typename PointT>
+ PCL_DEPRECATED("getPointCloudDifference() does not use the tgt parameter, thus it is deprecated and will be removed in future releases.")
+ inline void getPointCloudDifference (
+ const pcl::PointCloud<PointT> &src,
+ const pcl::PointCloud<PointT> & /* tgt */,
+ double threshold,
+ const boost::shared_ptr<pcl::search::Search<PointT> > &tree,
+ pcl::PointCloud<PointT> &output)
+ {
+ getPointCloudDifference<PointT> (src, threshold, tree, output);
+ }
+
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
* Points that belong to the same supervoxel have the same color.
* But this function doesn't guarantee that different segments will have different
* color(it's random). Points that are unlabeled will be black
- * \note This will expand the label_colors_ vector so that it can accomodate all labels
+ * \note This will expand the label_colors_ vector so that it can accommodate all labels
*/
PCL_DEPRECATED ("SupervoxelClustering::getColoredCloud is deprecated. Use the getLabeledCloud function instead. examples/segmentation/example_supervoxels.cpp shows how to use this to display and save with colorized labels.")
typename pcl::PointCloud<PointXYZRGBA>::Ptr
* Points that belong to the same supervoxel have the same color.
* But this function doesn't guarantee that different segments will have different
* color(it's random). Points that are unlabeled will be black
- * \note This will expand the label_colors_ vector so that it can accomodate all labels
+ * \note This will expand the label_colors_ vector so that it can accommodate all labels
*/
PCL_DEPRECATED ("SupervoxelClustering::getColoredVoxelCloud is deprecated. Use the getLabeledVoxelCloud function instead. examples/segmentation/example_supervoxels.cpp shows how to use this to display and save with colorized labels.")
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr
data_.xyz_[0] += new_point.x;
data_.xyz_[1] += new_point.y;
data_.xyz_[2] += new_point.z;
- //Separate sums for r,g,b since we cant sum in uchars
+ //Separate sums for r,g,b since we can't sum in uchars
data_.rgb_[0] += static_cast<float> (new_point.r);
data_.rgb_[1] += static_cast<float> (new_point.g);
data_.rgb_[2] += static_cast<float> (new_point.b);
data_.xyz_[0] += new_point.x;
data_.xyz_[1] += new_point.y;
data_.xyz_[2] += new_point.z;
- //Separate sums for r,g,b since we cant sum in uchars
+ //Separate sums for r,g,b since we can't sum in uchars
data_.rgb_[0] += static_cast<float> (new_point.r);
data_.rgb_[1] += static_cast<float> (new_point.g);
data_.rgb_[2] += static_cast<float> (new_point.b);
};
typedef std::vector<Vertex> Vertices;
- typedef std::vector<size_t> Indices;
+ typedef std::vector<GLuint> Indices;
class Model
{
*/
void
setCameraIntrinsicsParameters (int camera_width_in,
- int camera_height_in,
- float camera_fx_in,
- float camera_fy_in,
- float camera_cx_in,
- float camera_cy_in)
+ int camera_height_in,
+ float camera_fx_in,
+ float camera_fy_in,
+ float camera_cx_in,
+ float camera_cy_in)
{
camera_width_ = camera_width_in;
camera_height_ = camera_height_in;
camera_cy_ = camera_cy_in;
}
+ /**
+ * Get the basic camera intrinsic parameters
+ */
+ void
+ getCameraIntrinsicsParameters (int &camera_width_in,
+ int &camera_height_in,
+ float &camera_fx_in,
+ float &camera_fy_in,
+ float &camera_cx_in,
+ float &camera_cy_in) const
+ {
+ camera_width_in = camera_width_;
+ camera_height_in = camera_height_;
+ camera_fx_in = camera_fx_;
+ camera_fy_in = camera_fy_;
+ camera_cx_in = camera_cx_;
+ camera_cy_in = camera_cy_;
+ }
+
/**
* Set the cost function to be used - one of 4 hard coded currently
*/
void setSigma (double sigma_in){ sigma_ = sigma_in; }
void setFloorProportion (double floor_proportion_in){ floor_proportion_ = floor_proportion_in;}
- int getRows () {return rows_;}
- int getCols () {return cols_;}
- int getRowHeight () {return row_height_;}
- int getColWidth () {return col_width_;}
- int getWidth () {return width_;}
- int getHeight () {return height_;}
+ int getRows () const {return rows_;}
+ int getCols () const {return cols_;}
+ int getRowHeight () const {return row_height_;}
+ int getColWidth () const {return col_width_;}
+ int getWidth () const {return width_;}
+ int getHeight () const {return height_;}
// Convenience function to return simulated RGB-D PointCloud
// Two modes:
// global=false - PointCloud is as would be captured by an RGB-D camera [default]
// global=true - PointCloud is transformed into the model/world frame using the camera pose
void getPointCloud (pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc,
- bool make_global, const Eigen::Isometry3d & pose);
+ bool make_global, const Eigen::Isometry3d & pose, bool organized = false) const;
// Convenience function to return RangeImagePlanar containing
// simulated RGB-D:
- void getRangeImagePlanar (pcl::RangeImagePlanar &rip);
+ void getRangeImagePlanar (pcl::RangeImagePlanar &rip) const;
// Add various types of noise to simulated RGB-D data
void addNoise ();
setUseColor(bool use_color) { use_color_ = use_color; }
const uint8_t*
- getColorBuffer ();
+ getColorBuffer () const;
const float*
- getDepthBuffer ();
+ getDepthBuffer () const;
const float*
- getScoreBuffer ();
+ getScoreBuffer () const;
+ float
+ getZNear () const { return z_near_; }
+
+ float
+ getZFar () const { return z_far_; }
+
+ void
+ setZNear (float z){ z_near_ = z; }
+
+ void
+ setZFar (float z){ z_far_ = z; }
+
private:
/**
* Evaluate the likelihood/score for a set of particles
float camera_cy_;
// min and max range of the rgbd sensor
- // everything outside this doesnt appear in depth images
+ // everything outside this doesn't appear in depth images
float z_near_;
float z_far_;
- bool depth_buffer_dirty_;
- bool color_buffer_dirty_;
- bool score_buffer_dirty_;
+ // For caching only, not part of observable state.
+ mutable bool depth_buffer_dirty_;
+ mutable bool color_buffer_dirty_;
+ mutable bool score_buffer_dirty_;
int which_cost_function_;
double floor_proportion_;
{
/** \brief Implements a parallel summation of float arrays using GLSL.
* The input array is provided as a float texture and the summation
- * is performed over set number of levels, where each level halfs each
+ * is performed over set number of levels, where each level halves each
* dimension.
*
* \author Hordur Johannsson
{
bool found_rgb=false;
for (size_t i=0; i<plg->cloud.fields.size () ;i++)
- if (plg->cloud.fields[i].name.compare ("rgb") == 0)
+ if (plg->cloud.fields[i].name.compare ("rgb") == 0 || plg->cloud.fields[i].name.compare ("rgba") == 0)
found_rgb = true;
if (found_rgb)
apoly.colors_[4*j + 0] = newcloud.points[pt].r/255.0f; // Red
apoly.colors_[4*j + 1] = newcloud.points[pt].g/255.0f; // Green
apoly.colors_[4*j + 2] = newcloud.points[pt].b/255.0f; // Blue
- apoly.colors_[4*j + 3] = 1.0f; // transparancy? unnecessary?
+ apoly.colors_[4*j + 3] = 1.0f; // transparency? unnecessary?
}
polygons.push_back (apoly);
}
{ // working range
float min_dist = abs (ref_val - 0.7253f/(1.0360f - (depth_val)));
- int lup = static_cast<int> (ceil (min_dist*100)); // has resulution of 0.01m
+ int lup = static_cast<int> (ceil (min_dist*100)); // has resolution of 0.01m
if (lup > 300)
{ // implicitly this caps the cost if there is a hole in the model
lup = 300;
{
float disparity_diff = abs( ( -0.7253f/ref_val +1.0360f ) - depth_val );
- int top_lup = static_cast<int> (ceil (disparity_diff*300)); // has resulution of 0.001m
+ int top_lup = static_cast<int> (ceil (disparity_diff*300)); // has resolution of 0.001m
if (top_lup > 300)
{
top_lup =300;
// bottom:
//bottom = bottom_lookup( round(mu*1000+1));
- int bottom_lup = static_cast<int> (ceil( (depth_val) * 300)); // has resulution of 0.001m
+ int bottom_lup = static_cast<int> (ceil( (depth_val) * 300)); // has resolution of 0.001m
if (bottom_lup > 300)
{
bottom_lup =300;
float proportion = 0.999f;
float lhood = proportion + (1-proportion)*(top/bottom);
- // safety fix thats seems to be required due to opengl ayschronizate
+ // safety fix that seems to be required due to opengl asynchronization
// ask hordur about this
if (bottom == 0)
{
void
pcl::simulation::RangeLikelihood::getPointCloud (pcl::PointCloud<pcl::PointXYZRGB>::Ptr pc,
bool make_global,
- const Eigen::Isometry3d & pose)
+ const Eigen::Isometry3d & pose,
+ bool organized) const
{
// TODO: check if this works for for rows/cols >1 and for width&height != 640x480
// i.e. multiple tiled images
//pc->width = camera_width_;
//pc->height = camera_height_;
- pc->is_dense = false;
+ pc->is_dense = true;
pc->points.resize (pc->width*pc->height);
int points_added = 0;
for (int x = 0; x < col_width_ ; ++x) // camera_width_
{
// Find XYZ from normalized 0->1 mapped disparity
- int idx = points_added; // y*camera_width_ + x;
+ int idx;
+ if (organized) idx = y*col_width_ + x;
+ else idx = points_added; // y*camera_width_ + x;
+
float d = depth_buffer_[y*camera_width_ + x] ;
+
if (d < 1.0) // only add points with depth buffer less than max (20m) range
{
float z = zf*zn/((zf-zn)*(d - zf/(zf-zn)));
pc->points[idx].x = (static_cast<float> (x)-camera_cx_) * z * (-camera_fx_reciprocal_);
pc->points[idx].y = (static_cast<float> (y)-camera_cy_) * z * (-camera_fy_reciprocal_);
- int rgb_idx = y*col_width_ + x; //camera_width_
+ int rgb_idx = y*col_width_ + x; //camera_width_
pc->points[idx].b = color_buffer[rgb_idx*3+2]; // blue
pc->points[idx].g = color_buffer[rgb_idx*3+1]; // green
pc->points[idx].r = color_buffer[rgb_idx*3]; // red
points_added++;
}
+ else if (organized)
+ {
+ pc->is_dense = false;
+ pc->points[idx].z = std::numeric_limits<float>::quiet_NaN ();
+ pc->points[idx].x = std::numeric_limits<float>::quiet_NaN ();
+ pc->points[idx].y = std::numeric_limits<float>::quiet_NaN ();
+ pc->points[idx].rgba = 0;
+ }
}
}
- pc->width = 1;
- pc->height = points_added;
- pc->points.resize (points_added);
+
+ if (!organized)
+ {
+ pc->width = 1;
+ pc->height = points_added;
+ pc->points.resize (points_added);
+ }
if (make_global)
{
}
void
-pcl::simulation::RangeLikelihood::getRangeImagePlanar(pcl::RangeImagePlanar &rip)
+pcl::simulation::RangeLikelihood::getRangeImagePlanar(pcl::RangeImagePlanar &rip) const
{
rip.setDepthImage (depth_buffer_,
camera_width_,camera_height_, camera_fx_,camera_fy_,
}
const float*
-RangeLikelihood::getDepthBuffer ()
+RangeLikelihood::getDepthBuffer () const
{
if (depth_buffer_dirty_)
{
}
const uint8_t*
-RangeLikelihood::getColorBuffer ()
+RangeLikelihood::getColorBuffer () const
{
// It's only possible to read the color buffer if it
// was rendered in the first place.
// The scores are in score_texture_
const float*
-RangeLikelihood::getScoreBuffer ()
+RangeLikelihood::getScoreBuffer () const
{
if (score_buffer_dirty_ && !compute_likelihood_on_cpu_)
{
1. sim_viewer.cpp
purpose: simulate in viewer which is almost the same as pcl_viewer
status : use the mouse to drive around, and 'v' to capture a cloud. reads vtk and obj.
- visualizes vtk and makes pcd of obj. conflict between RL and VTK means doesnt visualize/simulate properly
+ visualizes vtk and makes pcd of obj. conflict between RL and VTK means doesn't visualize/simulate properly
was : range_pcd_viewer.cpp
2. sim_terminal_demo.cpp
boost::this_thread::sleep (boost::posix_time::microseconds (100000));
}
- // doesnt work:
+ // doesn't work:
// viewer->~PCLVisualizer();
// viewer.reset();
cout << "done\n";
- // Problem: vtk and opengl dont seem to play very well together
- // vtk seems to misbehave after a little while and wont keep the window on the screen
+ // Problem: vtk and opengl don't seem to play very well together
+ // vtk seems to misbehave after a little while and won't keep the window on the screen
// method1: kill with [x] - but eventually it crashes:
//while (!viewer.wasStopped ()){
void capture (Eigen::Isometry3d pose_in, string point_cloud_fname)
{
- // No reference image - but this is kept for compatability with range_test_v2:
+ // No reference image - but this is kept for compatibility with range_test_v2:
float* reference = new float[range_likelihood_->getRowHeight() * range_likelihood_->getColWidth()];
const float* depth_buffer = range_likelihood_->getDepthBuffer();
// Copy one image from our last as a reference.
writer.writeBinary (point_cloud_fname, *pc_out);
//cout << "finished writing file\n";
}
- // Disabled all OpenCV stuff for now: dont want the dependency
+ // Disabled all OpenCV stuff for now: don't want the dependency
/*
bool demo_other_stuff = false;
if (demo_other_stuff && write_cloud)
void
pcl::simulation::SimExample::doSim (Eigen::Isometry3d pose_in)
{
- // No reference image - but this is kept for compatability with range_test_v2:
+ // No reference image - but this is kept for compatibility with range_test_v2:
float* reference = new float[rl_->getRowHeight() * rl_->getColWidth()];
const float* depth_buffer = rl_->getDepthBuffer();
// Copy one image from our last as a reference.
radius_ = radius;
};
- /** \brief setter for the spatial bandwith used for cost aggregation based on adaptive weights
- * \param[in] gamma_s spatial bandwith used for cost aggregation based on adaptive weights
+ /** \brief setter for the spatial bandwidth used for cost aggregation based on adaptive weights
+ * \param[in] gamma_s spatial bandwidth used for cost aggregation based on adaptive weights
*/
void
setGammaS (int gamma_s)
gamma_s_ = gamma_s;
};
- /** \brief setter for the color bandwith used for cost aggregation based on adaptive weights
- * \param[in] gamma_c color bandwith used for cost aggregation based on adaptive weights
+ /** \brief setter for the color bandwidth used for cost aggregation based on adaptive weights
+ * \param[in] gamma_c color bandwidth used for cost aggregation based on adaptive weights
*/
void
setGammaC (int gamma_c)
2d xy profile to 3d world space.
Parameters:
P - [in] start or end of path
- T - [in] unit tanget to path
+ T - [in] unit tangent to path
U - [in] unit up vector perpendicular to T
Normal - [in] optional unit vector with Normal->z > 0 that
defines the unit normal to the miter plane.
order1 - [in]
order2 - [in]
Returns:
- True if input was valid and creation succeded.
+ True if input was valid and creation succeeded.
*/
bool Create(
int dim,
// Description:
// Create a regular polygon circumscribe about a circle.
- // The midpoints of the polygon's edges will be tanget to the
+ // The midpoints of the polygon's edges will be tangent to the
// circle.
// Parameters:
// circle - [in]
{
vvDotTable = dvDotTable = ddDotTable = NULL;
valueTables = dValueTables = NULL;
+ baseFunctions = NULL;
+ baseBSplines = NULL;
functionCount = sampleCount = 0;
}
if( valueTables ) delete[] valueTables;
if( dValueTables ) delete[] dValueTables;
+
+ if( baseFunctions ) delete[] baseFunctions;
+ if( baseBSplines ) delete[] baseBSplines;
}
vvDotTable = dvDotTable = ddDotTable = NULL;
valueTables = dValueTables=NULL;
+ baseFunctions = NULL;
+ baseBSplines = NULL;
functionCount = 0;
}
/** \brief Bilateral filtering implementation, based on the following paper:
* * Kopf, Johannes and Cohen, Michael F. and Lischinski, Dani and Uyttendaele, Matt - Joint Bilateral Upsampling,
- * * ACM Transations in Graphics, July 2007
+ * * ACM Transactions in Graphics, July 2007
*
* Takes in a colored organized point cloud (i.e. PointXYZRGB or PointXYZRGBA), that might contain nan values for the
* depth information, and it will return an upsampled version of this cloud, based on the formula:
{
keep_information_ = value;
}
-
- /** \brief Returns the dimensionality (2 or 3) of the calculated hull. */
- PCL_DEPRECATED ("[pcl::ConcaveHull::getDim] This method is deprecated. Please use getDimension () instead.")
- int
- getDim () const;
/** \brief Returns the dimensionality (2 or 3) of the calculated hull. */
inline int
* when taking into account the segment between the points S1 and S2
* \param X 2D coordinate of the point
* \param S1 2D coordinate of the segment's first point
- * \param S2 2D coordinate of the segment's secont point
- * \param R 2D coorddinate of the reference point (defaults to 0,0)
+ * \param S2 2D coordinate of the segment's second point
+ * \param R 2D coordinate of the reference point (defaults to 0,0)
* \ingroup surface
*/
inline bool
/** \brief 2D coordinates of the second fringe neighbor of the next point **/
Eigen::Vector2f uvn_next_sfn_;
- /** \brief Temporary variable to store 3 coordiantes **/
+ /** \brief Temporary variable to store 3 coordinates **/
Eigen::Vector3f tmp_;
/** \brief The actual surface reconstruction method.
* points within the padding area,and do a weighted average. Say if the padding
* size is 1, when we process cell (x,y,z), we will find union of input data points
* from (x-1) to (x+1), (y-1) to (y+1), (z-1) to (z+1)(in total, 27 cells). In this
- * way, even the cells itself doesnt contain any data points, we will stil process it
+ * way, even the cells itself doesn't contain any data points, we will still process it
* because there are data points in the padding area. This can help us fix holes which
* is smaller than the padding size.
* \param padding_size The num of padding cells we want to create
for (int x_w = start_window_x; x_w < end_window_x; ++ x_w)
for (int y_w = start_window_y; y_w < end_window_y; ++ y_w)
{
- float dx = float (x - x_w),
- dy = float (y - y_w);
-
- float val_exp_depth = val_exp_depth_matrix(dx+window_size_, dy+window_size_);
-
- float d_color = static_cast<float> (
+ float val_exp_depth = val_exp_depth_matrix (static_cast<Eigen::MatrixXf::Index> (x - x_w + window_size_),
+ static_cast<Eigen::MatrixXf::Index> (y - y_w + window_size_));
+
+ Eigen::VectorXf::Index d_color = static_cast<Eigen::VectorXf::Index> (
abs (input_->points[y_w * input_->width + x_w].r - input_->points[y * input_->width + x].r) +
abs (input_->points[y_w * input_->width + x_w].g - input_->points[y * input_->width + x].g) +
abs (input_->points[y_w * input_->width + x_w].b - input_->points[y * input_->width + x].b));
- float val_exp_rgb = val_exp_rgb_vector (static_cast<Eigen::VectorXf::Index> (d_color));
+ float val_exp_rgb = val_exp_rgb_vector (d_color);
if (pcl_isfinite (input_->points[y_w*input_->width + x_w].z))
{
#include <stdlib.h>
#include <pcl/surface/qhull.h>
-//////////////////////////////////////////////////////////////////////////
-/** \brief Get dimension of concave hull
- * \return dimension
- */
-template <typename PointInT> int
-pcl::ConcaveHull<PointInT>::getDim () const
-{
- return (getDimension ());
-}
-
//////////////////////////////////////////////////////////////////////////
template <typename PointInT> void
pcl::ConcaveHull<PointInT>::reconstruct (PointCloud &output)
{
Eigen::Vector4d xyz_centroid;
compute3DCentroid (*input_, *indices_, xyz_centroid);
- EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
+ EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix = Eigen::Matrix3d::Zero ();
computeCovarianceMatrixNormalized (*input_, *indices_, xyz_centroid, covariance_matrix);
// Check if the covariance matrix is finite or not.
int max_vertex_id = 0;
FORALLvertices
{
- if (vertex->id + 1 > max_vertex_id)
+ if (vertex->id + 1 > unsigned (max_vertex_id))
max_vertex_id = vertex->id + 1;
}
PCL_DEBUG ("[pcl::%s::calculateInputDimension] WARNING: Input dimension not specified. Automatically determining input dimension.\n", getClassName ().c_str ());
Eigen::Vector4d xyz_centroid;
compute3DCentroid (*input_, *indices_, xyz_centroid);
- EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
+ EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix = Eigen::Matrix3d::Zero ();
computeCovarianceMatrixNormalized (*input_, *indices_, xyz_centroid, covariance_matrix);
EIGEN_ALIGN16 Eigen::Vector3d eigen_values;
#include <pcl/Vertices.h>
#include <pcl/kdtree/kdtree_flann.h>
-//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointNT>
-pcl::MarchingCubes<PointNT>::MarchingCubes ()
-: min_p_ (), max_p_ (), percentage_extend_grid_ (), iso_level_ ()
-{
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT>
pcl::MarchingCubes<PointNT>::~MarchingCubes ()
template <typename PointNT> void
pcl::MarchingCubes<PointNT>::getBoundingBox ()
{
- pcl::getMinMax3D (*input_, min_p_, max_p_);
-
- min_p_ -= (max_p_ - min_p_) * percentage_extend_grid_/2;
- max_p_ += (max_p_ - min_p_) * percentage_extend_grid_/2;
-
- Eigen::Vector4f bounding_box_size = max_p_ - min_p_;
-
- bounding_box_size = max_p_ - min_p_;
- PCL_DEBUG ("[pcl::MarchingCubesHoppe::getBoundingBox] Size of Bounding Box is [%f, %f, %f]\n",
- bounding_box_size.x (), bounding_box_size.y (), bounding_box_size.z ());
- double max_size =
- (std::max) ((std::max)(bounding_box_size.x (), bounding_box_size.y ()),
- bounding_box_size.z ());
- (void)max_size;
- // ????
- // data_size_ = static_cast<uint64_t> (max_size / leaf_size_);
- PCL_DEBUG ("[pcl::MarchingCubesHoppe::getBoundingBox] Lower left point is [%f, %f, %f]\n",
- min_p_.x (), min_p_.y (), min_p_.z ());
- PCL_DEBUG ("[pcl::MarchingCubesHoppe::getBoundingBox] Upper left point is [%f, %f, %f]\n",
- max_p_.x (), max_p_.y (), max_p_.z ());
+ PointNT max_pt, min_pt;
+ pcl::getMinMax3D (*input_, min_pt, max_pt);
+
+ lower_boundary_ = min_pt.getArray3fMap ();
+ upper_boundary_ = max_pt.getArray3fMap ();
+
+ const Eigen::Array3f size3_extend = 0.5f * percentage_extend_grid_
+ * (upper_boundary_ - lower_boundary_);
+
+ lower_boundary_ -= size3_extend;
+ upper_boundary_ += size3_extend;
}
float val_p2,
Eigen::Vector3f &output)
{
- float mu = (iso_level_ - val_p1) / (val_p2-val_p1);
+ const float mu = (iso_level_ - val_p1) / (val_p2 - val_p1);
output = p1 + mu * (p2 - p1);
}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> void
-pcl::MarchingCubes<PointNT>::createSurface (std::vector<float> &leaf_node,
- Eigen::Vector3i &index_3d,
+pcl::MarchingCubes<PointNT>::createSurface (const std::vector<float> &leaf_node,
+ const Eigen::Vector3i &index_3d,
pcl::PointCloud<PointNT> &cloud)
{
int cubeindex = 0;
- Eigen::Vector3f vertex_list[12];
if (leaf_node[0] < iso_level_) cubeindex |= 1;
if (leaf_node[1] < iso_level_) cubeindex |= 2;
if (leaf_node[2] < iso_level_) cubeindex |= 4;
if (edgeTable[cubeindex] == 0)
return;
- //Eigen::Vector4f index_3df (index_3d[0], index_3d[1], index_3d[2], 0.0f);
- Eigen::Vector3f center;// TODO coeff wise product = min_p_ + Eigen::Vector4f (1.0f/res_x_, 1.0f/res_y_, 1.0f/res_z_) * index_3df * (max_p_ - min_p_);
- center[0] = min_p_[0] + (max_p_[0] - min_p_[0]) * float (index_3d[0]) / float (res_x_);
- center[1] = min_p_[1] + (max_p_[1] - min_p_[1]) * float (index_3d[1]) / float (res_y_);
- center[2] = min_p_[2] + (max_p_[2] - min_p_[2]) * float (index_3d[2]) / float (res_z_);
+ const Eigen::Vector3f center = lower_boundary_
+ + size_voxel_ * index_3d.cast<float> ().array ();
std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > p;
p.resize (8);
for (int i = 0; i < 8; ++i)
{
Eigen::Vector3f point = center;
- if(i & 0x4)
- point[1] = static_cast<float> (center[1] + (max_p_[1] - min_p_[1]) / float (res_y_));
+ if (i & 0x4)
+ point[1] = static_cast<float> (center[1] + size_voxel_[1]);
- if(i & 0x2)
- point[2] = static_cast<float> (center[2] + (max_p_[2] - min_p_[2]) / float (res_z_));
+ if (i & 0x2)
+ point[2] = static_cast<float> (center[2] + size_voxel_[2]);
- if((i & 0x1) ^ ((i >> 1) & 0x1))
- point[0] = static_cast<float> (center[0] + (max_p_[0] - min_p_[0]) / float (res_x_));
+ if ((i & 0x1) ^ ((i >> 1) & 0x1))
+ point[0] = static_cast<float> (center[0] + size_voxel_[0]);
p[i] = point;
}
-
// Find the vertices where the surface intersects the cube
+ std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > vertex_list;
+ vertex_list.resize (12);
if (edgeTable[cubeindex] & 1)
interpolateEdge (p[0], p[1], leaf_node[0], leaf_node[1], vertex_list[0]);
if (edgeTable[cubeindex] & 2)
interpolateEdge (p[3], p[7], leaf_node[3], leaf_node[7], vertex_list[11]);
// Create the triangle
- for (int i = 0; triTable[cubeindex][i] != -1; i+=3)
+ for (int i = 0; triTable[cubeindex][i] != -1; i += 3)
{
- PointNT p1,p2,p3;
- p1.x = vertex_list[triTable[cubeindex][i ]][0];
- p1.y = vertex_list[triTable[cubeindex][i ]][1];
- p1.z = vertex_list[triTable[cubeindex][i ]][2];
+ PointNT p1, p2, p3;
+ p1.getVector3fMap () = vertex_list[triTable[cubeindex][i]];
cloud.push_back (p1);
- p2.x = vertex_list[triTable[cubeindex][i+1]][0];
- p2.y = vertex_list[triTable[cubeindex][i+1]][1];
- p2.z = vertex_list[triTable[cubeindex][i+1]][2];
+ p2.getVector3fMap () = vertex_list[triTable[cubeindex][i+1]];
cloud.push_back (p2);
- p3.x = vertex_list[triTable[cubeindex][i+2]][0];
- p3.y = vertex_list[triTable[cubeindex][i+2]][1];
- p3.z = vertex_list[triTable[cubeindex][i+2]][2];
+ p3.getVector3fMap () = vertex_list[triTable[cubeindex][i+2]];
cloud.push_back (p3);
}
}
pcl::MarchingCubes<PointNT>::getNeighborList1D (std::vector<float> &leaf,
Eigen::Vector3i &index3d)
{
- leaf = std::vector<float> (8, 0.0f);
+ leaf.resize (8);
leaf[0] = getGridValue (index3d);
leaf[1] = getGridValue (index3d + Eigen::Vector3i (1, 0, 0));
leaf[5] = getGridValue (index3d + Eigen::Vector3i (1, 1, 0));
leaf[6] = getGridValue (index3d + Eigen::Vector3i (1, 1, 1));
leaf[7] = getGridValue (index3d + Eigen::Vector3i (0, 1, 1));
+
+ for (int i = 0; i < 8; ++i)
+ {
+ if (std::isnan (leaf[i]))
+ {
+ leaf.clear ();
+ break;
+ }
+ }
}
template <typename PointNT> void
pcl::MarchingCubes<PointNT>::performReconstruction (pcl::PolygonMesh &output)
{
- if (!(iso_level_ >= 0 && iso_level_ < 1))
- {
- PCL_ERROR ("[pcl::%s::performReconstruction] Invalid iso level %f! Please use a number between 0 and 1.\n", getClassName ().c_str (), iso_level_);
- output.cloud.width = output.cloud.height = 0;
- output.cloud.data.clear ();
- output.polygons.clear ();
- return;
- }
-
- // Create grid
- grid_ = std::vector<float> (res_x_*res_y_*res_z_, 0.0f);
-
- // Populate tree
- tree_->setInputCloud (input_);
-
- getBoundingBox ();
-
- // Transform the point cloud into a voxel grid
- // This needs to be implemented in a child class
- voxelizeData ();
-
+ pcl::PointCloud<PointNT> points;
+ performReconstruction (points, output.polygons);
- // Run the actual marching cubes algorithm, store it into a point cloud,
- // and copy the point cloud + connectivity into output
- pcl::PointCloud<PointNT> cloud;
-
- for (int x = 1; x < res_x_-1; ++x)
- for (int y = 1; y < res_y_-1; ++y)
- for (int z = 1; z < res_z_-1; ++z)
- {
- Eigen::Vector3i index_3d (x, y, z);
- std::vector<float> leaf_node;
- getNeighborList1D (leaf_node, index_3d);
- createSurface (leaf_node, index_3d, cloud);
- }
- pcl::toPCLPointCloud2 (cloud, output.cloud);
-
- output.polygons.resize (cloud.size () / 3);
- for (size_t i = 0; i < output.polygons.size (); ++i)
- {
- pcl::Vertices v;
- v.vertices.resize (3);
- for (int j = 0; j < 3; ++j)
- v.vertices[j] = static_cast<int> (i) * 3 + j;
- output.polygons[i] = v;
- }
+ pcl::toPCLPointCloud2 (points, output.cloud);
}
{
if (!(iso_level_ >= 0 && iso_level_ < 1))
{
- PCL_ERROR ("[pcl::%s::performReconstruction] Invalid iso level %f! Please use a number between 0 and 1.\n", getClassName ().c_str (), iso_level_);
+ PCL_ERROR ("[pcl::%s::performReconstruction] Invalid iso level %f! Please use a number between 0 and 1.\n",
+ getClassName ().c_str (), iso_level_);
points.width = points.height = 0;
points.points.clear ();
polygons.clear ();
return;
}
+ // the point cloud really generated from Marching Cubes, prev intermediate_cloud_
+ pcl::PointCloud<PointNT> intermediate_cloud;
+
// Create grid
- grid_ = std::vector<float> (res_x_*res_y_*res_z_, 0.0f);
+ grid_ = std::vector<float> (res_x_*res_y_*res_z_, NAN);
// Populate tree
tree_->setInputCloud (input_);
+ // Compute bounding box and voxel size
getBoundingBox ();
+ size_voxel_ = (upper_boundary_ - lower_boundary_)
+ * Eigen::Array3f (res_x_, res_y_, res_z_).inverse ();
// Transform the point cloud into a voxel grid
// This needs to be implemented in a child class
voxelizeData ();
- // Run the actual marching cubes algorithm, store it into a point cloud,
- // and copy the point cloud + connectivity into output
- points.clear ();
+ // preallocate memory assuming a hull. suppose 6 point per voxel
+ double size_reserve = std::min((double) intermediate_cloud.points.max_size (),
+ 2.0 * 6.0 * (double) (res_y_*res_z_ + res_x_*res_z_ + res_x_*res_y_));
+ intermediate_cloud.reserve ((size_t) size_reserve);
+
for (int x = 1; x < res_x_-1; ++x)
for (int y = 1; y < res_y_-1; ++y)
for (int z = 1; z < res_z_-1; ++z)
Eigen::Vector3i index_3d (x, y, z);
std::vector<float> leaf_node;
getNeighborList1D (leaf_node, index_3d);
- createSurface (leaf_node, index_3d, points);
+ if (!leaf_node.empty ())
+ createSurface (leaf_node, index_3d, intermediate_cloud);
}
+ points.swap (intermediate_cloud);
+
polygons.resize (points.size () / 3);
for (size_t i = 0; i < polygons.size (); ++i)
{
}
}
-
-
#define PCL_INSTANTIATE_MarchingCubes(T) template class PCL_EXPORTS pcl::MarchingCubes<T>;
#endif // PCL_SURFACE_IMPL_MARCHING_CUBES_H_
#include <pcl/Vertices.h>
#include <pcl/kdtree/kdtree_flann.h>
-//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointNT>
-pcl::MarchingCubesHoppe<PointNT>::MarchingCubesHoppe ()
- : MarchingCubes<PointNT> ()
-{
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT>
pcl::MarchingCubesHoppe<PointNT>::~MarchingCubesHoppe ()
template <typename PointNT> void
pcl::MarchingCubesHoppe<PointNT>::voxelizeData ()
{
+ const bool is_far_ignored = dist_ignore_ > 0.0f;
+
for (int x = 0; x < res_x_; ++x)
+ {
+ const int y_start = x * res_y_ * res_z_;
+
for (int y = 0; y < res_y_; ++y)
+ {
+ const int z_start = y_start + y * res_z_;
+
for (int z = 0; z < res_z_; ++z)
{
- std::vector<int> nn_indices;
- std::vector<float> nn_sqr_dists;
-
- Eigen::Vector3f point;
- point[0] = min_p_[0] + (max_p_[0] - min_p_[0]) * float (x) / float (res_x_);
- point[1] = min_p_[1] + (max_p_[1] - min_p_[1]) * float (y) / float (res_y_);
- point[2] = min_p_[2] + (max_p_[2] - min_p_[2]) * float (z) / float (res_z_);
-
+ std::vector<int> nn_indices (1, 0);
+ std::vector<float> nn_sqr_dists (1, 0.0f);
+ const Eigen::Vector3f point = (lower_boundary_ + size_voxel_ * Eigen::Array3f (x, y, z)).matrix ();
PointNT p;
+
p.getVector3fMap () = point;
tree_->nearestKSearch (p, 1, nn_indices, nn_sqr_dists);
- grid_[x * res_y_*res_z_ + y * res_z_ + z] = input_->points[nn_indices[0]].getNormalVector3fMap ().dot (
- point - input_->points[nn_indices[0]].getVector3fMap ());
+ if (!is_far_ignored || nn_sqr_dists[0] < dist_ignore_)
+ {
+ const Eigen::Vector3f normal = input_->points[nn_indices[0]].getNormalVector3fMap ();
+
+ if (!std::isnan (normal (0)) && normal.norm () > 0.5f)
+ grid_[z_start + z] = normal.dot (
+ point - input_->points[nn_indices[0]].getVector3fMap ());
+ }
}
+ }
+ }
}
#include <pcl/Vertices.h>
#include <pcl/kdtree/kdtree_flann.h>
-//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointNT>
-pcl::MarchingCubesRBF<PointNT>::MarchingCubesRBF ()
- : MarchingCubes<PointNT> (),
- off_surface_epsilon_ (0.1f)
-{
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT>
pcl::MarchingCubesRBF<PointNT>::~MarchingCubesRBF ()
pcl::MarchingCubesRBF<PointNT>::voxelizeData ()
{
// Initialize data structures
- unsigned int N = static_cast<unsigned int> (input_->size ());
+ const unsigned int N = static_cast<unsigned int> (input_->size ());
Eigen::MatrixXd M (2*N, 2*N),
d (2*N, 1);
for (int y = 0; y < res_y_; ++y)
for (int z = 0; z < res_z_; ++z)
{
- Eigen::Vector3d point;
- point[0] = min_p_[0] + (max_p_[0] - min_p_[0]) * float (x) / float (res_x_);
- point[1] = min_p_[1] + (max_p_[1] - min_p_[1]) * float (y) / float (res_y_);
- point[2] = min_p_[2] + (max_p_[2] - min_p_[2]) * float (z) / float (res_z_);
+ const Eigen::Vector3f point_f = (size_voxel_ * Eigen::Array3f (x, y, z)
+ + lower_boundary_).matrix ();
+ const Eigen::Vector3d point = point_f.cast<double> ();
double f = 0.0;
std::vector<double>::const_iterator w_it (weights.begin());
#include <pcl/common/centroid.h>
#include <pcl/common/eigen.h>
#include <pcl/common/geometry.h>
+#include <boost/bind.hpp>
#ifdef _OPENMP
#include <omp.h>
normals_->points.clear ();
}
-
// Copy the header
output.header = input_->header;
output.width = output.height = 0;
if (!initCompute ())
return;
-
// Initialize the spatial locator
if (!tree_)
{
boost::uniform_real<float> uniform_distrib (-tmp, tmp);
rng_uniform_distribution_.reset (new boost::variate_generator<boost::mt19937&, boost::uniform_real<float> > (rng_alg_, uniform_distrib));
- mls_results_.resize (1); // Need to have a reference to a single dummy result.
-
break;
}
case (VOXEL_GRID_DILATION):
case (DISTINCT_CLOUD):
- {
- mls_results_.resize (input_->size ());
- break;
- }
+ {
+ if (!cache_mls_results_)
+ PCL_WARN ("The cache mls results is forced when using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.\n");
+
+ cache_mls_results_ = true;
+ break;
+ }
default:
- {
- mls_results_.resize (1); // Need to have a reference to a single dummy result.
- break;
- }
+ break;
+ }
+
+ if (cache_mls_results_)
+ {
+ mls_results_.resize (input_->size ());
+ }
+ else
+ {
+ mls_results_.resize (1); // Need to have a reference to a single dummy result.
}
// Perform the actual surface reconstruction
template <typename PointInT, typename PointOutT> void
pcl::MovingLeastSquares<PointInT, PointOutT>::computeMLSPointNormal (int index,
const std::vector<int> &nn_indices,
- std::vector<float> &nn_sqr_dists,
PointCloudOut &projected_points,
NormalCloud &projected_points_normals,
PointIndices &corresponding_input_indices,
// Note: this method is const because it needs to be thread-safe
// (MovingLeastSquaresOMP calls it from multiple threads)
- // Compute the plane coefficients
- EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
- Eigen::Vector4d xyz_centroid;
-
- // Estimate the XYZ centroid
- pcl::compute3DCentroid (*input_, nn_indices, xyz_centroid);
-
- // Compute the 3x3 covariance matrix
- pcl::computeCovarianceMatrix (*input_, nn_indices, xyz_centroid, covariance_matrix);
- EIGEN_ALIGN16 Eigen::Vector3d::Scalar eigen_value;
- EIGEN_ALIGN16 Eigen::Vector3d eigen_vector;
- Eigen::Vector4d model_coefficients;
- pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
- model_coefficients.head<3> ().matrix () = eigen_vector;
- model_coefficients[3] = 0;
- model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);
-
- // Projected query point
- Eigen::Vector3d point = input_->points[index].getVector3fMap ().template cast<double> ();
- double distance = point.dot (model_coefficients.head<3> ()) + model_coefficients[3];
- point -= distance * model_coefficients.head<3> ();
-
- float curvature = static_cast<float> (covariance_matrix.trace ());
- // Compute the curvature surface change
- if (curvature != 0)
- curvature = fabsf (float (eigen_value / double (curvature)));
-
-
- // Get a copy of the plane normal easy access
- Eigen::Vector3d plane_normal = model_coefficients.head<3> ();
- // Vector in which the polynomial coefficients will be put
- Eigen::VectorXd c_vec;
- // Local coordinate system (Darboux frame)
- Eigen::Vector3d v_axis (0.0f, 0.0f, 0.0f), u_axis (0.0f, 0.0f, 0.0f);
-
-
-
- // Perform polynomial fit to update point and normal
- ////////////////////////////////////////////////////
- if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_)
- {
- // Update neighborhood, since point was projected, and computing relative
- // positions. Note updating only distances for the weights for speed
- std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> > de_meaned (nn_indices.size ());
- for (size_t ni = 0; ni < nn_indices.size (); ++ni)
- {
- de_meaned[ni][0] = input_->points[nn_indices[ni]].x - point[0];
- de_meaned[ni][1] = input_->points[nn_indices[ni]].y - point[1];
- de_meaned[ni][2] = input_->points[nn_indices[ni]].z - point[2];
- nn_sqr_dists[ni] = static_cast<float> (de_meaned[ni].dot (de_meaned[ni]));
- }
-
- // Allocate matrices and vectors to hold the data used for the polynomial fit
- Eigen::VectorXd weight_vec (nn_indices.size ());
- Eigen::MatrixXd P (nr_coeff_, nn_indices.size ());
- Eigen::VectorXd f_vec (nn_indices.size ());
- Eigen::MatrixXd P_weight; // size will be (nr_coeff_, nn_indices.size ());
- Eigen::MatrixXd P_weight_Pt (nr_coeff_, nr_coeff_);
-
- // Get local coordinate system (Darboux frame)
- v_axis = plane_normal.unitOrthogonal ();
- u_axis = plane_normal.cross (v_axis);
-
- // Go through neighbors, transform them in the local coordinate system,
- // save height and the evaluation of the polynome's terms
- double u_coord, v_coord, u_pow, v_pow;
- for (size_t ni = 0; ni < nn_indices.size (); ++ni)
- {
- // (Re-)compute weights
- weight_vec (ni) = exp (-nn_sqr_dists[ni] / sqr_gauss_param_);
-
- // Transforming coordinates
- u_coord = de_meaned[ni].dot (u_axis);
- v_coord = de_meaned[ni].dot (v_axis);
- f_vec (ni) = de_meaned[ni].dot (plane_normal);
-
- // Compute the polynomial's terms at the current point
- int j = 0;
- u_pow = 1;
- for (int ui = 0; ui <= order_; ++ui)
- {
- v_pow = 1;
- for (int vi = 0; vi <= order_ - ui; ++vi)
- {
- P (j++, ni) = u_pow * v_pow;
- v_pow *= v_coord;
- }
- u_pow *= u_coord;
- }
- }
-
- // Computing coefficients
- P_weight = P * weight_vec.asDiagonal ();
- P_weight_Pt = P_weight * P.transpose ();
- c_vec = P_weight * f_vec;
- P_weight_Pt.llt ().solveInPlace (c_vec);
- }
+ mls_result.computeMLSSurface<PointInT> (*input_, index, nn_indices, search_radius_, order_);
switch (upsample_method_)
{
case (NONE):
{
- Eigen::Vector3d normal = plane_normal;
-
- if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_ && pcl_isfinite (c_vec[0]))
- {
- point += (c_vec[0] * plane_normal);
-
- // Compute tangent vectors using the partial derivates evaluated at (0,0) which is c_vec[order_+1] and c_vec[1]
- if (compute_normals_)
- normal = plane_normal - c_vec[order_ + 1] * u_axis - c_vec[1] * v_axis;
- }
-
- PointOutT aux;
- aux.x = static_cast<float> (point[0]);
- aux.y = static_cast<float> (point[1]);
- aux.z = static_cast<float> (point[2]);
- projected_points.push_back (aux);
-
- if (compute_normals_)
- {
- pcl::Normal aux_normal;
- aux_normal.normal_x = static_cast<float> (normal[0]);
- aux_normal.normal_y = static_cast<float> (normal[1]);
- aux_normal.normal_z = static_cast<float> (normal[2]);
- aux_normal.curvature = curvature;
- projected_points_normals.push_back (aux_normal);
- corresponding_input_indices.indices.push_back (index);
- }
-
+ MLSResult::MLSProjectionResults proj = mls_result.projectQueryPoint (projection_method_, nr_coeff_);
+ addProjectedPointNormal (index, proj.point, proj.normal, mls_result.curvature, projected_points, projected_points_normals, corresponding_input_indices);
break;
}
// Uniformly sample a circle around the query point using the radius and step parameters
for (float u_disp = -static_cast<float> (upsampling_radius_); u_disp <= upsampling_radius_; u_disp += static_cast<float> (upsampling_step_))
for (float v_disp = -static_cast<float> (upsampling_radius_); v_disp <= upsampling_radius_; v_disp += static_cast<float> (upsampling_step_))
- if (u_disp*u_disp + v_disp*v_disp < upsampling_radius_*upsampling_radius_)
+ if (u_disp * u_disp + v_disp * v_disp < upsampling_radius_ * upsampling_radius_)
{
- PointOutT projected_point;
- pcl::Normal projected_normal;
- projectPointToMLSSurface (u_disp, v_disp, u_axis, v_axis, plane_normal, point,
- curvature, c_vec,
- static_cast<int> (nn_indices.size ()),
- projected_point, projected_normal);
-
- projected_points.push_back (projected_point);
- corresponding_input_indices.indices.push_back (index);
- if (compute_normals_)
- projected_points_normals.push_back (projected_normal);
+ MLSResult::MLSProjectionResults proj = mls_result.projectPointSimpleToPolynomialSurface (u_disp, v_disp);
+ addProjectedPointNormal (index, proj.point, proj.normal, mls_result.curvature, projected_points, projected_points_normals, corresponding_input_indices);
}
break;
}
if (num_points_to_add <= 0)
{
// Just add the current point
- Eigen::Vector3d normal = plane_normal;
- if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_ && pcl_isfinite (c_vec[0]))
- {
- // Projection onto MLS surface along Darboux normal to the height at (0,0)
- point += (c_vec[0] * plane_normal);
- // Compute tangent vectors using the partial derivates evaluated at (0,0) which is c_vec[order_+1] and c_vec[1]
- if (compute_normals_)
- normal = plane_normal - c_vec[order_ + 1] * u_axis - c_vec[1] * v_axis;
- }
- PointOutT aux;
- aux.x = static_cast<float> (point[0]);
- aux.y = static_cast<float> (point[1]);
- aux.z = static_cast<float> (point[2]);
- projected_points.push_back (aux);
- corresponding_input_indices.indices.push_back (index);
-
- if (compute_normals_)
- {
- pcl::Normal aux_normal;
- aux_normal.normal_x = static_cast<float> (normal[0]);
- aux_normal.normal_y = static_cast<float> (normal[1]);
- aux_normal.normal_z = static_cast<float> (normal[2]);
- aux_normal.curvature = curvature;
- projected_points_normals.push_back (aux_normal);
- }
+ MLSResult::MLSProjectionResults proj = mls_result.projectQueryPoint (projection_method_, nr_coeff_);
+ addProjectedPointNormal (index, proj.point, proj.normal, mls_result.curvature, projected_points, projected_points_normals, corresponding_input_indices);
}
else
{
// Sample the local plane
for (int num_added = 0; num_added < num_points_to_add;)
{
- float u_disp = (*rng_uniform_distribution_) (),
- v_disp = (*rng_uniform_distribution_) ();
+ double u = (*rng_uniform_distribution_) ();
+ double v = (*rng_uniform_distribution_) ();
+
// Check if inside circle; if not, try another coin flip
- if (u_disp * u_disp + v_disp * v_disp > search_radius_ * search_radius_/4)
+ if (u * u + v * v > search_radius_ * search_radius_ / 4)
continue;
+ MLSResult::MLSProjectionResults proj;
+ if (order_ > 1 && mls_result.num_neighbors >= 5 * nr_coeff_)
+ proj = mls_result.projectPointSimpleToPolynomialSurface (u, v);
+ else
+ proj = mls_result.projectPointToMLSPlane (u, v);
- PointOutT projected_point;
- pcl::Normal projected_normal;
- projectPointToMLSSurface (u_disp, v_disp, u_axis, v_axis, plane_normal, point,
- curvature, c_vec,
- static_cast<int> (nn_indices.size ()),
- projected_point, projected_normal);
-
- projected_points.push_back (projected_point);
- corresponding_input_indices.indices.push_back (index);
- if (compute_normals_)
- projected_points_normals.push_back (projected_normal);
+ addProjectedPointNormal (index, proj.point, proj.normal, mls_result.curvature, projected_points, projected_points_normals, corresponding_input_indices);
- num_added ++;
+ num_added++;
}
}
break;
}
- case (VOXEL_GRID_DILATION):
- case (DISTINCT_CLOUD):
- {
- // Take all point pairs and sample space between them in a grid-fashion
- // \note consider only point pairs with increasing indices
- mls_result = MLSResult (point, plane_normal, u_axis, v_axis, c_vec, static_cast<int> (nn_indices.size ()), curvature);
+ default:
break;
- }
}
}
-//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
-pcl::MovingLeastSquares<PointInT, PointOutT>::projectPointToMLSSurface (float &u_disp, float &v_disp,
- Eigen::Vector3d &u, Eigen::Vector3d &v,
- Eigen::Vector3d &plane_normal,
- Eigen::Vector3d &mean,
- float &curvature,
- Eigen::VectorXd &c_vec,
- int num_neighbors,
- PointOutT &result_point,
- pcl::Normal &result_normal) const
+pcl::MovingLeastSquares<PointInT, PointOutT>::addProjectedPointNormal (int index,
+ const Eigen::Vector3d &point,
+ const Eigen::Vector3d &normal,
+ double curvature,
+ PointCloudOut &projected_points,
+ NormalCloud &projected_points_normals,
+ PointIndices &corresponding_input_indices) const
{
- double n_disp = 0.0f;
- double d_u = 0.0f, d_v = 0.0f;
+ PointOutT aux;
+ aux.x = static_cast<float> (point[0]);
+ aux.y = static_cast<float> (point[1]);
+ aux.z = static_cast<float> (point[2]);
- // HARDCODED 5*nr_coeff_ to guarantee that the computed polynomial had a proper point set basis
- if (polynomial_fit_ && num_neighbors >= 5*nr_coeff_ && pcl_isfinite (c_vec[0]))
- {
- // Compute the displacement along the normal using the fitted polynomial
- // and compute the partial derivatives needed for estimating the normal
- int j = 0;
- float u_pow = 1.0f, v_pow = 1.0f, u_pow_prev = 1.0f, v_pow_prev = 1.0f;
- for (int ui = 0; ui <= order_; ++ui)
- {
- v_pow = 1;
- for (int vi = 0; vi <= order_ - ui; ++vi)
- {
- // Compute displacement along normal
- n_disp += u_pow * v_pow * c_vec[j++];
+ // Copy additional point information if available
+ copyMissingFields (input_->points[index], aux);
- // Compute partial derivatives
- if (ui >= 1)
- d_u += c_vec[j-1] * ui * u_pow_prev * v_pow;
- if (vi >= 1)
- d_v += c_vec[j-1] * vi * u_pow * v_pow_prev;
+ projected_points.push_back (aux);
+ corresponding_input_indices.indices.push_back (index);
- v_pow_prev = v_pow;
- v_pow *= v_disp;
- }
- u_pow_prev = u_pow;
- u_pow *= u_disp;
- }
+ if (compute_normals_)
+ {
+ pcl::Normal aux_normal;
+ aux_normal.normal_x = static_cast<float> (normal[0]);
+ aux_normal.normal_y = static_cast<float> (normal[1]);
+ aux_normal.normal_z = static_cast<float> (normal[2]);
+ aux_normal.curvature = curvature;
+ projected_points_normals.push_back (aux_normal);
}
-
- result_point.x = static_cast<float> (mean[0] + u[0] * u_disp + v[0] * v_disp + plane_normal[0] * n_disp);
- result_point.y = static_cast<float> (mean[1] + u[1] * u_disp + v[1] * v_disp + plane_normal[1] * n_disp);
- result_point.z = static_cast<float> (mean[2] + u[2] * u_disp + v[2] * v_disp + plane_normal[2] * n_disp);
-
- Eigen::Vector3d normal = plane_normal - d_u * u - d_v * v;
- normal.normalize ();
-
- result_normal.normal_x = static_cast<float> (normal[0]);
- result_normal.normal_y = static_cast<float> (normal[1]);
- result_normal.normal_z = static_cast<float> (normal[2]);
- result_normal.curvature = curvature;
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Compute the number of coefficients
nr_coeff_ = (order_ + 1) * (order_ + 2) / 2;
- // Allocate enough space to hold the results of nearest neighbor searches
- // \note resize is irrelevant for a radiusSearch ().
- std::vector<int> nn_indices;
- std::vector<float> nn_sqr_dists;
-
- size_t mls_result_index = 0;
-
- // For all points
- for (size_t cp = 0; cp < indices_->size (); ++cp)
- {
- // Get the initial estimates of point positions and their neighborhoods
- if (!searchForNeighbors ((*indices_)[cp], nn_indices, nn_sqr_dists))
- continue;
-
-
- // Check the number of nearest neighbors for normal estimation (and later
- // for polynomial fit as well)
- if (nn_indices.size () < 3)
- continue;
-
-
- PointCloudOut projected_points;
- NormalCloud projected_points_normals;
- // Get a plane approximating the local surface's tangent and project point onto it
- int index = (*indices_)[cp];
-
- if (upsample_method_ == VOXEL_GRID_DILATION || upsample_method_ == DISTINCT_CLOUD)
- mls_result_index = index; // otherwise we give it a dummy location.
-
- computeMLSPointNormal (index, nn_indices, nn_sqr_dists, projected_points, projected_points_normals, *corresponding_input_indices_, mls_results_[mls_result_index]);
-
-
- // Copy all information from the input cloud to the output points (not doing any interpolation)
- for (size_t pp = 0; pp < projected_points.size (); ++pp)
- copyMissingFields (input_->points[(*indices_)[cp]], projected_points[pp]);
-
-
- // Append projected points to output
- output.insert (output.end (), projected_points.begin (), projected_points.end ());
- if (compute_normals_)
- normals_->insert (normals_->end (), projected_points_normals.begin (), projected_points_normals.end ());
- }
-
- // Perform the distinct-cloud or voxel-grid upsampling
- performUpsampling (output);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////
#ifdef _OPENMP
-template <typename PointInT, typename PointOutT> void
-pcl::MovingLeastSquaresOMP<PointInT, PointOutT>::performProcessing (PointCloudOut &output)
-{
- // Compute the number of coefficients
- nr_coeff_ = (order_ + 1) * (order_ + 2) / 2;
-
// (Maximum) number of threads
- unsigned int threads = threads_ == 0 ? 1 : threads_;
-
+ const unsigned int threads = threads_ == 0 ? 1 : threads_;
// Create temporaries for each thread in order to avoid synchronization
typename PointCloudOut::CloudVectorType projected_points (threads);
typename NormalCloud::CloudVectorType projected_points_normals (threads);
std::vector<PointIndices> corresponding_input_indices (threads);
+#endif
// For all points
+#ifdef _OPENMP
#pragma omp parallel for schedule (dynamic,1000) num_threads (threads)
+#endif
for (int cp = 0; cp < static_cast<int> (indices_->size ()); ++cp)
{
// Allocate enough space to hold the results of nearest neighbor searches
std::vector<float> nn_sqr_dists;
// Get the initial estimates of point positions and their neighborhoods
- if (this->searchForNeighbors ((*indices_)[cp], nn_indices, nn_sqr_dists))
+ if (searchForNeighbors ((*indices_)[cp], nn_indices, nn_sqr_dists))
{
- // Check the number of nearest neighbors for normal estimation (and later
- // for polynomial fit as well)
+ // Check the number of nearest neighbors for normal estimation (and later for polynomial fit as well)
if (nn_indices.size () >= 3)
{
// This thread's ID (range 0 to threads-1)
- int tn = omp_get_thread_num ();
-
+#ifdef _OPENMP
+ const int tn = omp_get_thread_num ();
// Size of projected points before computeMLSPointNormal () adds points
size_t pp_size = projected_points[tn].size ();
+#else
+ PointCloudOut projected_points;
+ NormalCloud projected_points_normals;
+#endif
// Get a plane approximating the local surface's tangent and project point onto it
- int index = (*indices_)[cp];
+ const int index = (*indices_)[cp];
+
size_t mls_result_index = 0;
-
- if (upsample_method_ == VOXEL_GRID_DILATION || upsample_method_ == DISTINCT_CLOUD)
+ if (cache_mls_results_)
mls_result_index = index; // otherwise we give it a dummy location.
-
- this->computeMLSPointNormal (index, nn_indices, nn_sqr_dists, projected_points[tn], projected_points_normals[tn], corresponding_input_indices[tn], this->mls_results_[mls_result_index]);
+
+#ifdef _OPENMP
+ computeMLSPointNormal (index, nn_indices, projected_points[tn], projected_points_normals[tn], corresponding_input_indices[tn], mls_results_[mls_result_index]);
// Copy all information from the input cloud to the output points (not doing any interpolation)
for (size_t pp = pp_size; pp < projected_points[tn].size (); ++pp)
- this->copyMissingFields (input_->points[(*indices_)[cp]], projected_points[tn][pp]);
- }
- }
- }
+ copyMissingFields (input_->points[(*indices_)[cp]], projected_points[tn][pp]);
+#else
+ computeMLSPointNormal (index, nn_indices, projected_points, projected_points_normals, *corresponding_input_indices_, mls_results_[mls_result_index]);
+ // Append projected points to output
+ output.insert (output.end (), projected_points.begin (), projected_points.end ());
+ if (compute_normals_)
+ normals_->insert (normals_->end (), projected_points_normals.begin (), projected_points_normals.end ());
+#endif
+ }
+ }
+ }
+#ifdef _OPENMP
// Combine all threads' results into the output vectors
for (unsigned int tn = 0; tn < threads; ++tn)
{
output.insert (output.end (), projected_points[tn].begin (), projected_points[tn].end ());
corresponding_input_indices_->indices.insert (corresponding_input_indices_->indices.end (),
- corresponding_input_indices[tn].indices.begin (), corresponding_input_indices[tn].indices.end ());
+ corresponding_input_indices[tn].indices.begin (), corresponding_input_indices[tn].indices.end ());
if (compute_normals_)
normals_->insert (normals_->end (), projected_points_normals[tn].begin (), projected_points_normals[tn].end ());
}
+#endif
// Perform the distinct-cloud or voxel-grid upsampling
- this->performUpsampling (output);
+ performUpsampling (output);
}
-#endif
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT> void
pcl::MovingLeastSquares<PointInT, PointOutT>::performUpsampling (PointCloudOut &output)
{
+
if (upsample_method_ == DISTINCT_CLOUD)
{
+ corresponding_input_indices_.reset (new PointIndices);
for (size_t dp_i = 0; dp_i < distinct_cloud_->size (); ++dp_i) // dp_i = distinct_point_i
{
// Distinct cloud may have nan points, skip them
continue;
Eigen::Vector3d add_point = distinct_cloud_->points[dp_i].getVector3fMap ().template cast<double> ();
-
- float u_disp = static_cast<float> ((add_point - mls_results_[input_index].mean).dot (mls_results_[input_index].u_axis)),
- v_disp = static_cast<float> ((add_point - mls_results_[input_index].mean).dot (mls_results_[input_index].v_axis));
-
- PointOutT result_point;
- pcl::Normal result_normal;
- projectPointToMLSSurface (u_disp, v_disp,
- mls_results_[input_index].u_axis, mls_results_[input_index].v_axis,
- mls_results_[input_index].plane_normal,
- mls_results_[input_index].mean,
- mls_results_[input_index].curvature,
- mls_results_[input_index].c_vec,
- mls_results_[input_index].num_neighbors,
- result_point, result_normal);
-
- // Copy additional point information if available
- copyMissingFields (input_->points[input_index], result_point);
-
- // Store the id of the original point
- corresponding_input_indices_->indices.push_back (input_index);
-
- output.push_back (result_point);
- if (compute_normals_)
- normals_->push_back (result_normal);
+ MLSResult::MLSProjectionResults proj = mls_results_[input_index].projectPoint (add_point, projection_method_, 5 * nr_coeff_);
+ addProjectedPointNormal (input_index, proj.point, proj.normal, mls_results_[input_index].curvature, output, *normals_, *corresponding_input_indices_);
}
}
// Then, project the newly obtained points to the MLS surface
if (upsample_method_ == VOXEL_GRID_DILATION)
{
+ corresponding_input_indices_.reset (new PointIndices);
+
MLSVoxelGrid voxel_grid (input_, indices_, voxel_size_);
for (int iteration = 0; iteration < dilation_iteration_num_; ++iteration)
voxel_grid.dilate ();
continue;
Eigen::Vector3d add_point = p.getVector3fMap ().template cast<double> ();
- float u_disp = static_cast<float> ((add_point - mls_results_[input_index].mean).dot (mls_results_[input_index].u_axis)),
- v_disp = static_cast<float> ((add_point - mls_results_[input_index].mean).dot (mls_results_[input_index].v_axis));
-
- PointOutT result_point;
- pcl::Normal result_normal;
- projectPointToMLSSurface (u_disp, v_disp,
- mls_results_[input_index].u_axis, mls_results_[input_index].v_axis,
- mls_results_[input_index].plane_normal,
- mls_results_[input_index].mean,
- mls_results_[input_index].curvature,
- mls_results_[input_index].c_vec,
- mls_results_[input_index].num_neighbors,
- result_point, result_normal);
-
- // Copy additional point information if available
- copyMissingFields (input_->points[input_index], result_point);
-
- // Store the id of the original point
- corresponding_input_indices_->indices.push_back (input_index);
-
- output.push_back (result_point);
-
- if (compute_normals_)
- normals_->push_back (result_normal);
+ MLSResult::MLSProjectionResults proj = mls_results_[input_index].projectPoint (add_point, projection_method_, 5 * nr_coeff_);
+ addProjectedPointNormal (input_index, proj.point, proj.normal, mls_results_[input_index].curvature, output, *normals_, *corresponding_input_indices_);
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////
-template <typename PointInT, typename PointOutT>
-pcl::MovingLeastSquares<PointInT, PointOutT>::MLSResult::MLSResult (const Eigen::Vector3d &a_mean,
- const Eigen::Vector3d &a_plane_normal,
- const Eigen::Vector3d &a_u,
- const Eigen::Vector3d &a_v,
- const Eigen::VectorXd a_c_vec,
- const int a_num_neighbors,
- const float &a_curvature) :
- mean (a_mean), plane_normal (a_plane_normal), u_axis (a_u), v_axis (a_v), c_vec (a_c_vec), num_neighbors (a_num_neighbors),
- curvature (a_curvature), valid (true)
+pcl::MLSResult::MLSResult (const Eigen::Vector3d &a_query_point,
+ const Eigen::Vector3d &a_mean,
+ const Eigen::Vector3d &a_plane_normal,
+ const Eigen::Vector3d &a_u,
+ const Eigen::Vector3d &a_v,
+ const Eigen::VectorXd &a_c_vec,
+ const int a_num_neighbors,
+ const float a_curvature,
+ const int a_order) :
+ query_point (a_query_point), mean (a_mean), plane_normal (a_plane_normal), u_axis (a_u), v_axis (a_v), c_vec (a_c_vec), num_neighbors (a_num_neighbors),
+ curvature (a_curvature), order (a_order), valid (true)
+{}
+
+void
+pcl::MLSResult::getMLSCoordinates (const Eigen::Vector3d &pt, double &u, double &v, double &w) const
{
+ Eigen::Vector3d delta = pt - mean;
+ u = delta.dot (u_axis);
+ v = delta.dot (v_axis);
+ w = delta.dot (plane_normal);
+}
+
+void
+pcl::MLSResult::getMLSCoordinates (const Eigen::Vector3d &pt, double &u, double &v) const
+{
+ Eigen::Vector3d delta = pt - mean;
+ u = delta.dot (u_axis);
+ v = delta.dot (v_axis);
+}
+
+double
+pcl::MLSResult::getPolynomialValue (const double u, const double v) const
+{
+ // Compute the polynomial's terms at the current point
+ // Example for second order: z = a + b*y + c*y^2 + d*x + e*x*y + f*x^2
+ double u_pow, v_pow, result;
+ int j = 0;
+ u_pow = 1;
+ result = 0;
+ for (int ui = 0; ui <= order; ++ui)
+ {
+ v_pow = 1;
+ for (int vi = 0; vi <= order - ui; ++vi)
+ {
+ result += c_vec[j++] * u_pow * v_pow;
+ v_pow *= v;
+ }
+ u_pow *= u;
+ }
+
+ return (result);
+}
+
+pcl::MLSResult::PolynomialPartialDerivative
+pcl::MLSResult::getPolynomialPartialDerivative (const double u, const double v) const
+{
+ // Compute the displacement along the normal using the fitted polynomial
+ // and compute the partial derivatives needed for estimating the normal
+ PolynomialPartialDerivative d;
+ Eigen::VectorXd u_pow (order + 2), v_pow (order + 2);
+ int j = 0;
+
+ d.z = d.z_u = d.z_v = d.z_uu = d.z_vv = d.z_uv = 0;
+ u_pow (0) = v_pow (0) = 1;
+ for (int ui = 0; ui <= order; ++ui)
+ {
+ for (int vi = 0; vi <= order - ui; ++vi)
+ {
+ // Compute displacement along normal
+ d.z += u_pow (ui) * v_pow (vi) * c_vec[j];
+
+ // Compute partial derivatives
+ if (ui >= 1)
+ d.z_u += c_vec[j] * ui * u_pow (ui - 1) * v_pow (vi);
+
+ if (vi >= 1)
+ d.z_v += c_vec[j] * vi * u_pow (ui) * v_pow (vi - 1);
+
+ if (ui >= 1 && vi >= 1)
+ d.z_uv += c_vec[j] * ui * u_pow (ui - 1) * vi * v_pow (vi - 1);
+
+ if (ui >= 2)
+ d.z_uu += c_vec[j] * ui * (ui - 1) * u_pow (ui - 2) * v_pow (vi);
+
+ if (vi >= 2)
+ d.z_vv += c_vec[j] * vi * (vi - 1) * u_pow (ui) * v_pow (vi - 2);
+
+ if (ui == 0)
+ v_pow (vi + 1) = v_pow (vi) * v;
+
+ ++j;
+ }
+ u_pow (ui + 1) = u_pow (ui) * u;
+ }
+
+ return (d);
+}
+
+Eigen::Vector2f
+pcl::MLSResult::calculatePrincipleCurvatures (const double u, const double v) const
+{
+ Eigen::Vector2f k (1e-5, 1e-5);
+
+ // Note: this use the Monge Patch to derive the Gaussian curvature and Mean Curvature found here http://mathworld.wolfram.com/MongePatch.html
+ // Then:
+ // k1 = H + sqrt(H^2 - K)
+ // k1 = H - sqrt(H^2 - K)
+ if (order > 1 && c_vec.size () >= (order + 1) * (order + 2) / 2 && pcl_isfinite (c_vec[0]))
+ {
+ PolynomialPartialDerivative d = getPolynomialPartialDerivative (u, v);
+ double Z = 1 + d.z_u * d.z_u + d.z_v * d.z_v;
+ double Zlen = std::sqrt (Z);
+ double K = (d.z_uu * d.z_vv - d.z_uv * d.z_uv) / (Z * Z);
+ double H = ((1.0 + d.z_v * d.z_v) * d.z_uu - 2.0 * d.z_u * d.z_v * d.z_uv + (1.0 + d.z_u * d.z_u) * d.z_vv) / (2.0 * Zlen * Zlen * Zlen);
+ double disc2 = H * H - K;
+ assert (disc2 >= 0.0);
+ double disc = std::sqrt (disc2);
+ k[0] = H + disc;
+ k[1] = H - disc;
+
+ if (std::abs (k[0]) > std::abs (k[1])) std::swap (k[0], k[1]);
+ }
+ else
+ {
+ PCL_ERROR ("No Polynomial fit data, unable to calculate the principle curvatures!\n");
+ }
+
+ return (k);
+}
+
+pcl::MLSResult::MLSProjectionResults
+pcl::MLSResult::projectPointOrthogonalToPolynomialSurface (const double u, const double v, const double w) const
+{
+ double gu = u;
+ double gv = v;
+ double gw = 0;
+
+ MLSProjectionResults result;
+ result.normal = plane_normal;
+ if (order > 1 && c_vec.size () >= (order + 1) * (order + 2) / 2 && pcl_isfinite (c_vec[0]))
+ {
+ PolynomialPartialDerivative d = getPolynomialPartialDerivative (gu, gv);
+ gw = d.z;
+ double err_total;
+ double dist1 = std::abs (gw - w);
+ double dist2;
+ do
+ {
+ double e1 = (gu - u) + d.z_u * gw - d.z_u * w;
+ double e2 = (gv - v) + d.z_v * gw - d.z_v * w;
+
+ double F1u = 1 + d.z_uu * gw + d.z_u * d.z_u - d.z_uu * w;
+ double F1v = d.z_uv * gw + d.z_u * d.z_v - d.z_uv * w;
+
+ double F2u = d.z_uv * gw + d.z_v * d.z_u - d.z_uv * w;
+ double F2v = 1 + d.z_vv * gw + d.z_v * d.z_v - d.z_vv * w;
+
+ Eigen::MatrixXd J (2, 2);
+ J (0, 0) = F1u;
+ J (0, 1) = F1v;
+ J (1, 0) = F2u;
+ J (1, 1) = F2v;
+
+ Eigen::Vector2d err (e1, e2);
+ Eigen::Vector2d update = J.inverse () * err;
+ gu -= update (0);
+ gv -= update (1);
+
+ d = getPolynomialPartialDerivative (gu, gv);
+ gw = d.z;
+ dist2 = std::sqrt ((gu - u) * (gu - u) + (gv - v) * (gv - v) + (gw - w) * (gw - w));
+
+ err_total = std::sqrt (e1 * e1 + e2 * e2);
+
+ } while (err_total > 1e-8 && dist2 < dist1);
+
+ if (dist2 > dist1) // the optimization was diverging reset the coordinates for simple projection
+ {
+ gu = u;
+ gv = v;
+ d = getPolynomialPartialDerivative (u, v);
+ gw = d.z;
+ }
+
+ result.u = gu;
+ result.v = gv;
+ result.normal -= (d.z_u * u_axis + d.z_v * v_axis);
+ result.normal.normalize ();
+ }
+
+ result.point = mean + gu * u_axis + gv * v_axis + gw * plane_normal;
+
+ return (result);
+}
+
+pcl::MLSResult::MLSProjectionResults
+pcl::MLSResult::projectPointToMLSPlane (const double u, const double v) const
+{
+ MLSProjectionResults result;
+ result.u = u;
+ result.v = v;
+ result.normal = plane_normal;
+ result.point = mean + u * u_axis + v * v_axis;
+
+ return (result);
+}
+
+pcl::MLSResult::MLSProjectionResults
+pcl::MLSResult::projectPointSimpleToPolynomialSurface (const double u, const double v) const
+{
+ MLSProjectionResults result;
+ double w = 0;
+
+ result.u = u;
+ result.v = v;
+ result.normal = plane_normal;
+
+ if (order > 1 && c_vec.size () >= (order + 1) * (order + 2) / 2 && pcl_isfinite (c_vec[0]))
+ {
+ PolynomialPartialDerivative d = getPolynomialPartialDerivative (u, v);
+ w = d.z;
+ result.normal -= (d.z_u * u_axis + d.z_v * v_axis);
+ result.normal.normalize ();
+ }
+
+ result.point = mean + u * u_axis + v * v_axis + w * plane_normal;
+
+ return (result);
+}
+
+pcl::MLSResult::MLSProjectionResults
+pcl::MLSResult::projectPoint (const Eigen::Vector3d &pt, ProjectionMethod method, int required_neighbors) const
+{
+ double u, v, w;
+ getMLSCoordinates (pt, u, v, w);
+
+ MLSResult::MLSProjectionResults proj;
+ if (order > 1 && num_neighbors >= required_neighbors && pcl_isfinite (c_vec[0]) && method != NONE)
+ {
+ if (method == ORTHOGONAL)
+ proj = projectPointOrthogonalToPolynomialSurface (u, v, w);
+ else // SIMPLE
+ proj = projectPointSimpleToPolynomialSurface (u, v);
+ }
+ else
+ {
+ proj = projectPointToMLSPlane (u, v);
+ }
+
+ return (proj);
+}
+
+pcl::MLSResult::MLSProjectionResults
+pcl::MLSResult::projectQueryPoint (ProjectionMethod method, int required_neighbors) const
+{
+ MLSResult::MLSProjectionResults proj;
+ if (order > 1 && num_neighbors >= required_neighbors && pcl_isfinite (c_vec[0]) && method != NONE)
+ {
+ if (method == ORTHOGONAL)
+ {
+ double u, v, w;
+ getMLSCoordinates (query_point, u, v, w);
+ proj = projectPointOrthogonalToPolynomialSurface (u, v, w);
+ }
+ else // SIMPLE
+ {
+ // Projection onto MLS surface along Darboux normal to the height at (0,0)
+ proj.point = mean + (c_vec[0] * plane_normal);
+
+ // Compute tangent vectors using the partial derivates evaluated at (0,0) which is c_vec[order_+1] and c_vec[1]
+ proj.normal = plane_normal - c_vec[order + 1] * u_axis - c_vec[1] * v_axis;
+ proj.normal.normalize ();
+ }
+ }
+ else
+ {
+ proj.normal = plane_normal;
+ proj.point = mean;
+ }
+
+ return (proj);
+}
+
+template <typename PointT> void
+pcl::MLSResult::computeMLSSurface (const pcl::PointCloud<PointT> &cloud,
+ int index,
+ const std::vector<int> &nn_indices,
+ double search_radius,
+ int polynomial_order,
+ boost::function<double(const double)> weight_func)
+{
+ // Compute the plane coefficients
+ EIGEN_ALIGN16 Eigen::Matrix3d covariance_matrix;
+ Eigen::Vector4d xyz_centroid;
+
+ // Estimate the XYZ centroid
+ pcl::compute3DCentroid (cloud, nn_indices, xyz_centroid);
+
+ // Compute the 3x3 covariance matrix
+ pcl::computeCovarianceMatrix (cloud, nn_indices, xyz_centroid, covariance_matrix);
+ EIGEN_ALIGN16 Eigen::Vector3d::Scalar eigen_value;
+ EIGEN_ALIGN16 Eigen::Vector3d eigen_vector;
+ Eigen::Vector4d model_coefficients (0, 0, 0, 0);
+ pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
+ model_coefficients.head<3> ().matrix () = eigen_vector;
+ model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);
+
+ // Projected query point
+ valid = true;
+ query_point = cloud.points[index].getVector3fMap ().template cast<double> ();
+ double distance = query_point.dot (model_coefficients.head<3> ()) + model_coefficients[3];
+ mean = query_point - distance * model_coefficients.head<3> ();
+
+ curvature = covariance_matrix.trace ();
+ // Compute the curvature surface change
+ if (curvature != 0)
+ curvature = std::abs (eigen_value / curvature);
+
+ // Get a copy of the plane normal easy access
+ plane_normal = model_coefficients.head<3> ();
+
+ // Local coordinate system (Darboux frame)
+ v_axis = plane_normal.unitOrthogonal ();
+ u_axis = plane_normal.cross (v_axis);
+
+ // Perform polynomial fit to update point and normal
+ ////////////////////////////////////////////////////
+ num_neighbors = static_cast<int> (nn_indices.size ());
+ order = polynomial_order;
+ if (order > 1)
+ {
+ int nr_coeff = (order + 1) * (order + 2) / 2;
+
+ if (num_neighbors >= nr_coeff)
+ {
+ // Note: The max_sq_radius parameter is only used if weight_func was not defined
+ double max_sq_radius = 1;
+ if (weight_func == 0)
+ {
+ max_sq_radius = search_radius * search_radius;
+ weight_func = boost::bind (&pcl::MLSResult::computeMLSWeight, this, _1, max_sq_radius);
+ }
+
+ // Allocate matrices and vectors to hold the data used for the polynomial fit
+ Eigen::VectorXd weight_vec (num_neighbors);
+ Eigen::MatrixXd P (nr_coeff, num_neighbors);
+ Eigen::VectorXd f_vec (num_neighbors);
+ Eigen::MatrixXd P_weight; // size will be (nr_coeff_, nn_indices.size ());
+ Eigen::MatrixXd P_weight_Pt (nr_coeff, nr_coeff);
+
+ // Update neighborhood, since point was projected, and computing relative
+ // positions. Note updating only distances for the weights for speed
+ std::vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d> > de_meaned (num_neighbors);
+ for (size_t ni = 0; ni < (size_t) num_neighbors; ++ni)
+ {
+ de_meaned[ni][0] = cloud.points[nn_indices[ni]].x - mean[0];
+ de_meaned[ni][1] = cloud.points[nn_indices[ni]].y - mean[1];
+ de_meaned[ni][2] = cloud.points[nn_indices[ni]].z - mean[2];
+ weight_vec (ni) = weight_func (de_meaned[ni].dot (de_meaned[ni]));
+ }
+
+ // Go through neighbors, transform them in the local coordinate system,
+ // save height and the evaluation of the polynome's terms
+ double u_coord, v_coord, u_pow, v_pow;
+ for (size_t ni = 0; ni < (size_t) num_neighbors; ++ni)
+ {
+ // Transforming coordinates
+ u_coord = de_meaned[ni].dot (u_axis);
+ v_coord = de_meaned[ni].dot (v_axis);
+ f_vec (ni) = de_meaned[ni].dot (plane_normal);
+
+ // Compute the polynomial's terms at the current point
+ int j = 0;
+ u_pow = 1;
+ for (int ui = 0; ui <= order; ++ui)
+ {
+ v_pow = 1;
+ for (int vi = 0; vi <= order - ui; ++vi)
+ {
+ P (j++, ni) = u_pow * v_pow;
+ v_pow *= v_coord;
+ }
+ u_pow *= u_coord;
+ }
+ }
+
+ // Computing coefficients
+ P_weight = P * weight_vec.asDiagonal ();
+ P_weight_Pt = P_weight * P.transpose ();
+ c_vec = P_weight * f_vec;
+ P_weight_Pt.llt ().solveInPlace (c_vec);
+ }
+ }
}
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename PointOutT>
pcl::MovingLeastSquares<PointInT, PointOutT>::MLSVoxelGrid::MLSVoxelGrid (PointCloudInConstPtr& cloud,
- IndicesPtr &indices,
- float voxel_size) :
- voxel_grid_ (), bounding_min_ (), bounding_max_ (), data_size_ (), voxel_size_ (voxel_size)
+ IndicesPtr &indices,
+ float voxel_size) :
+ voxel_grid_ (), bounding_min_ (), bounding_max_ (), data_size_ (), voxel_size_ (voxel_size)
{
pcl::getMinMax3D (*cloud, *indices, bounding_min_, bounding_max_);
point_out.z = temp.z;
}
-
#define PCL_INSTANTIATE_MovingLeastSquares(T,OutT) template class PCL_EXPORTS pcl::MovingLeastSquares<T,OutT>;
-
-#ifdef _OPENMP
#define PCL_INSTANTIATE_MovingLeastSquaresOMP(T,OutT) template class PCL_EXPORTS pcl::MovingLeastSquaresOMP<T,OutT>;
-#endif
#endif // PCL_SURFACE_IMPL_MLS_H_
typedef typename pcl::KdTree<PointNT> KdTree;
typedef typename pcl::KdTree<PointNT>::Ptr KdTreePtr;
-
/** \brief Constructor. */
- MarchingCubes ();
+ MarchingCubes (const float percentage_extend_grid = 0.0f,
+ const float iso_level = 0.0f) :
+ percentage_extend_grid_ (percentage_extend_grid),
+ iso_level_ (iso_level)
+ {
+ }
/** \brief Destructor. */
virtual ~MarchingCubes ();
setGridResolution (int res_x, int res_y, int res_z)
{ res_x_ = res_x; res_y_ = res_y; res_z_ = res_z; }
-
/** \brief Method to get the marching cubes grid resolution.
* \param[in] res_x the resolution of the grid along the x-axis
* \param[in] res_y the resolution of the grid along the y-axis
/** \brief The grid resolution */
int res_x_, res_y_, res_z_;
- /** \brief Min and max data points. */
- Eigen::Vector4f min_p_, max_p_;
+ /** \brief bounding box */
+ Eigen::Array3f upper_boundary_;
+ Eigen::Array3f lower_boundary_;
+
+ /** \brief size of voxels */
+ Eigen::Array3f size_voxel_;
/** \brief Parameter that defines how much free space should be left inside the grid between
* the bounding box of the point cloud and the grid limits, as a percentage of the bounding box.*/
/** \brief The iso level to be extracted. */
float iso_level_;
- /** \brief Convert the point cloud into voxel data. */
+ /** \brief Convert the point cloud into voxel data.
+ */
virtual void
voxelizeData () = 0;
* \param leaf_node the leaf node to be checked
* \param index_3d the 3d index of the leaf node to be checked
* \param cloud point cloud to store the vertices of the polygon
- */
+ */
void
- createSurface (std::vector<float> &leaf_node,
- Eigen::Vector3i &index_3d,
+ createSurface (const std::vector<float> &leaf_node,
+ const Eigen::Vector3i &index_3d,
pcl::PointCloud<PointNT> &cloud);
- /** \brief Get the bounding box for the input data points. */
+ /** \brief Get the bounding box for the input data points.
+ */
void
getBoundingBox ();
using MarchingCubes<PointNT>::res_x_;
using MarchingCubes<PointNT>::res_y_;
using MarchingCubes<PointNT>::res_z_;
- using MarchingCubes<PointNT>::min_p_;
- using MarchingCubes<PointNT>::max_p_;
+ using MarchingCubes<PointNT>::size_voxel_;
+ using MarchingCubes<PointNT>::upper_boundary_;
+ using MarchingCubes<PointNT>::lower_boundary_;
typedef typename pcl::PointCloud<PointNT>::Ptr PointCloudPtr;
/** \brief Constructor. */
- MarchingCubesHoppe ();
+ MarchingCubesHoppe (const float dist_ignore = -1.0f,
+ const float percentage_extend_grid = 0.0f,
+ const float iso_level = 0.0f) :
+ MarchingCubes<PointNT> (percentage_extend_grid, iso_level),
+ dist_ignore_ (dist_ignore)
+ {
+ }
/** \brief Destructor. */
~MarchingCubesHoppe ();
- /** \brief Convert the point cloud into voxel data. */
+ /** \brief Convert the point cloud into voxel data.
+ */
void
voxelizeData ();
+ /** \brief Method that sets the distance for ignoring voxels which are far from point cloud.
+ * If the distance is negative, then the distance functions would be calculated in all voxels;
+ * otherwise, only voxels with distance lower than dist_ignore would be involved in marching cube.
+ * \param[in] threshold of distance. Default value is -1.0. Set to negative if all voxels are
+ * to be involved.
+ */
+ inline void
+ setDistanceIgnore (const float dist_ignore)
+ { dist_ignore_ = dist_ignore; }
+
+ /** \brief get the distance for ignoring voxels which are far from point cloud.
+ * */
+ inline float
+ getDistanceIgnore () const
+ { return dist_ignore_; }
+
+ protected:
+ /** \brief ignore the distance function
+ * if it is negative
+ * or distance between voxel centroid and point are larger that it. */
+ float dist_ignore_;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
using MarchingCubes<PointNT>::res_x_;
using MarchingCubes<PointNT>::res_y_;
using MarchingCubes<PointNT>::res_z_;
- using MarchingCubes<PointNT>::min_p_;
- using MarchingCubes<PointNT>::max_p_;
+ using MarchingCubes<PointNT>::size_voxel_;
+ using MarchingCubes<PointNT>::upper_boundary_;
+ using MarchingCubes<PointNT>::lower_boundary_;
typedef typename pcl::PointCloud<PointNT>::Ptr PointCloudPtr;
/** \brief Constructor. */
- MarchingCubesRBF ();
+ MarchingCubesRBF (const float off_surface_epsilon = 0.1f,
+ const float percentage_extend_grid = 0.0f,
+ const float iso_level = 0.0f) :
+ MarchingCubes<PointNT> (percentage_extend_grid, iso_level),
+ off_surface_epsilon_ (off_surface_epsilon)
+ {
+ }
/** \brief Destructor. */
~MarchingCubesRBF ();
- /** \brief Convert the point cloud into voxel data. */
+ /** \brief Convert the point cloud into voxel data.
+ */
void
voxelizeData ();
#include <pcl/surface/eigen.h>
#include <pcl/surface/processing.h>
#include <map>
+#include <boost/function.hpp>
namespace pcl
{
- /** \brief MovingLeastSquares represent an implementation of the MLS (Moving Least Squares) algorithm
- * for data smoothing and improved normal estimation. It also contains methods for upsampling the
+
+ /** \brief Data structure used to store the results of the MLS fitting */
+ struct MLSResult
+ {
+ enum ProjectionMethod
+ {
+ NONE, /**< \brief Project to the mls plane. */
+ SIMPLE, /**< \brief Project along the mls plane normal to the polynomial surface. */
+ ORTHOGONAL /**< \brief Project to the closest point on the polynonomial surface. */
+ };
+
+ /** \brief Data structure used to store the MLS polynomial partial derivatives */
+ struct PolynomialPartialDerivative
+ {
+ double z; /**< \brief The z component of the polynomial evaluated at z(u, v). */
+ double z_u; /**< \brief The partial derivative dz/du. */
+ double z_v; /**< \brief The partial derivative dz/dv. */
+ double z_uu; /**< \brief The partial derivative d^2z/du^2. */
+ double z_vv; /**< \brief The partial derivative d^2z/dv^2. */
+ double z_uv; /**< \brief The partial derivative d^2z/dudv. */
+ };
+
+ /** \brief Data structure used to store the MLS projection results */
+ struct MLSProjectionResults
+ {
+ MLSProjectionResults () : u (0), v (0) {}
+
+ double u; /**< \brief The u-coordinate of the projected point in local MLS frame. */
+ double v; /**< \brief The u-coordinate of the projected point in local MLS frame. */
+ Eigen::Vector3d point; /**< \brief The projected point. */
+ Eigen::Vector3d normal; /**< \brief The projected point's normal. */
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ };
+
+ inline
+ MLSResult () : num_neighbors (0), curvature (0.0f), order (0), valid (false) {}
+
+ inline
+ MLSResult (const Eigen::Vector3d &a_query_point,
+ const Eigen::Vector3d &a_mean,
+ const Eigen::Vector3d &a_plane_normal,
+ const Eigen::Vector3d &a_u,
+ const Eigen::Vector3d &a_v,
+ const Eigen::VectorXd &a_c_vec,
+ const int a_num_neighbors,
+ const float a_curvature,
+ const int a_order);
+
+ /** \brief Given a point calculate it's 3D location in the MLS frame.
+ * \param[in] pt The point
+ * \param[out] u The u-coordinate of the point in local MLS frame.
+ * \param[out] v The v-coordinate of the point in local MLS frame.
+ * \param[out] w The w-coordinate of the point in local MLS frame.
+ */
+ inline void
+ getMLSCoordinates (const Eigen::Vector3d &pt, double &u, double &v, double &w) const;
+
+ /** \brief Given a point calculate it's 2D location in the MLS frame.
+ * \param[in] pt The point
+ * \param[out] u The u-coordinate of the point in local MLS frame.
+ * \param[out] v The v-coordinate of the point in local MLS frame.
+ */
+ inline void
+ getMLSCoordinates (const Eigen::Vector3d &pt, double &u, double &v) const;
+
+ /** \brief Calculate the polynomial
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The polynomial value at the provide uv coordinates.
+ */
+ inline double
+ getPolynomialValue (const double u, const double v) const;
+
+ /** \brief Calculate the polynomial's first and second partial derivatives.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The polynomial partial derivatives at the provide uv coordinates.
+ */
+ inline PolynomialPartialDerivative
+ getPolynomialPartialDerivative (const double u, const double v) const;
+
+ /** \brief Calculate the principle curvatures using the polynomial surface.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The principle curvature [k1, k2] at the provided ub coordinates.
+ * \note If an error occurs the MLS_MINIMUM_PRINCIPLE_CURVATURE is returned.
+ */
+ inline Eigen::Vector2f
+ calculatePrincipleCurvatures (const double u, const double v) const;
+
+ /** \brief Project a point orthogonal to the polynomial surface.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \param[in] w The w-coordinate of the point in local MLS frame.
+ * \return The MLSProjectionResults for the input data.
+ * \note If the MLSResults does not contain polynomial data it projects the point onto the mls plane.
+ * \note If the optimization diverges it performs a simple projection on to the polynomial surface.
+ * \note This was implemented based on this https://math.stackexchange.com/questions/1497093/shortest-distance-between-point-and-surface
+ */
+ inline MLSProjectionResults
+ projectPointOrthogonalToPolynomialSurface (const double u, const double v, const double w) const;
+
+ /** \brief Project a point onto the MLS plane.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The MLSProjectionResults for the input data.
+ */
+ inline MLSProjectionResults
+ projectPointToMLSPlane (const double u, const double v) const;
+
+ /** \brief Project a point along the MLS plane normal to the polynomial surface.
+ * \param[in] u The u-coordinate of the point in local MLS frame.
+ * \param[in] v The v-coordinate of the point in local MLS frame.
+ * \return The MLSProjectionResults for the input data.
+ * \note If the MLSResults does not contain polynomial data it projects the point onto the mls plane.
+ */
+ inline MLSProjectionResults
+ projectPointSimpleToPolynomialSurface (const double u, const double v) const;
+
+ /**
+ * \brief Project a point using the specified method.
+ * \param[in] pt The point to be project.
+ * \param[in] method The projection method to be used.
+ * \param[in] required_neighbors The minimum number of neighbors required.
+ * \note If required_neighbors then any number of neighbors is allowed.
+ * \note If required_neighbors is not satisfied it projects to the mls plane.
+ * \return The MLSProjectionResults for the input data.
+ */
+ inline MLSProjectionResults
+ projectPoint (const Eigen::Vector3d &pt, ProjectionMethod method, int required_neighbors = 0) const;
+
+ /**
+ * \brief Project the query point used to generate the mls surface about using the specified method.
+ * \param[in] method The projection method to be used.
+ * \param[in] required_neighbors The minimum number of neighbors required.
+ * \note If required_neighbors then any number of neighbors is allowed.
+ * \note If required_neighbors is not satisfied it projects to the mls plane.
+ * \return The MLSProjectionResults for the input data.
+ */
+ inline MLSProjectionResults
+ projectQueryPoint (ProjectionMethod method, int required_neighbors = 0) const;
+
+ /** \brief Smooth a given point and its neighborghood using Moving Least Squares.
+ * \param[in] index the index of the query point in the input cloud
+ * \param[in] nn_indices the set of nearest neighbors indices for pt
+ * \param[in] search_radius the search radius used to find nearest neighbors for pt
+ * \param[in] polynomial_order the order of the polynomial to fit to the nearest neighbors
+ * \param[in] weight_func defines the weight function for the polynomial fit
+ */
+ template <typename PointT> void
+ computeMLSSurface (const pcl::PointCloud<PointT> &cloud,
+ int index,
+ const std::vector<int> &nn_indices,
+ double search_radius,
+ int polynomial_order = 2,
+ boost::function<double(const double)> weight_func = 0);
+
+ Eigen::Vector3d query_point; /**< \brief The query point about which the mls surface was generated */
+ Eigen::Vector3d mean; /**< \brief The mean point of all the neighbors. */
+ Eigen::Vector3d plane_normal; /**< \brief The normal of the local plane of the query point. */
+ Eigen::Vector3d u_axis; /**< \brief The axis corresponding to the u-coordinates of the local plane of the query point. */
+ Eigen::Vector3d v_axis; /**< \brief The axis corresponding to the v-coordinates of the local plane of the query point. */
+ Eigen::VectorXd c_vec; /**< \brief The polynomial coefficients Example: z = c_vec[0] + c_vec[1]*v + c_vec[2]*v^2 + c_vec[3]*u + c_vec[4]*u*v + c_vec[5]*u^2 */
+ int num_neighbors; /**< \brief The number of neighbors used to create the mls surface. */
+ float curvature; /**< \brief The curvature at the query point. */
+ int order; /**< \brief The order of the polynomial. If order > 1 then use polynomial fit */
+ bool valid; /**< \brief If True, the mls results data is valid, otherwise False. */
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ private:
+ /**
+ * \brief The default weight function used when fitting a polynomial surface
+ * \param sq_dist the squared distance from a point to origin of the mls frame
+ * \param sq_mls_radius the squraed mls search radius used
+ * \return The weight for a point at squared distance from the origin of the mls frame
+ */
+ inline
+ double computeMLSWeight (const double sq_dist, const double sq_mls_radius) { return (exp (-sq_dist / sq_mls_radius)); }
+
+ };
+
+ /** \brief MovingLeastSquares represent an implementation of the MLS (Moving Least Squares) algorithm
+ * for data smoothing and improved normal estimation. It also contains methods for upsampling the
* resulting cloud based on the parametric fit.
- * Reference paper: "Computing and Rendering Point Set Surfaces" by Marc Alexa, Johannes Behr,
+ * Reference paper: "Computing and Rendering Point Set Surfaces" by Marc Alexa, Johannes Behr,
* Daniel Cohen-Or, Shachar Fleishman, David Levin and Claudio T. Silva
* www.sci.utah.edu/~shachar/Publications/crpss.pdf
- * \author Zoltan Csaba Marton, Radu B. Rusu, Alexandru E. Ichim, Suat Gedikli
+ * \note There is a parallelized version of the processing step, using the OpenMP standard.
+ * Compared to the standard version, an overhead is incurred in terms of runtime and memory usage.
+ * The upsampling methods DISTINCT_CLOUD and VOXEL_GRID_DILATION are not parallelized completely,
+ * i.e. parts of the algorithm run on a single thread only.
+ * \author Zoltan Csaba Marton, Radu B. Rusu, Alexandru E. Ichim, Suat Gedikli, Robert Huitl
* \ingroup surface
*/
template <typename PointInT, typename PointOutT>
- class MovingLeastSquares: public CloudSurfaceProcessing<PointInT, PointOutT>
+ class MovingLeastSquares : public CloudSurfaceProcessing<PointInT, PointOutT>
{
public:
typedef boost::shared_ptr<MovingLeastSquares<PointInT, PointOutT> > Ptr;
typedef boost::function<int (int, double, std::vector<int> &, std::vector<float> &)> SearchMethod;
- enum UpsamplingMethod {NONE, DISTINCT_CLOUD, SAMPLE_LOCAL_PLANE, RANDOM_UNIFORM_DENSITY, VOXEL_GRID_DILATION};
+ enum UpsamplingMethod
+ {
+ NONE, /**< \brief No upsampling will be done, only the input points will be projected
+ to their own MLS surfaces. */
+ DISTINCT_CLOUD, /**< \brief Project the points of the distinct cloud to the MLS surface. */
+ SAMPLE_LOCAL_PLANE, /**< \brief The local plane of each input point will be sampled in a circular fashion
+ using the \ref upsampling_radius_ and the \ref upsampling_step_ parameters. */
+ RANDOM_UNIFORM_DENSITY, /**< \brief The local plane of each input point will be sampled using an uniform random
+ distribution such that the density of points is constant throughout the
+ cloud - given by the \ref desired_num_points_in_radius_ parameter. */
+ VOXEL_GRID_DILATION /**< \brief The input cloud will be inserted into a voxel grid with voxels of
+ size \ref voxel_size_; this voxel grid will be dilated \ref dilation_iteration_num_
+ times and the resulting points will be projected to the MLS surface
+ of the closest point in the input cloud; the result is a point cloud
+ with filled holes and a constant point density. */
+ };
/** \brief Empty constructor. */
MovingLeastSquares () : CloudSurfaceProcessing<PointInT, PointOutT> (),
search_method_ (),
tree_ (),
order_ (2),
- polynomial_fit_ (true),
search_radius_ (0.0),
sqr_gauss_param_ (0.0),
compute_normals_ (false),
upsampling_radius_ (0.0),
upsampling_step_ (0.0),
desired_num_points_in_radius_ (0),
+ cache_mls_results_ (true),
mls_results_ (),
+ projection_method_ (MLSResult::SIMPLE),
+ threads_ (1),
voxel_size_ (1.0),
dilation_iteration_num_ (0),
nr_coeff_ (),
rng_alg_ (),
rng_uniform_distribution_ ()
{};
-
+
/** \brief Empty destructor */
virtual ~MovingLeastSquares () {}
}
/** \brief Get a pointer to the search method used. */
- inline KdTreePtr
- getSearchMethod () { return (tree_); }
+ inline KdTreePtr
+ getSearchMethod () const { return (tree_); }
/** \brief Set the order of the polynomial to be fit.
* \param[in] order the order of the polynomial
+ * \note Setting order > 1 indicates using a polynomial fit.
*/
- inline void
+ inline void
setPolynomialOrder (int order) { order_ = order; }
/** \brief Get the order of the polynomial to be fit. */
- inline int
- getPolynomialOrder () { return (order_); }
+ inline int
+ getPolynomialOrder () const { return (order_); }
/** \brief Sets whether the surface and normal are approximated using a polynomial, or only via tangent estimation.
* \param[in] polynomial_fit set to true for polynomial fit
*/
- inline void
- setPolynomialFit (bool polynomial_fit) { polynomial_fit_ = polynomial_fit; }
+ PCL_DEPRECATED ("[pcl::surface::MovingLeastSquares::setPolynomialFit] setPolynomialFit is deprecated. Please use setPolynomialOrder instead.")
+ inline void
+ setPolynomialFit (bool polynomial_fit)
+ {
+ if (polynomial_fit)
+ {
+ if (order_ < 2)
+ {
+ order_ = 2;
+ }
+ }
+ else
+ {
+ order_ = 0;
+ }
+ }
/** \brief Get the polynomial_fit value (true if the surface and normal are approximated using a polynomial). */
- inline bool
- getPolynomialFit () { return (polynomial_fit_); }
+ PCL_DEPRECATED ("[pcl::surface::MovingLeastSquares::getPolynomialFit] getPolynomialFit is deprecated. Please use getPolynomialOrder instead.")
+ inline bool
+ getPolynomialFit () const { return (order_ > 1); }
/** \brief Set the sphere radius that is to be used for determining the k-nearest neighbors used for fitting.
* \param[in] radius the sphere radius that is to contain all k-nearest neighbors
* \note Calling this method resets the squared Gaussian parameter to radius * radius !
*/
- inline void
+ inline void
setSearchRadius (double radius) { search_radius_ = radius; sqr_gauss_param_ = search_radius_ * search_radius_; }
/** \brief Get the sphere radius used for determining the k-nearest neighbors. */
- inline double
- getSearchRadius () { return (search_radius_); }
+ inline double
+ getSearchRadius () const { return (search_radius_); }
/** \brief Set the parameter used for distance based weighting of neighbors (the square of the search radius works
* best in general).
* \param[in] sqr_gauss_param the squared Gaussian parameter
*/
- inline void
+ inline void
setSqrGaussParam (double sqr_gauss_param) { sqr_gauss_param_ = sqr_gauss_param; }
/** \brief Get the parameter for distance based weighting of neighbors. */
- inline double
+ inline double
getSqrGaussParam () const { return (sqr_gauss_param_); }
/** \brief Set the upsampling method to be used
* \param method
- * \note Options are: * NONE - no upsampling will be done, only the input points will be projected to their own
- * MLS surfaces
- * * DISTINCT_CLOUD - will project the points of the distinct cloud to the closest point on
- * the MLS surface
- * * SAMPLE_LOCAL_PLANE - the local plane of each input point will be sampled in a circular
- * fashion using the \ref upsampling_radius_ and the \ref upsampling_step_
- * parameters
- * * RANDOM_UNIFORM_DENSITY - the local plane of each input point will be sampled using an
- * uniform random distribution such that the density of points is
- * constant throughout the cloud - given by the \ref desired_num_points_in_radius_
- * parameter
- * * VOXEL_GRID_DILATION - the input cloud will be inserted into a voxel grid with voxels of
- * size \ref voxel_size_; this voxel grid will be dilated \ref dilation_iteration_num_
- * times and the resulting points will be projected to the MLS surface
- * of the closest point in the input cloud; the result is a point cloud
- * with filled holes and a constant point density
*/
inline void
setUpsamplingMethod (UpsamplingMethod method) { upsample_method_ = method; }
/** \brief Get the distinct cloud used for the DISTINCT_CLOUD upsampling method. */
inline PointCloudInConstPtr
- getDistinctCloud () { return distinct_cloud_; }
+ getDistinctCloud () const { return (distinct_cloud_); }
/** \brief Set the radius of the circle in the local point plane that will be sampled
* \note Used only in the case of SAMPLE_LOCAL_PLANE upsampling
*/
inline double
- getUpsamplingRadius () { return upsampling_radius_; }
+ getUpsamplingRadius () const { return (upsampling_radius_); }
/** \brief Set the step size for the local plane sampling
* \note Used only in the case of SAMPLE_LOCAL_PLANE upsampling
* \note Used only in the case of SAMPLE_LOCAL_PLANE upsampling
*/
inline double
- getUpsamplingStepSize () { return upsampling_step_; }
+ getUpsamplingStepSize () const { return (upsampling_step_); }
/** \brief Set the parameter that specifies the desired number of points within the search radius
* \note Used only in the case of RANDOM_UNIFORM_DENSITY upsampling
* \note Used only in the case of RANDOM_UNIFORM_DENSITY upsampling
*/
inline int
- getPointDensity () { return desired_num_points_in_radius_; }
+ getPointDensity () const { return (desired_num_points_in_radius_); }
/** \brief Set the voxel size for the voxel grid
* \note Used only in the VOXEL_GRID_DILATION upsampling method
* \note Used only in the VOXEL_GRID_DILATION upsampling method
*/
inline float
- getDilationVoxelSize () { return voxel_size_; }
+ getDilationVoxelSize () const { return (voxel_size_); }
/** \brief Set the number of dilation steps of the voxel grid
* \note Used only in the VOXEL_GRID_DILATION upsampling method
* \note Used only in the VOXEL_GRID_DILATION upsampling method
*/
inline int
- getDilationIterations () { return dilation_iteration_num_; }
+ getDilationIterations () const { return (dilation_iteration_num_); }
+
+ /** \brief Set whether the mls results should be stored for each point in the input cloud
+ * \param[in] True if the mls results should be stored, otherwise false.
+ * \note The cache_mls_results_ is forced to true when using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
+ * \note If memory consumption is a concern set to false when not using upsampling method VOXEL_GRID_DILATION or DISTINCT_CLOUD.
+ */
+ inline void
+ setCacheMLSResults (bool cache_mls_results) { cache_mls_results_ = cache_mls_results; }
+
+ /** \brief Get the cache_mls_results_ value (True if the mls results should be stored, otherwise false). */
+ inline bool
+ getCacheMLSResults () const { return (cache_mls_results_); }
+
+ /** \brief Set the method to be used when projection the point on to the MLS surface.
+ * \param method
+ * \note This is only used when polynomial fit is enabled.
+ */
+ inline void
+ setProjectionMethod (MLSResult::ProjectionMethod method) { projection_method_ = method; }
+
+
+ /** \brief Get the current projection method being used. */
+ inline MLSResult::ProjectionMethod
+ getProjectionMethod () const { return (projection_method_); }
+
+ /** \brief Get the MLSResults for input cloud
+ * \note The results are only stored if setCacheMLSResults(true) was called or when using the upsampling method DISTINCT_CLOUD or VOXEL_GRID_DILATION.
+ * \note This vector is align with the input cloud indices, so use getCorrespondingIndices to get the correct results when using output cloud indices.
+ */
+ inline const std::vector<MLSResult>&
+ getMLSResults () const { return (mls_results_); }
+
+ /** \brief Set the maximum number of threads to use
+ * \param threads the maximum number of hardware threads to use (0 sets the value to 1)
+ */
+ inline void
+ setNumberOfThreads (unsigned int threads = 1)
+ {
+ threads_ = threads;
+ }
/** \brief Base method for surface reconstruction for all points given in <setInputCloud (), setIndices ()>
* \param[out] output the resultant reconstructed surface model
*/
- void
+ void
process (PointCloudOut &output);
- /** \brief Get the set of indices with each point in output having the
+ /** \brief Get the set of indices with each point in output having the
* corresponding point in input */
inline PointIndicesPtr
- getCorrespondingIndices () { return (corresponding_input_indices_); }
+ getCorrespondingIndices () const { return (corresponding_input_indices_); }
protected:
/** \brief The point cloud that will hold the estimated normals, if set. */
/** \brief The order of the polynomial to be fit. */
int order_;
- /** True if the surface and normal be approximated using a polynomial, false if tangent estimation is sufficient. */
- bool polynomial_fit_;
-
/** \brief The nearest neighbors search radius for each point. */
double search_radius_;
*/
int desired_num_points_in_radius_;
-
- /** \brief Data structure used to store the results of the MLS fitting
- * \note Used only in the case of VOXEL_GRID_DILATION or DISTINCT_CLOUD upsampling
+ /** \brief True if the mls results for the input cloud should be stored
+ * \note This is forced to true when using upsampling methods VOXEL_GRID_DILATION or DISTINCT_CLOUD.
*/
- struct MLSResult
- {
- MLSResult () : mean (), plane_normal (), u_axis (), v_axis (), c_vec (), num_neighbors (), curvature (), valid (false) {}
-
- MLSResult (const Eigen::Vector3d &a_mean,
- const Eigen::Vector3d &a_plane_normal,
- const Eigen::Vector3d &a_u,
- const Eigen::Vector3d &a_v,
- const Eigen::VectorXd a_c_vec,
- const int a_num_neighbors,
- const float &a_curvature);
-
- Eigen::Vector3d mean, plane_normal, u_axis, v_axis;
- Eigen::VectorXd c_vec;
- int num_neighbors;
- float curvature;
- bool valid;
- };
+ bool cache_mls_results_;
/** \brief Stores the MLS result for each point in the input cloud
* \note Used only in the case of VOXEL_GRID_DILATION or DISTINCT_CLOUD upsampling
*/
std::vector<MLSResult> mls_results_;
-
+ /** \brief Parameter that specifies the projection method to be used. */
+ MLSResult::ProjectionMethod projection_method_;
+
+ /** \brief The maximum number of threads the scheduler should use. */
+ unsigned int threads_;
+
+
/** \brief A minimalistic implementation of a voxel grid, necessary for the point cloud upsampling
* \note Used only in the case of VOXEL_GRID_DILATION upsampling
*/
getCellIndex (const Eigen::Vector3f &p, Eigen::Vector3i& index) const
{
for (int i = 0; i < 3; ++i)
- index[i] = static_cast<Eigen::Vector3i::Scalar> ((p[i] - bounding_min_(i)) / voxel_size_);
+ index[i] = static_cast<Eigen::Vector3i::Scalar> ((p[i] - bounding_min_ (i)) / voxel_size_);
}
inline void
Eigen::Vector4f bounding_min_, bounding_max_;
uint64_t data_size_;
float voxel_size_;
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
float voxel_size_;
/** \brief Number of dilation steps for the VOXEL_GRID_DILATION upsampling method */
- int dilation_iteration_num_;
+ int dilation_iteration_num_;
/** \brief Number of coefficients, to be computed from the requested order.*/
int nr_coeff_;
}
/** \brief Smooth a given point and its neighborghood using Moving Least Squares.
- * \param[in] index the inex of the query point in the input cloud
+ * \param[in] index the index of the query point in the input cloud
* \param[in] nn_indices the set of nearest neighbors indices for pt
- * \param[in] nn_sqr_dists the set of nearest neighbors squared distances for pt
* \param[out] projected_points the set of points projected points around the query point
* (in the case of upsampling method NONE, only the query point projected to its own fitted surface will be returned,
* in the case of the other upsampling methods, multiple points will be returned)
void
computeMLSPointNormal (int index,
const std::vector<int> &nn_indices,
- std::vector<float> &nn_sqr_dists,
PointCloudOut &projected_points,
NormalCloud &projected_points_normals,
PointIndices &corresponding_input_indices,
MLSResult &mls_result) const;
- /** \brief Fits a point (sample point) given in the local plane coordinates of an input point (query point) to
- * the MLS surface of the input point
- * \param[in] u_disp the u coordinate of the sample point in the local plane of the query point
- * \param[in] v_disp the v coordinate of the sample point in the local plane of the query point
- * \param[in] u_axis the axis corresponding to the u-coordinates of the local plane of the query point
- * \param[in] v_axis the axis corresponding to the v-coordinates of the local plane of the query point
- * \param[in] n_axis
- * \param mean
- * \param[in] curvature the curvature of the surface at the query point
- * \param[in] c_vec the coefficients of the polynomial fit on the MLS surface of the query point
- * \param[in] num_neighbors the number of neighbors of the query point in the input cloud
- * \param[out] result_point the absolute 3D position of the resulting projected point
- * \param[out] result_normal the normal of the resulting projected point
+
+ /** \brief This is a helper function for add projected points
+ * \param[in] index the index of the query point in the input cloud
+ * \param[in] point the projected point to be added
+ * \param[in] normal the projected point's normal to be added
+ * \param[in] curvature the projected point's curvature
+ * \param[out] projected_points the set of projected points around the query point
+ * \param[out] projected_points_normals the normals corresponding to the projected points
+ * \param[out] corresponding_input_indices the set of indices with each point in output having the corresponding point in input
*/
void
- projectPointToMLSSurface (float &u_disp, float &v_disp,
- Eigen::Vector3d &u_axis, Eigen::Vector3d &v_axis,
- Eigen::Vector3d &n_axis,
- Eigen::Vector3d &mean,
- float &curvature,
- Eigen::VectorXd &c_vec,
- int num_neighbors,
- PointOutT &result_point,
- pcl::Normal &result_normal) const;
+ addProjectedPointNormal (int index,
+ const Eigen::Vector3d &point,
+ const Eigen::Vector3d &normal,
+ double curvature,
+ PointCloudOut &projected_points,
+ NormalCloud &projected_points_normals,
+ PointIndices &corresponding_input_indices) const;
+
void
copyMissingFields (const PointInT &point_in,
PointOutT &point_out) const;
- /** \brief Abstract surface reconstruction method.
- * \param[out] output the result of the reconstruction
+ /** \brief Abstract surface reconstruction method.
+ * \param[out] output the result of the reconstruction
*/
- virtual void performProcessing (PointCloudOut &output);
+ virtual void
+ performProcessing (PointCloudOut &output);
/** \brief Perform upsampling for the distinct-cloud and voxel-grid methods
* \param[out] output the result of the reconstruction
*/
- void performUpsampling (PointCloudOut &output);
+ void
+ performUpsampling (PointCloudOut &output);
private:
/** \brief Boost-based random number generator algorithm. */
/** \brief Random number generator using an uniform distribution of floats
* \note Used only in the case of RANDOM_UNIFORM_DENSITY upsampling
*/
- boost::shared_ptr<boost::variate_generator<boost::mt19937&,
- boost::uniform_real<float> >
+ boost::shared_ptr<boost::variate_generator<boost::mt19937&,
+ boost::uniform_real<float> >
> rng_uniform_distribution_;
/** \brief Abstract class get name method. */
- std::string getClassName () const { return ("MovingLeastSquares"); }
-
- public:
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ std::string
+ getClassName () const { return ("MovingLeastSquares"); }
};
-#ifdef _OPENMP
- /** \brief MovingLeastSquaresOMP is a parallelized version of MovingLeastSquares, using the OpenMP standard.
- * \note Compared to MovingLeastSquares, an overhead is incurred in terms of runtime and memory usage.
- * \note The upsampling methods DISTINCT_CLOUD and VOXEL_GRID_DILATION are not parallelized completely, i.e. parts of the algorithm run on a single thread only.
- * \author Robert Huitl
- * \ingroup surface
- */
+ /** \brief MovingLeastSquaresOMP implementation has been merged into MovingLeastSquares for better maintainability.
+ * \note Keeping this empty child class for backwards compatibility.
+ * \author Robert Huitl
+ * \ingroup surface
+ */
template <typename PointInT, typename PointOutT>
- class MovingLeastSquaresOMP: public MovingLeastSquares<PointInT, PointOutT>
+ class MovingLeastSquaresOMP : public MovingLeastSquares<PointInT, PointOutT>
{
public:
- typedef boost::shared_ptr<MovingLeastSquares<PointInT, PointOutT> > Ptr;
- typedef boost::shared_ptr<const MovingLeastSquares<PointInT, PointOutT> > ConstPtr;
-
- using PCLBase<PointInT>::input_;
- using PCLBase<PointInT>::indices_;
- using MovingLeastSquares<PointInT, PointOutT>::normals_;
- using MovingLeastSquares<PointInT, PointOutT>::corresponding_input_indices_;
- using MovingLeastSquares<PointInT, PointOutT>::nr_coeff_;
- using MovingLeastSquares<PointInT, PointOutT>::order_;
- using MovingLeastSquares<PointInT, PointOutT>::compute_normals_;
- using MovingLeastSquares<PointInT, PointOutT>::upsample_method_;
- using MovingLeastSquares<PointInT, PointOutT>::VOXEL_GRID_DILATION;
- using MovingLeastSquares<PointInT, PointOutT>::DISTINCT_CLOUD;
-
- typedef pcl::PointCloud<pcl::Normal> NormalCloud;
- typedef pcl::PointCloud<pcl::Normal>::Ptr NormalCloudPtr;
-
- typedef pcl::PointCloud<PointOutT> PointCloudOut;
- typedef typename PointCloudOut::Ptr PointCloudOutPtr;
- typedef typename PointCloudOut::ConstPtr PointCloudOutConstPtr;
-
/** \brief Constructor for parallelized Moving Least Squares
- * \param threads the maximum number of hardware threads to use (0 sets the value to 1)
- */
- MovingLeastSquaresOMP (unsigned int threads = 0) : threads_ (threads)
- {
-
- }
-
- /** \brief Set the maximum number of threads to use
- * \param threads the maximum number of hardware threads to use (0 sets the value to 1)
- */
- inline void
- setNumberOfThreads (unsigned int threads = 0)
+ * \param threads the maximum number of hardware threads to use (0 sets the value to 1)
+ */
+ MovingLeastSquaresOMP (unsigned int threads = 1)
{
- threads_ = threads;
+ this->setNumberOfThreads (threads);
}
-
- protected:
- /** \brief Abstract surface reconstruction method.
- * \param[out] output the result of the reconstruction
- */
- virtual void performProcessing (PointCloudOut &output);
-
- /** \brief The maximum number of threads the scheduler should use. */
- unsigned int threads_;
};
-#endif
}
#ifdef PCL_NO_PRECOMPILE
inline void
setSolverDivide (int solver_divide) { solver_divide_ = solver_divide; }
- /** \brief Get the the depth at which a block Gauss-Seidel solver is used to solve the Laplacian equation */
+ /** \brief Get the depth at which a block Gauss-Seidel solver is used to solve the Laplacian equation */
inline int
getSolverDivide () { return solver_divide_; }
{
// 22 January 2004 Dale Lear
// Disable crc checking when writing the
- // unknow user data block.
+ // unknown user data block.
// This has to be done so we don't get an extra
// 32 bit CRC calculated on the block that
// ON_UnknownUserData::Write() writes. The
ON_3dPoint F1, F2;
if ( ellipse.GetFoci(F1,F2) )
{
- P = ( F1.DistanceTo(Q) <= F1.DistanceTo(Q)) ? F1 : F2;
+ P = ( F1.DistanceTo(Q) <= F2.DistanceTo(Q)) ? F1 : F2;
rc = true;
}
}
*
*/
-#include <pcl/impl/instantiate.hpp>
-#include <pcl/point_types.h>
#include <pcl/surface/mls.h>
#include <pcl/surface/impl/mls.hpp>
-// Instantiations of specific point types
-PCL_INSTANTIATE_PRODUCT(MovingLeastSquares, ((pcl::PointXYZ)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal))
- ((pcl::PointXYZ)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal)))
-
-#ifdef _OPENMP
-PCL_INSTANTIATE_PRODUCT(MovingLeastSquaresOMP, ((pcl::PointXYZ)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal))
- ((pcl::PointXYZ)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal)))
+#ifndef PCL_NO_PRECOMPILE
+#include <pcl/point_types.h>
+#include <pcl/impl/instantiate.hpp>
+#ifdef PCL_ONLY_CORE_POINT_TYPES
+ // Instantiations of specific point types
+ PCL_INSTANTIATE_PRODUCT(MovingLeastSquares, ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal))
+ ((pcl::PointXYZ)(pcl::PointXYZI)(pcl::PointXYZRGB)(pcl::PointXYZRGBA)(pcl::PointXYZRGBNormal)(pcl::PointNormal)))
+#else
+ PCL_INSTANTIATE_PRODUCT(MovingLeastSquares, (PCL_XYZ_POINT_TYPES)(PCL_XYZ_POINT_TYPES))
#endif
-/// Ideally, we should instantiate like below, but it takes large amounts of main memory for compilation
-//PCL_INSTANTIATE_PRODUCT(MovingLeastSquares, (PCL_XYZ_POINT_TYPES)(PCL_XYZ_POINT_TYPES))
+#endif // PCL_NO_PRECOMPILE
throw std::runtime_error ("[SequentialFitter::setCorners] Error: Empty or invalid pcl-point-cloud.\n");
if (corners->indices.size () < 4)
- throw std::runtime_error ("[SequentialFitter::setCorners] Error: to few corners (<4)\n");
+ throw std::runtime_error ("[SequentialFitter::setCorners] Error: too few corners (<4)\n");
if (corners->indices.size () > 4)
- printf ("[SequentialFitter::setCorners] Warning: to many corners (>4)\n");
+ printf ("[SequentialFitter::setCorners] Warning: too many corners (>4)\n");
bool flip = false;
pcl::PointXYZRGB &pt0 = m_cloud->at (corners->indices[0]);
-# .PCD v.5 - Point Cloud Data file format
-VERSION .5
-FIELDS x y z
-SIZE 4 4 4
-TYPE F F F
-COUNT 1 1 1
+# .PCD v0.7 - Point Cloud Data file format
+VERSION 0.7
+FIELDS x y z normal_x normal_y normal_z curvature
+SIZE 4 4 4 4 4 4 4
+TYPE F F F F F F F
+COUNT 1 1 1 1 1 1 1
WIDTH 397
HEIGHT 1
+VIEWPOINT 0 0 0 1 0 0 0
POINTS 397
DATA ascii
-0.0054216 0.11349 0.040749
--0.0017447 0.11425 0.041273
--0.010661 0.11338 0.040916
-0.026422 0.11499 0.032623
-0.024545 0.12284 0.024255
-0.034137 0.11316 0.02507
-0.02886 0.11773 0.027037
-0.02675 0.12234 0.017605
-0.03575 0.1123 0.019109
-0.015982 0.12307 0.031279
-0.0079813 0.12438 0.032798
-0.018101 0.11674 0.035493
-0.0086687 0.11758 0.037538
-0.01808 0.12536 0.026132
-0.0080861 0.12866 0.02619
-0.02275 0.12146 0.029671
--0.0018689 0.12456 0.033184
--0.011168 0.12376 0.032519
--0.0020063 0.11937 0.038104
--0.01232 0.11816 0.037427
--0.0016659 0.12879 0.026782
--0.011971 0.12723 0.026219
-0.016484 0.12828 0.01928
-0.0070921 0.13103 0.018415
-0.0014615 0.13134 0.017095
--0.013821 0.12886 0.019265
--0.01725 0.11202 0.040077
--0.074556 0.13415 0.051046
--0.065971 0.14396 0.04109
--0.071925 0.14545 0.043266
--0.06551 0.13624 0.042195
--0.071112 0.13767 0.047518
--0.079528 0.13416 0.051194
--0.080421 0.14428 0.042793
--0.082672 0.1378 0.046806
--0.08813 0.13514 0.042222
--0.066325 0.12347 0.050729
--0.072399 0.12662 0.052364
--0.066091 0.11973 0.050881
--0.072012 0.11811 0.052295
--0.062433 0.12627 0.043831
--0.068326 0.12998 0.048875
--0.063094 0.11811 0.044399
--0.071301 0.11322 0.04841
--0.080515 0.12741 0.052034
--0.078179 0.1191 0.051116
--0.085216 0.12609 0.049001
--0.089538 0.12621 0.044589
--0.082659 0.11661 0.04797
--0.089536 0.11784 0.04457
--0.0565 0.15248 0.030132
--0.055517 0.15313 0.026915
--0.03625 0.17198 0.00017688
--0.03775 0.17198 0.00022189
--0.03625 0.16935 0.00051958
--0.033176 0.15711 0.0018682
--0.051913 0.1545 0.011273
--0.041707 0.16642 0.0030522
--0.049468 0.16414 0.0041988
--0.041892 0.15669 0.0054879
--0.051224 0.15878 0.0080283
--0.062417 0.15317 0.033161
--0.07167 0.15319 0.033701
--0.062543 0.15524 0.027405
--0.07211 0.1555 0.027645
--0.078663 0.15269 0.032268
--0.081569 0.15374 0.026085
--0.08725 0.1523 0.022135
--0.05725 0.15568 0.010325
--0.057888 0.1575 0.0073225
--0.0885 0.15223 0.019215
--0.056129 0.14616 0.03085
--0.054705 0.13555 0.032127
--0.054144 0.14714 0.026275
--0.046625 0.13234 0.021909
--0.05139 0.13694 0.025787
--0.018278 0.12238 0.030773
--0.021656 0.11643 0.035209
--0.031921 0.11566 0.032851
--0.021348 0.12421 0.024562
--0.03241 0.12349 0.023293
--0.024869 0.12094 0.028745
--0.031747 0.12039 0.028229
--0.052912 0.12686 0.034968
--0.041672 0.11564 0.032998
--0.052037 0.1168 0.034582
--0.042495 0.12488 0.024082
--0.047946 0.12736 0.028108
--0.042421 0.12035 0.028633
--0.047661 0.12024 0.028871
--0.035964 0.1513 0.0005395
--0.050598 0.1474 0.013881
--0.046375 0.13293 0.018289
--0.049125 0.13856 0.016269
--0.042976 0.14915 0.0054003
--0.047965 0.14659 0.0086783
--0.022926 0.1263 0.018077
--0.031583 0.1259 0.017804
--0.041733 0.12796 0.01665
--0.061482 0.14698 0.036168
--0.071729 0.15026 0.038328
--0.060526 0.1368 0.035999
--0.082619 0.14823 0.035955
--0.087824 0.14449 0.033779
--0.089 0.13828 0.037774
--0.085662 0.15095 0.028208
--0.089601 0.14725 0.025869
--0.090681 0.13748 0.02369
--0.058722 0.12924 0.038992
--0.060075 0.11512 0.037685
--0.091812 0.12767 0.038703
--0.091727 0.11657 0.039619
--0.093164 0.12721 0.025211
--0.093938 0.12067 0.024399
--0.091583 0.14522 0.01986
--0.090929 0.13667 0.019817
--0.093094 0.11635 0.018959
-0.024948 0.10286 0.041418
-0.0336 0.092627 0.040463
-0.02742 0.096386 0.043312
-0.03392 0.086911 0.041034
-0.028156 0.086837 0.045084
-0.03381 0.078604 0.040854
-0.028125 0.076874 0.045059
-0.0145 0.093279 0.05088
-0.0074817 0.09473 0.052315
-0.017407 0.10535 0.043139
-0.0079536 0.10633 0.042968
-0.018511 0.097194 0.047253
-0.0086436 0.099323 0.048079
--0.0020197 0.095698 0.053906
--0.011446 0.095169 0.053862
--0.001875 0.10691 0.043455
--0.011875 0.10688 0.043019
--0.0017622 0.10071 0.046648
--0.012498 0.10008 0.045916
-0.016381 0.085894 0.051642
-0.0081167 0.08691 0.055228
-0.017644 0.076955 0.052372
-0.008125 0.076853 0.055536
-0.020575 0.088169 0.049006
-0.022445 0.075721 0.049563
--0.0017931 0.086849 0.056843
--0.011943 0.086771 0.057009
--0.0019567 0.076863 0.057803
--0.011875 0.076964 0.057022
-0.03325 0.067541 0.040033
-0.028149 0.066829 0.042953
-0.026761 0.057829 0.042588
-0.023571 0.04746 0.040428
-0.015832 0.067418 0.051639
-0.0080431 0.066902 0.055006
-0.013984 0.058886 0.050416
-0.0080973 0.056888 0.05295
-0.020566 0.065958 0.0483
-0.018594 0.056539 0.047879
-0.012875 0.052652 0.049689
--0.0017852 0.066712 0.056503
--0.011785 0.066885 0.055015
--0.001875 0.056597 0.05441
--0.01184 0.057054 0.052714
--0.015688 0.052469 0.049615
-0.0066154 0.04993 0.051259
-0.018088 0.046655 0.043321
-0.008841 0.045437 0.046623
-0.017688 0.039719 0.043084
-0.008125 0.039516 0.045374
--0.0016111 0.049844 0.05172
--0.01245 0.046773 0.050903
--0.013851 0.039778 0.051036
--0.0020294 0.044874 0.047587
--0.011653 0.04686 0.048661
--0.0018611 0.039606 0.047339
--0.0091545 0.03958 0.049415
-0.043661 0.094028 0.02252
-0.034642 0.10473 0.031831
-0.028343 0.1072 0.036339
-0.036339 0.096552 0.034843
-0.031733 0.099372 0.038505
-0.036998 0.10668 0.026781
-0.032875 0.11108 0.02959
-0.040938 0.097132 0.026663
-0.044153 0.086466 0.024241
-0.05375 0.072221 0.020429
-0.04516 0.076574 0.023594
-0.038036 0.086663 0.035459
-0.037861 0.076625 0.035658
-0.042216 0.087237 0.028254
-0.042355 0.076747 0.02858
-0.043875 0.096228 0.015269
-0.044375 0.096797 0.0086445
-0.039545 0.1061 0.017655
-0.042313 0.10009 0.017237
-0.045406 0.087417 0.015604
-0.055118 0.072639 0.017944
-0.048722 0.07376 0.017434
-0.045917 0.086298 0.0094211
-0.019433 0.1096 0.039063
-0.01097 0.11058 0.039648
-0.046657 0.057153 0.031337
-0.056079 0.066335 0.024122
-0.048168 0.06701 0.026298
-0.056055 0.057253 0.024902
-0.051163 0.056662 0.029137
-0.036914 0.067032 0.036122
-0.033 0.06472 0.039903
-0.038004 0.056507 0.033119
-0.030629 0.054915 0.038484
-0.041875 0.066383 0.028357
-0.041434 0.06088 0.029632
-0.044921 0.049904 0.031243
-0.054635 0.050167 0.022044
-0.04828 0.04737 0.025845
-0.037973 0.048347 0.031456
-0.028053 0.047061 0.035991
-0.025595 0.040346 0.03415
-0.038455 0.043509 0.028278
-0.032031 0.043278 0.029253
-0.036581 0.040335 0.025144
-0.03019 0.039321 0.026847
-0.059333 0.067891 0.017361
-0.0465 0.071452 0.01971
-0.059562 0.057747 0.01834
-0.055636 0.049199 0.019173
-0.0505 0.045064 0.019181
-0.023 0.047803 0.039776
-0.022389 0.03886 0.038795
--0.019545 0.0939 0.052205
--0.021462 0.10618 0.042059
--0.031027 0.10395 0.041228
--0.022521 0.097723 0.045194
--0.031858 0.097026 0.043878
--0.043262 0.10412 0.040891
--0.052154 0.10404 0.040972
--0.041875 0.096944 0.042424
--0.051919 0.096967 0.043563
--0.021489 0.086672 0.054767
--0.027 0.083087 0.050284
--0.02107 0.077249 0.054365
--0.026011 0.089634 0.048981
--0.031893 0.087035 0.044169
--0.025625 0.074892 0.047102
--0.03197 0.0769 0.042177
--0.041824 0.086954 0.043295
--0.051825 0.086844 0.044933
--0.041918 0.076728 0.042564
--0.051849 0.076877 0.042992
--0.061339 0.10393 0.041164
--0.072672 0.10976 0.044294
--0.061784 0.096825 0.043327
--0.070058 0.096203 0.041397
--0.080439 0.11091 0.044343
--0.061927 0.086724 0.04452
--0.070344 0.087352 0.041908
--0.06141 0.077489 0.042178
--0.068579 0.080144 0.041024
--0.019045 0.067732 0.052388
--0.017742 0.058909 0.050809
--0.023548 0.066382 0.045226
--0.03399 0.067795 0.040929
--0.02169 0.056549 0.045164
--0.036111 0.060706 0.040407
--0.041231 0.066951 0.041392
--0.048588 0.070956 0.040357
--0.0403 0.059465 0.040446
--0.02192 0.044965 0.052258
--0.029187 0.043585 0.051088
--0.021919 0.039826 0.053521
--0.030331 0.039749 0.052133
--0.021998 0.049847 0.046725
--0.031911 0.046848 0.045187
--0.035276 0.039753 0.047529
--0.042016 0.044823 0.041594
--0.05194 0.044707 0.043498
--0.041928 0.039327 0.043582
--0.051857 0.039252 0.046212
--0.059453 0.04424 0.042862
--0.060765 0.039087 0.044363
--0.024273 0.11038 0.039129
--0.032379 0.10878 0.037952
--0.041152 0.10853 0.037969
--0.051698 0.10906 0.038258
--0.062091 0.10877 0.038274
--0.071655 0.10596 0.037516
--0.074634 0.097746 0.038347
--0.07912 0.10508 0.032308
--0.080203 0.096758 0.033592
--0.08378 0.10568 0.025985
--0.087292 0.10314 0.020825
--0.08521 0.097079 0.02781
--0.088082 0.096456 0.022985
--0.07516 0.08604 0.038816
--0.064577 0.073455 0.03897
--0.072279 0.076416 0.036413
--0.076375 0.072563 0.02873
--0.080031 0.087076 0.03429
--0.078919 0.079371 0.032477
--0.084834 0.086686 0.026974
--0.087891 0.089233 0.022611
--0.081048 0.077169 0.025829
--0.086393 0.10784 0.018635
--0.087672 0.10492 0.017264
--0.089333 0.098483 0.01761
--0.086375 0.083067 0.018607
--0.089179 0.089186 0.018947
--0.082879 0.076109 0.017794
--0.0825 0.074674 0.0071175
--0.026437 0.064141 0.039321
--0.030035 0.06613 0.038942
--0.026131 0.056531 0.038882
--0.031664 0.056657 0.037742
--0.045716 0.064541 0.039166
--0.051959 0.066869 0.036733
--0.042557 0.055545 0.039026
--0.049406 0.056892 0.034344
--0.0555 0.062391 0.029498
--0.05375 0.058574 0.026313
--0.03406 0.050137 0.038577
--0.041741 0.04959 0.03929
--0.050975 0.049435 0.036965
--0.053 0.051065 0.029209
--0.054145 0.054568 0.012257
--0.055848 0.05417 0.0083272
--0.054844 0.049295 0.011462
--0.05615 0.050619 0.0092929
--0.061451 0.068257 0.035376
--0.069725 0.069958 0.032788
--0.062823 0.063322 0.026886
--0.071037 0.066787 0.025228
--0.060857 0.060568 0.022643
--0.067 0.061558 0.020109
--0.0782 0.071279 0.021032
--0.062116 0.045145 0.037802
--0.065473 0.039513 0.037964
--0.06725 0.03742 0.033413
--0.072702 0.065008 0.018701
--0.06145 0.059165 0.018731
--0.0675 0.061479 0.019221
--0.057411 0.054114 0.0038257
--0.079222 0.070654 0.017735
--0.062473 0.04468 0.01111
--0.06725 0.042258 0.010414
--0.066389 0.040515 0.01316
--0.068359 0.038502 0.011958
--0.061381 0.04748 0.007607
--0.068559 0.043549 0.0081576
--0.070929 0.03983 0.0085888
--0.016625 0.18375 -0.019735
--0.015198 0.17471 -0.018868
--0.015944 0.16264 -0.0091037
--0.015977 0.1607 -0.0088072
--0.013251 0.16708 -0.015264
--0.014292 0.16098 -0.011252
--0.013986 0.184 -0.023739
--0.011633 0.17699 -0.023349
--0.0091029 0.16988 -0.021457
--0.025562 0.18273 -0.0096247
--0.02725 0.18254 -0.0094384
--0.025736 0.17948 -0.0089653
--0.031216 0.17589 -0.0051154
--0.020399 0.1845 -0.014943
--0.021339 0.17645 -0.014566
--0.027125 0.17234 -0.010156
--0.03939 0.1733 -0.0023575
--0.022876 0.16406 -0.0078103
--0.031597 0.16651 -0.0049292
--0.0226 0.15912 -0.003799
--0.030372 0.15767 -0.0012672
--0.021158 0.16849 -0.012383
--0.027 0.1712 -0.01022
--0.041719 0.16813 -0.00074958
--0.04825 0.16748 -0.00015191
--0.03725 0.16147 -7.2628e-05
--0.066429 0.15783 -0.0085673
--0.071284 0.15839 -0.005998
--0.065979 0.16288 -0.017792
--0.071623 0.16384 -0.01576
--0.066068 0.16051 -0.013567
--0.073307 0.16049 -0.011832
--0.077 0.16204 -0.019241
--0.077179 0.15851 -0.01495
--0.073691 0.17286 -0.037944
--0.07755 0.17221 -0.039175
--0.065921 0.16586 -0.025022
--0.072095 0.16784 -0.024725
--0.066 0.16808 -0.030916
--0.073448 0.17051 -0.032045
--0.07777 0.16434 -0.025938
--0.077893 0.16039 -0.021299
--0.078211 0.169 -0.034566
--0.034667 0.15131 -0.00071029
--0.066117 0.17353 -0.047453
--0.071986 0.17612 -0.045384
--0.06925 0.182 -0.055026
--0.064992 0.17802 -0.054645
--0.069935 0.17983 -0.051988
--0.07793 0.17516 -0.0444
+0.0054215998 0.11349 0.040748999 -0.16884723 -0.45159745 -0.87609947 0.0030943851
+-0.0017447 0.11425 0.041273002 -0.011541721 -0.46529692 -0.88507944 0.0056861709
+-0.010661 0.11338 0.040916 0.14614862 -0.496488 -0.85565197 0.0056441575
+0.026422 0.11499 0.032623 -0.52052444 -0.53684396 -0.66396749 0.0069778459
+0.024545001 0.12284 0.024255 -0.50764346 -0.74296999 -0.4362267 0.012873111
+0.034136999 0.11316 0.02507 -0.74675208 -0.59207439 -0.30300051 0.00816618
+0.028860001 0.11773 0.027037 -0.59785169 -0.67759848 -0.42829144 0.015595004
+0.02675 0.12234 0.017604999 -0.59158164 -0.74482262 -0.30865923 0.0096910615
+0.035750002 0.1123 0.019109 -0.79553264 -0.56393856 -0.22158764 0.0077297003
+0.015982 0.12307 0.031279001 -0.31086302 -0.74870217 -0.58549905 0.010855215
+0.0079813004 0.12438 0.032798 -0.15485749 -0.70128393 -0.69585913 0.0058102743
+0.018100999 0.11674 0.035493001 -0.32298067 -0.56194073 -0.76151562 0.0089428844
+0.0086687002 0.11758 0.037537999 -0.17445859 -0.523031 -0.83426785 0.0039783465
+0.01808 0.12536 0.026132001 -0.40127173 -0.79735625 -0.45078158 0.011859259
+0.0080861002 0.12865999 0.02619 -0.14846934 -0.86061466 -0.48713383 0.01193497
+0.02275 0.12146 0.029671 -0.50815374 -0.73633546 -0.44675478 0.013027777
+-0.0018689 0.12456 0.033183999 0.021843739 -0.73292792 -0.6799556 0.0062952945
+-0.011168 0.12376 0.032519002 0.21974316 -0.7375769 -0.63850862 0.0058935038
+-0.0020063 0.11937 0.038104001 0.0033577818 -0.55838519 -0.82957506 0.0055112438
+-0.01232 0.11816 0.037427001 0.18408856 -0.58010685 -0.79346544 0.0045971852
+-0.0016659 0.12879001 0.026782 0.060926694 -0.89214551 -0.44762078 0.0071681938
+-0.011971 0.12723 0.026218999 0.25322157 -0.84287763 -0.474801 0.004851752
+0.016484 0.12828 0.01928 -0.30896688 -0.90670973 -0.28708377 0.0081693754
+0.0070921001 0.13102999 0.018415 -0.18857701 -0.8954969 -0.40314254 0.0061773811
+0.0014615 0.13134 0.017095 -0.030506011 -0.93418866 -0.35547286 0.0078295255
+-0.013821 0.12886 0.019265 0.20082667 -0.90866685 -0.36605063 0.0041274121
+-0.01725 0.11202 0.040077001 0.17915623 -0.52085388 -0.83463424 0.0074276463
+-0.074556001 0.13415 0.051045999 -0.13361748 -0.37986907 -0.91533923 0.053038858
+-0.065971002 0.14396 0.04109 -0.52148068 -0.439255 -0.73151416 0.026551403
+-0.071924999 0.14545 0.043265998 -0.10976201 -0.60856271 -0.78587765 0.030716548
+-0.065509997 0.13624001 0.042195 -0.64690953 -0.27790511 -0.71012449 0.0061066193
+-0.071111999 0.13767 0.047518 -0.23028101 -0.50395799 -0.8324644 0.024193831
+-0.079527996 0.13416 0.051194001 0.20600085 -0.40636119 -0.89018774 0.042999495
+-0.080421001 0.14428 0.042792998 0.39670554 -0.63132799 -0.66637051 0.020071341
+-0.082672 0.13779999 0.046806 0.49222195 -0.44529268 -0.74795187 0.027716169
+-0.088129997 0.13514 0.042222001 0.71588868 -0.35794571 -0.59948176 0.0061506582
+-0.066325001 0.12347 0.050728999 -0.44702551 0.026569208 -0.89412653 0.049122065
+-0.072398998 0.12661999 0.052363999 -0.15109582 -0.14565089 -0.97772992 0.012987013
+-0.066091001 0.11973 0.050880998 0.49363372 -0.27387455 0.82542014 0.063380361
+-0.072012 0.11811 0.052294999 0.089953512 -0.40322584 0.91066861 0.043889515
+-0.062433001 0.12627 0.043830998 0.7461043 0.10399755 0.65765715 0.025209237
+-0.068325996 0.12998 0.048875 -0.43632671 -0.33852753 -0.83367741 0.025434997
+-0.063093998 0.11811 0.044399001 0.73735356 -0.31851915 0.59569734 0.030531738
+-0.071300998 0.11322 0.048409998 0.16830848 -0.65331757 0.7381385 0.032480057
+-0.080514997 0.12740999 0.052034002 0.38682958 -0.01568961 -0.92201769 0.017833892
+-0.078179002 0.1191 0.051116001 0.15518065 0.31381333 -0.93671775 0.021380307
+-0.085216001 0.12609001 0.049001001 0.66413164 -0.060075819 -0.74519807 0.016970368
+-0.089538001 0.12621 0.044589002 0.75927979 -0.1181355 -0.63995177 0.015361789
+-0.082658999 0.11661 0.047970001 0.4467158 0.38261694 -0.80873305 0.015345791
+-0.089535996 0.11784 0.044569999 0.67966002 0.12636337 -0.72256106 0.032478429
+-0.056499999 0.15248001 0.030131999 -0.33762839 -0.71236515 -0.61525851 0.0429281
+-0.055516999 0.15312999 0.026915001 -0.83237761 -0.38691509 -0.39679241 0.023426346
+-0.036249999 0.17197999 0.00017688 -0.29537779 -0.14131801 -0.94487101 0.06837023
+-0.037749998 0.17197999 0.00022189 -0.29537809 -0.14131688 -0.94487107 0.068370357
+-0.036249999 0.16935 0.00051957997 -0.29537809 -0.14131688 -0.94487107 0.068370357
+-0.033176001 0.15711001 0.0018682 -0.43437883 -0.10727591 -0.89431924 0.027991556
+-0.051913001 0.15449999 0.011273 -0.66571194 -0.53041917 -0.52486485 0.10940588
+-0.041707002 0.16642 0.0030522 -0.088684477 -0.32659593 -0.9409942 0.04067177
+-0.049467999 0.16414 0.0041987998 -0.19820534 -0.51164192 -0.83602464 0.018989481
+-0.041892 0.15669 0.0054879002 -0.46768439 -0.15497798 -0.8702029 0.021614425
+-0.051224001 0.15877999 0.0080282995 -0.36611214 -0.40681309 -0.83693784 0.031957328
+-0.062417001 0.15317 0.033160999 -0.34849492 -0.73624218 -0.58008516 0.044005789
+-0.071670003 0.15319 0.033700999 0.037774783 -0.84826654 -0.52822059 0.019414406
+-0.062542997 0.15524 0.027404999 -0.34841642 -0.73644722 -0.57987207 0.043948863
+-0.072109997 0.15549999 0.027644999 0.084375083 -0.85251749 -0.51584369 0.023248179
+-0.078662999 0.15268999 0.032267999 0.36376482 -0.81080192 -0.4585579 0.0071089235
+-0.081569001 0.15374 0.026085 0.41848749 -0.81323707 -0.40436816 0.011017915
+-0.087250002 0.1523 0.022135001 0.79803133 -0.46404663 -0.38445634 0.034933198
+-0.057250001 0.15568 0.010325 -0.66573882 -0.53037977 -0.52487051 0.10940783
+-0.057888001 0.1575 0.0073225 -0.36614218 -0.40679416 -0.83693397 0.031956732
+-0.088500001 0.15222999 0.019215001 0.8514818 -0.36151975 -0.37984514 0.047997151
+-0.056129001 0.14616001 0.030850001 -0.71406651 -0.25552902 -0.65177757 0.026650187
+-0.054705001 0.13555001 0.032127 -0.75563037 -0.25465611 -0.6034674 0.0062280181
+-0.054143999 0.14714 0.026275 -0.83619982 -0.33200502 -0.43651178 0.027516434
+-0.046624999 0.13234 0.021909 -0.77167147 -0.45983508 -0.43940294 0.015568241
+-0.05139 0.13694 0.025787 -0.82502484 -0.29802176 -0.48012191 0.016481433
+-0.018278001 0.12238 0.030773001 0.2189856 -0.77467448 -0.59323251 0.011681316
+-0.021655999 0.11643 0.035209 0.25612333 -0.60931838 -0.75042123 0.0029822814
+-0.031920999 0.11566 0.032850999 0.089117572 -0.69171697 -0.71664894 0.007182973
+-0.021348 0.12421 0.024561999 0.21852617 -0.88355941 -0.41420898 0.0038163862
+-0.03241 0.12349 0.023293 -0.0098979417 -0.83467448 -0.55065459 0.0082057444
+-0.024869001 0.12094 0.028744999 0.23928009 -0.82670623 -0.50921685 0.0083035426
+-0.031746998 0.12039 0.028229 0.033354413 -0.81189781 -0.58284593 0.0085460329
+-0.052912001 0.12685999 0.034968 -0.6821875 -0.30016235 -0.6667254 0.010716889
+-0.041671999 0.11564 0.032997999 -0.055118669 -0.64711553 -0.7603969 0.0040871259
+-0.052037001 0.1168 0.034582 -0.21478797 -0.3984302 -0.89169478 0.035277426
+-0.042495001 0.12488 0.024081999 -0.28772661 -0.71870273 -0.63299263 0.036527757
+-0.047945999 0.12736 0.028108001 -0.72714043 -0.35215625 -0.58928162 0.024528533
+-0.042420998 0.12035 0.028633 -0.28699586 -0.60819656 -0.74008805 0.046943106
+-0.047660999 0.12024 0.028871 -0.63170999 -0.40763944 -0.65937287 0.035298273
+-0.035964001 0.1513 0.00053949998 0.42597631 0.069760799 0.90204078 0.013660887
+-0.050597999 0.14740001 0.013881 -0.79792613 -0.50649345 -0.3267695 0.056300875
+-0.046374999 0.13293 0.018289 -0.64400738 -0.55972904 -0.52149576 0.027122065
+-0.049125001 0.13856 0.016269 -0.82856554 -0.35174406 -0.4356091 0.011342637
+-0.042975999 0.14915 0.0054003 -0.53052694 -0.1433237 -0.83546358 0.019582529
+-0.047965001 0.14658999 0.0086783003 -0.43282154 -0.42147359 -0.79688495 0.028641256
+-0.022926001 0.12630001 0.018076999 0.21852911 -0.8835566 -0.41421345 0.0038136169
+-0.031583 0.1259 0.017804001 -0.084133193 -0.80922121 -0.58144879 0.018533347
+-0.041733 0.12796 0.016650001 -0.36518443 -0.63293225 -0.68266904 0.027313044
+-0.061482001 0.14698 0.036168002 -0.50408536 -0.44549564 -0.73988628 0.039870977
+-0.071728997 0.15026 0.038327999 -0.015496265 -0.7730304 -0.63417977 0.01780135
+-0.060525998 0.13680001 0.035999 -0.71306747 -0.24283816 -0.65769631 0.0063271122
+-0.082618997 0.14823 0.035955001 0.47669819 -0.71192718 -0.51567286 0.013532816
+-0.087824002 0.14449 0.033778999 0.78517169 -0.52193189 -0.33330569 0.024797359
+-0.089000002 0.13828 0.037774 0.84258002 -0.39418849 -0.36697993 0.014366952
+-0.085662 0.15095 0.028208001 0.5251044 -0.78348589 -0.33228782 0.025516272
+-0.089601003 0.14725 0.025869001 0.89614761 -0.32351896 -0.30373508 0.034004856
+-0.090681002 0.13748001 0.02369 0.95984846 -0.22861007 -0.16256762 0.007631606
+-0.058722001 0.12924001 0.038991999 -0.70580089 -0.12846732 -0.69666433 0.016850151
+-0.060075 0.11512 0.037684999 -0.49193445 -0.058857333 -0.86864048 0.040929805
+-0.091812 0.12767 0.038702998 0.93956327 -0.17970741 -0.2914207 0.014043191
+-0.091727003 0.11657 0.039618999 0.86413795 0.33086988 -0.37919742 0.048940618
+-0.093163997 0.12721001 0.025211001 0.98552257 -0.12056172 -0.11920663 0.0043064994
+-0.093938001 0.12067 0.024398999 0.98427206 0.16453789 0.064309932 0.029838379
+-0.091582999 0.14522 0.019859999 0.89614254 -0.32352197 -0.30374691 0.034012903
+-0.090929002 0.13666999 0.019817 0.95805603 -0.20726846 -0.1979098 0.0057622446
+-0.093093999 0.11635 0.018959001 0.98243999 0.18117557 0.044576686 0.032072492
+0.024948001 0.10286 0.041418001 -0.50217587 -0.45149371 -0.73754513 0.0049769632
+0.033599999 0.092626996 0.040463001 -0.66844445 -0.23813689 -0.70460826 0.0084011387
+0.027419999 0.096386001 0.043311998 -0.52449149 -0.31101868 -0.79257548 0.0042440598
+0.033920001 0.086911 0.041034002 -0.68985844 -0.097785927 -0.71730965 0.0065412661
+0.028155999 0.086837001 0.045084 -0.5732041 -0.084137343 -0.81508148 0.0041391035
+0.033810001 0.078603998 0.040854 -0.6749531 0.049827036 -0.73617643 0.0027250601
+0.028124999 0.076874003 0.045058999 -0.61608005 0.092481732 -0.78223556 0.0023465508
+0.0145 0.093278997 0.05088 -0.37729472 -0.35353231 -0.85595769 0.0052667796
+0.0074816998 0.094729997 0.052315 -0.18586323 -0.50761348 -0.8412987 0.012646539
+0.017407 0.10535 0.043138999 -0.24998525 -0.50104308 -0.82853079 0.0050569442
+0.0079536 0.10633 0.042968001 -0.12115002 -0.45767561 -0.88082665 0.005781922
+0.018510999 0.097194001 0.047253001 -0.37112617 -0.34566584 -0.8618471 0.0054108021
+0.0086436002 0.099322997 0.048078999 -0.11644173 -0.59730524 -0.7935161 0.0055130087
+-0.0020196999 0.095697999 0.053906001 -0.024833232 -0.57589561 -0.81714606 0.012671551
+-0.011446 0.095169 0.053862002 0.13523138 -0.59376568 -0.7931928 0.014543219
+-0.001875 0.10691 0.043455001 0.026278881 -0.39993814 -0.91616535 0.002705948
+-0.011875 0.10688 0.043019 0.1192934 -0.39521939 -0.91080779 0.0015582
+-0.0017622 0.10071 0.046647999 -0.0069597415 -0.58737713 -0.80928338 0.015852297
+-0.012498 0.10008 0.045915999 0.11381002 -0.60800987 -0.78572983 0.015490185
+0.016380999 0.085894004 0.051642001 -0.40546137 -0.17009516 -0.89814734 0.0030322804
+0.0081166998 0.086910002 0.055227999 -0.29560381 -0.25577238 -0.92043406 0.0076185586
+0.017643999 0.076954998 0.052372001 -0.44164523 0.016793245 -0.89703256 0.0038809509
+0.0081249997 0.076853 0.055535998 -0.28453448 0.014439251 -0.95855707 0.0030511376
+0.020575 0.088169001 0.049006 -0.48251823 -0.14953297 -0.86302727 0.0021532676
+0.022445001 0.075721003 0.049563002 -0.54274106 0.042393196 -0.83882946 0.0043320926
+-0.0017931 0.086848997 0.056843001 -0.085129447 -0.29672778 -0.95116013 0.021261316
+-0.011943 0.086770996 0.057009 0.19515742 -0.19225521 -0.96174401 0.012458436
+-0.0019567001 0.076862998 0.057803001 -0.055512048 -0.015349915 -0.99833995 0.007448623
+-0.011875 0.076963998 0.057022002 0.2755622 0.10759915 -0.95524234 0.020187844
+0.03325 0.067541003 0.040033001 -0.64387792 0.13396259 -0.75330955 0.0024370786
+0.028148999 0.066829003 0.042952999 -0.59509897 0.15709978 -0.78814769 0.001263492
+0.026760999 0.057829 0.042587999 -0.56684518 0.27811375 -0.77546078 0.0032190143
+0.023571 0.047460001 0.040428001 -0.60811114 0.31426507 -0.72899812 0.0063965297
+0.015831999 0.067418002 0.051639002 -0.46439239 0.15635081 -0.87171906 0.0033340643
+0.0080431001 0.066901997 0.055006001 -0.29502016 0.15021715 -0.943609 0.0037481887
+0.013984 0.058885999 0.050416 -0.44073677 0.28497621 -0.85119891 0.0053655636
+0.0080973003 0.056887999 0.052949999 -0.29052511 0.28976738 -0.91193748 0.0064742533
+0.020566 0.065958001 0.048300002 -0.51491582 0.17084281 -0.84004426 0.0017491818
+0.018594 0.056538999 0.047878999 -0.51175618 0.28225034 -0.81144345 0.0029353874
+0.012875 0.052652001 0.049688999 -0.38840196 0.3981083 -0.83105576 0.0085874954
+-0.0017852 0.066711999 0.056503002 -0.020316985 0.17890212 -0.98365712 0.004140513
+-0.011785 0.066885002 0.055015001 0.32926905 0.21563224 -0.91928482 0.01968986
+-0.001875 0.056597002 0.054409999 -0.0052823606 0.27603641 -0.96113271 0.0068042353
+-0.01184 0.057054002 0.052714001 0.32840732 0.26231068 -0.90738177 0.016386257
+-0.015688 0.052469 0.049614999 0.24083789 0.01177044 -0.97049403 0.059673704
+0.0066153998 0.049929999 0.051259 -0.19795869 0.38478413 -0.90152842 0.0084427586
+0.018088 0.046654999 0.043320999 -0.48477679 0.32723331 -0.8111164 0.012247019
+0.0088409996 0.045437001 0.046622999 -0.21786378 0.42153558 -0.88025171 0.010199225
+0.017688001 0.039719 0.043083999 -0.50713331 0.31902084 -0.80065072 0.014214325
+0.0081249997 0.039515998 0.045373999 -0.24138522 0.43614018 -0.86689961 0.0099169947
+-0.0016111 0.049844 0.051720001 -0.038185336 0.39012834 -0.91996837 0.010735004
+-0.01245 0.046773002 0.050903 0.0094410116 0.079695806 -0.99677449 0.047492072
+-0.013851 0.039778002 0.051036 -0.20715107 -0.25438344 -0.94465739 0.025480686
+-0.0020294001 0.044874001 0.047587 -0.14519539 0.31627905 -0.93748909 0.018671323
+-0.011653 0.046859998 0.048661001 0.0094404668 0.079705797 -0.99677372 0.047494438
+-0.0018611 0.039606001 0.047339 -0.14519632 0.31627876 -0.93748909 0.018673668
+-0.0091545004 0.039579999 0.049415 -0.18494596 0.016573779 -0.98260897 0.025298525
+0.043660998 0.094028004 0.02252 -0.92041755 -0.28093216 -0.27186149 0.0066030738
+0.034642 0.10473 0.031831 -0.69015884 -0.40015957 -0.60295367 0.0044354568
+0.028343 0.1072 0.036339 -0.52822232 -0.47807541 -0.70173007 0.0067754826
+0.036339 0.096551999 0.034843002 -0.7790432 -0.26857656 -0.56653172 0.0041775848
+0.031732999 0.099372 0.038504999 -0.65142018 -0.3041302 -0.69509459 0.0061181071
+0.036998 0.10668 0.026781 -0.8241421 -0.39830247 -0.40267223 0.0091458764
+0.032875001 0.11108 0.029589999 -0.74014568 -0.52058494 -0.42564726 0.011109083
+0.040938001 0.097131997 0.026663 -0.88692129 -0.31068045 -0.34183076 0.0038380553
+0.044153001 0.086465999 0.024241 -0.95414311 -0.11537059 -0.27622551 0.0076777046
+0.053750001 0.072221003 0.020429 -0.51798862 -0.50113165 -0.69322062 0.06503357
+0.045159999 0.076573998 0.023593999 -0.69614828 -0.084045157 -0.71296144 0.044977665
+0.038036 0.086663 0.035459001 -0.81002659 -0.061518699 -0.58315742 0.0046865996
+0.037861001 0.076624997 0.035657998 -0.77414429 0.054391067 -0.63066804 0.0025961238
+0.042215999 0.087237 0.028254 -0.89833516 -0.10155091 -0.42741233 0.0056578321
+0.042355001 0.076747 0.028580001 -0.8671717 -0.077556886 -0.49193311 0.013927608
+0.043875001 0.096228004 0.015269 -0.95895141 -0.22659501 -0.1704898 0.0043888544
+0.044374999 0.096796997 0.0086444998 -0.94115782 -0.30727333 -0.14073028 0.0059855459
+0.039545 0.1061 0.017655 -0.88866729 -0.41421592 -0.19671176 0.0021756196
+0.042312998 0.10009 0.017237 -0.92437547 -0.33019742 -0.19104898 0.0041756001
+0.045405999 0.087416999 0.015604 -0.96485788 -0.19427004 -0.17694214 0.0036036982
+0.055117998 0.072639003 0.017944001 -0.56342161 -0.42417717 -0.70896399 0.053529784
+0.048721999 0.073760003 0.017433999 -0.5870927 -0.5095039 -0.62906915 0.043783061
+0.045917001 0.086297996 0.0094210999 -0.97563714 -0.16882955 -0.14010231 0.0032495963
+0.019432999 0.1096 0.039062999 -0.25648993 -0.49896565 -0.82779592 0.0047740079
+0.01097 0.11058 0.039648 -0.14828649 -0.48850796 -0.85986692 0.004848307
+0.046657 0.057153001 0.031337 -0.35205206 -0.072589144 -0.93316144 0.031428069
+0.056079 0.066335 0.024122 -0.52020526 -0.44320381 -0.730039 0.049573082
+0.048168 0.06701 0.026298 -0.2865766 -0.49326757 -0.82131654 0.032226685
+0.056054998 0.057252999 0.024901999 -0.70871705 0.068796292 -0.7021305 0.029180726
+0.051162999 0.056662001 0.029137 -0.57538748 0.071726911 -0.81472969 0.043009475
+0.036913998 0.067032002 0.036122002 -0.7568534 0.066869825 -0.65015489 0.0050594709
+0.033 0.064719997 0.039903 -0.668118 0.14268312 -0.73024642 0.0029397116
+0.038004 0.056506999 0.033119 -0.49821573 0.01963944 -0.86683065 0.042904783
+0.030629 0.054915 0.038484 -0.526851 0.27108574 -0.80556852 0.0028973371
+0.041875001 0.066382997 0.028356999 -0.78373605 -0.10993145 -0.61128795 0.036790907
+0.041434001 0.060880002 0.029632 -0.53708231 -0.022425119 -0.84323174 0.045972284
+0.044921 0.049904 0.031243 -0.3528904 0.34862784 -0.86828965 0.037123516
+0.054634999 0.050167002 0.022043999 -0.70719039 0.31820387 -0.63136995 0.016325338
+0.048280001 0.047370002 0.025845001 -0.52652866 0.51785374 -0.67423666 0.0098816361
+0.037973002 0.048347 0.031456001 -0.36187053 0.47896025 -0.7997793 0.015337758
+0.028053001 0.047061 0.035990998 -0.57399821 0.45164311 -0.68304068 0.016997997
+0.025595 0.040346 0.034150001 -0.68097848 0.28570601 -0.67427027 0.0093491748
+0.038454998 0.043508999 0.028278001 -0.37806544 0.51105171 -0.77194083 0.022927471
+0.032031 0.043278001 0.029253 -0.53197962 0.50747716 -0.67783815 0.019725965
+0.036580998 0.040335 0.025144 -0.39942452 0.69131386 -0.60211724 0.007815362
+0.03019 0.039321002 0.026846999 -0.53198594 0.50746638 -0.67784125 0.019722477
+0.059333 0.067891002 0.017361 -0.56344873 -0.42420113 -0.70892805 0.053521059
+0.046500001 0.071451999 0.019710001 -0.51798266 -0.50117189 -0.693196 0.065022051
+0.059562001 0.057746999 0.018340001 -0.88730741 0.07214395 -0.45550054 0.044594601
+0.055636 0.049199 0.019173 -0.70719075 0.31820732 -0.6313678 0.016323969
+0.050500002 0.045063999 0.019181 -0.58164734 0.58193195 -0.56836736 0.014211843
+0.023 0.047803 0.039776001 -0.60811114 0.31426507 -0.72899812 0.0063965297
+0.022389 0.038860001 0.038795002 -0.59490627 0.43003449 -0.67908525 0.018004857
+-0.019545 0.093900003 0.052205 0.34927082 -0.48287472 -0.80302054 0.018088751
+-0.021462001 0.10618 0.042059001 0.18082748 -0.437738 -0.88073081 0.0027749946
+-0.031027 0.10395 0.041228 0.11291887 -0.47436607 -0.87305558 0.0067759664
+-0.022521 0.097723 0.045194 0.2705847 -0.41650814 -0.86793143 0.023760239
+-0.031858001 0.097025998 0.043878 0.17280744 -0.2860494 -0.94250375 0.014363435
+-0.043262001 0.10412 0.040890999 0.00073640002 -0.45958629 -0.88813281 0.0045937961
+-0.052154001 0.10404 0.040972002 -0.03633894 -0.34159034 -0.93914616 0.00271941
+-0.041875001 0.096943997 0.042424001 -0.0044234376 -0.22387859 -0.97460699 0.0070257671
+-0.051918998 0.096966997 0.043563001 -0.04023103 -0.22149211 -0.97433192 0.0042004804
+-0.021489 0.086672001 0.054767001 0.50618094 -0.16235481 -0.84700757 0.034249049
+-0.027000001 0.083086997 0.050283998 0.68004489 -0.089362115 -0.72770423 0.030443884
+-0.02107 0.077248998 0.054365002 0.53606218 0.17085041 -0.82670885 0.018211767
+-0.026010999 0.089634001 0.048981 0.68004316 -0.089353099 -0.72770685 0.030443544
+-0.031893 0.087035 0.044169001 0.2828007 -0.035433222 -0.95852399 0.027427558
+-0.025625 0.074891999 0.047102001 0.69481319 0.2732133 -0.66527379 0.01750415
+-0.031970002 0.076899998 0.042176999 0.29757643 0.22898653 -0.92682981 0.031569399
+-0.041824002 0.086953998 0.043295 0.106451 0.0045626885 -0.99430746 0.028152194
+-0.051825002 0.086843997 0.044932999 -0.038542189 0.067814387 -0.99695325 0.006826716
+-0.041917998 0.076728001 0.042564001 -0.0079677086 0.18697172 -0.98233294 0.0034264468
+-0.051849 0.076876998 0.042992 0.038096964 0.28818578 -0.95681643 0.0087176375
+-0.061338998 0.10393 0.041164 -0.029402535 -0.25662446 -0.9660638 0.036180034
+-0.072672002 0.10976 0.044294 0.084962435 -0.63697439 0.76618856 0.040219717
+-0.061783999 0.096825004 0.043327 0.16092302 -0.22269997 -0.96151358 0.0058681369
+-0.070058003 0.096202999 0.041397002 0.4036282 -0.0080862371 -0.91488731 0.038544029
+-0.080439001 0.11091 0.044342998 -0.31796536 -0.75363255 0.57527047 0.0096438928
+-0.061926998 0.086723998 0.044520002 0.17248975 0.045638345 -0.98395348 0.0073326589
+-0.070344001 0.087352 0.041908 0.42582259 0.091916025 -0.90012586 0.01168427
+-0.061409999 0.077489004 0.042178001 0.19923167 0.37411079 -0.90573061 0.0082584694
+-0.068579003 0.080144003 0.041023999 0.41895124 0.42133331 -0.80433702 0.0084476769
+-0.019045001 0.067731999 0.052388001 0.65199536 0.23585072 -0.72060847 0.018073827
+-0.017742001 0.058908999 0.050809 0.55176026 0.18901981 -0.8123005 0.01541355
+-0.023548 0.066381998 0.045226 0.69483125 0.28809536 -0.65894651 0.018995659
+-0.033989999 0.067795001 0.040929001 0.05322047 0.19476742 -0.97940451 0.032201033
+-0.02169 0.056549001 0.045164 0.69635779 0.11462907 -0.70848149 0.021707475
+-0.036111001 0.060706001 0.040406998 -0.061588861 0.12001347 -0.99085999 0.010013676
+-0.041230999 0.066950999 0.041391999 0.10316385 0.13051817 -0.98606396 0.020321144
+-0.048588 0.070955999 0.040357001 0.18421111 0.32899684 -0.92618972 0.015567646
+-0.0403 0.059464999 0.040445998 0.16084772 0.15765952 -0.97430557 0.030772509
+-0.021919999 0.044964999 0.052258 0.1091383 -0.44714984 -0.88777572 0.033779215
+-0.029186999 0.043584999 0.051088002 0.29156253 -0.59619606 -0.74802518 0.039049324
+-0.021919001 0.039825998 0.053521 -0.075991563 -0.25113225 -0.96496522 0.025803825
+-0.030331001 0.039749 0.052133001 0.30103204 -0.48792616 -0.81933367 0.032826744
+-0.021997999 0.049846999 0.046725001 0.29242998 -0.44415969 -0.84688061 0.077966951
+-0.031911001 0.046847999 0.045187 0.36673963 -0.60464549 -0.70704025 0.0094668921
+-0.035275999 0.039753001 0.047529001 0.39517462 -0.55422795 -0.73257649 0.01399758
+-0.042016 0.044822998 0.041593999 0.10855728 -0.51931661 -0.84765881 0.029278623
+-0.051940002 0.044707 0.043497998 0.082651727 -0.48762557 -0.86913168 0.025466334
+-0.041928001 0.039326999 0.043582 0.15547548 -0.66415334 -0.73125088 0.02137457
+-0.051856998 0.039252002 0.046211999 0.14657496 -0.59871596 -0.78743577 0.031854171
+-0.059452999 0.044240002 0.042862002 0.35994217 -0.78289491 -0.50746149 0.050245311
+-0.060764998 0.039087001 0.044362999 0.27816954 -0.61602265 -0.73697889 0.070127271
+-0.024273001 0.11038 0.039129 0.21418424 -0.47806239 -0.85181069 0.0033514106
+-0.032379001 0.10878 0.037951998 0.10414454 -0.5102818 -0.85367811 0.00511677
+-0.041152 0.10853 0.037969001 -0.038321562 -0.47359723 -0.87990749 0.0054788273
+-0.051697999 0.10906 0.038258001 -0.077510841 -0.42872471 -0.90010399 0.0054551302
+-0.062091 0.10877 0.038274001 -0.19225384 -0.14292401 -0.97088164 0.072213292
+-0.071654998 0.10596 0.037516002 -0.12120762 -0.54344511 0.83064801 0.11271713
+-0.074634001 0.097746 0.038346998 0.57305831 -0.0035732505 -0.81950676 0.033460639
+-0.079120003 0.10508 0.032308001 0.7911132 0.17815721 -0.58514953 0.055837024
+-0.080202997 0.096758001 0.033592001 0.70376015 -0.11795598 -0.7005769 0.0059455447
+-0.083779998 0.10568 0.025985001 0.84119576 0.021250656 -0.54031295 0.033550162
+-0.087292001 0.10314 0.020825 0.87406546 -0.16717812 -0.45613703 0.0070871506
+-0.085210003 0.097079001 0.02781 0.84734458 -0.091733746 -0.52306026 0.0057304609
+-0.088082001 0.096455999 0.022985 0.90336299 -0.11470471 -0.41325316 0.0070019392
+-0.075159997 0.086039998 0.038816001 0.56156623 0.1044868 -0.82080805 0.0091117797
+-0.064576998 0.073454998 0.038970001 0.26235282 0.46205065 -0.8471601 0.0077266204
+-0.072278999 0.076416001 0.036412999 0.53422672 0.41966563 -0.73381364 0.0055706957
+-0.076375 0.072563 0.028729999 0.67109746 0.6318242 -0.38784823 0.0044638016
+-0.080031 0.087076001 0.034290001 0.73211843 0.15737711 -0.6627481 0.010024117
+-0.078919001 0.079370998 0.032476999 0.69276303 0.45652202 -0.55827153 0.0087372195
+-0.084834002 0.086686 0.026974 0.83963245 0.18885627 -0.50926477 0.0076140678
+-0.087890998 0.089233004 0.022611 0.8909108 0.13929515 -0.43229026 0.012848299
+-0.081047997 0.077169001 0.025829 0.78641486 0.46373811 -0.40804234 0.0062469114
+-0.086392999 0.10784 0.018634999 0.82173651 0.22532307 -0.52342957 0.049227774
+-0.087672003 0.10492 0.017263999 0.86681664 0.0054754531 -0.498597 0.033453744
+-0.089332998 0.098483004 0.01761 0.90338933 -0.11469468 -0.41319811 0.0069744457
+-0.086374998 0.083067 0.018607 0.89179856 0.414648 -0.18100342 0.0088002766
+-0.089179002 0.089185998 0.018947 0.97110236 0.13235515 -0.19860105 0.021748984
+-0.082878999 0.076109 0.017794 0.84889227 0.48539269 -0.20922697 0.0035174063
+-0.082500003 0.074674003 0.0071175001 0.82750958 0.52928513 -0.18731017 0.0049080662
+-0.026436999 0.064140998 0.039321002 0.43814281 0.22409004 -0.87052548 0.040726949
+-0.030035 0.066129997 0.038942002 0.042627886 0.20684171 -0.97744536 0.036731411
+-0.026131 0.056531001 0.038881999 0.46868646 -0.18745917 -0.86324513 0.057940684
+-0.031663999 0.056657001 0.037742 -0.011200796 -0.10971908 -0.99389958 0.049078658
+-0.045715999 0.064540997 0.039166 0.27066356 0.20376664 -0.94086152 0.014609119
+-0.051959001 0.066868998 0.036733001 0.45068917 0.51399606 -0.72985429 0.025667811
+-0.042557001 0.055544998 0.039026 0.1486076 -0.041601405 -0.98802084 0.041527726
+-0.049405999 0.056892 0.034343999 0.6727168 0.11576855 -0.73078698 0.042428575
+-0.055500001 0.062391002 0.029498 0.59951371 0.53357315 -0.59655923 0.043033648
+-0.053750001 0.058573999 0.026312999 0.83461094 0.44335803 -0.32689163 0.075001374
+-0.034060001 0.050136998 0.038577002 0.062156912 -0.34150508 -0.9378224 0.049006518
+-0.041740999 0.049589999 0.03929 0.18698144 -0.34636065 -0.9192782 0.045123689
+-0.050974999 0.049435001 0.036965001 0.24271692 -0.67756188 -0.69426107 0.051133763
+-0.052999999 0.051065002 0.029208999 0.3495166 -0.61714137 -0.70496428 0.042339273
+-0.054145001 0.054568 0.012257 0.73811674 -0.5837099 0.33832872 0.10117272
+-0.055847999 0.054170001 0.0083272001 0.55382234 -0.62194115 0.55359733 0.074788764
+-0.054843999 0.049295001 0.011462 0.71343958 -0.5019455 0.48893222 0.076894961
+-0.056150001 0.050618999 0.0092928996 0.58453882 -0.60774499 0.53755039 0.087734967
+-0.061450999 0.068256997 0.035376001 -0.29371658 -0.70654899 0.64383167 0.0069489204
+-0.069724999 0.069958001 0.032788001 0.51250762 0.6654157 -0.54273182 0.0082743121
+-0.062822998 0.063322 0.026885999 -0.36954576 -0.80908924 0.45695794 0.0015373469
+-0.071037002 0.066786997 0.025227999 -0.53203923 -0.76704764 0.35856968 0.0046437387
+-0.060857002 0.060568001 0.022643 -0.36452094 -0.86008775 0.3568942 0.0045510111
+-0.067000002 0.061558001 0.020109 -0.45134526 -0.75177139 0.48075694 0.0056222733
+-0.078199998 0.071278997 0.021032 0.72853959 0.59948909 -0.33142552 0.0062416438
+-0.062116001 0.045145001 0.037802 0.53365648 -0.68499517 -0.49597609 0.040642425
+-0.065472998 0.039512999 0.037964001 0.53365141 -0.68499339 -0.49598423 0.04063797
+-0.067249998 0.037420001 0.033413 0.58283192 -0.78510177 -0.20957632 0.040994368
+-0.072701998 0.065008 0.018701 -0.52006978 -0.77749521 0.35359377 0.0054407413
+-0.061450001 0.059165001 0.018731 -0.34448564 -0.88180333 0.32210648 0.0041377745
+-0.067500003 0.061478999 0.019221 -0.49395499 -0.764835 0.41356477 0.0061281542
+-0.057411 0.054113999 0.0038256999 0.58451724 -0.60774255 0.53757656 0.087737001
+-0.079222001 0.070653997 0.017735001 0.73846012 0.65186685 -0.17247072 0.0068075666
+-0.062472999 0.044679999 0.01111 0.46874011 -0.60717469 -0.64157742 0.005866745
+-0.067249998 0.042257998 0.010414 0.46878213 -0.60720211 -0.64152074 0.0058589075
+-0.066389002 0.040514998 0.01316 0.46878806 -0.60721052 -0.64150858 0.0058575436
+-0.068359002 0.038502 0.011958 0.46882352 -0.60723507 -0.64145941 0.0058502122
+-0.061381001 0.047479998 0.0076069999 0.5694347 -0.66778845 -0.47937754 0.016703486
+-0.068558998 0.043549001 0.0081575997 0.47598499 -0.67020458 -0.56944197 0.0028901543
+-0.070928998 0.039829999 0.0085888002 0.47598499 -0.67020458 -0.56944197 0.0028901543
+-0.016625 0.18375 -0.019734999 -0.69538468 -0.23898028 -0.67773783 0.0056980988
+-0.015198 0.17471001 -0.018867999 -0.5923745 -0.24637972 -0.76706547 0.01349183
+-0.015944 0.16264001 -0.0091036996 -0.43222958 -0.51712793 -0.73875326 0.00369721
+-0.015977001 0.16069999 -0.0088072 -0.39357325 -0.48935169 -0.77822554 0.0063953851
+-0.013251 0.16708 -0.015264 -0.52248943 -0.46689072 -0.71345484 0.006487147
+-0.014292 0.16098 -0.011252 -0.45074272 -0.5343765 -0.71503335 0.0045233937
+-0.013986 0.184 -0.023739001 -0.69996727 -0.29157621 -0.65194255 0.0068006245
+-0.011633 0.17699 -0.023349 -0.64671504 -0.3023999 -0.70022422 0.011895906
+-0.0091028996 0.16988 -0.021457 -0.59230733 -0.37863559 -0.71120119 0.0092242574
+-0.025562 0.18273 -0.0096247001 0.67986989 0.016672963 0.73314315 0.0095390202
+-0.027249999 0.18254 -0.0094384002 0.67986888 0.016679294 0.73314404 0.0095392438
+-0.025736 0.17948 -0.0089653004 0.67986959 0.016676841 0.73314333 0.0095393462
+-0.031215999 0.17589 -0.0051154001 0.63255119 -0.029571004 0.7739538 0.017594624
+-0.020399 0.18449999 -0.014943 0.70246118 0.038495716 0.71068025 0.0076522529
+-0.021338999 0.17645 -0.014566 0.66117793 0.027738377 0.7497161 0.0099074291
+-0.027124999 0.17234001 -0.010156 0.60842365 0.096019991 0.78778219 0.021824442
+-0.039390001 0.1733 -0.0023574999 -0.29537752 -0.14131819 -0.94487101 0.06837032
+-0.022876 0.16406 -0.0078103002 -0.38129494 -0.48171195 -0.78902966 0.006038486
+-0.031597 0.16651 -0.0049291998 -0.62478781 -0.1951244 -0.75602031 0.019099949
+-0.022600001 0.15911999 -0.0037990001 -0.37730029 -0.50651139 -0.77530044 0.0046436773
+-0.030371999 0.15767001 -0.0012672 -0.46141326 -0.12661076 -0.87810451 0.027269369
+-0.021158 0.16848999 -0.012383 -0.43273494 -0.47357085 -0.76711875 0.0067598876
+-0.027000001 0.17120001 -0.01022 0.60842371 0.096020497 0.78778213 0.021823406
+-0.041719001 0.16813 -0.00074957998 -0.30108872 -0.091772109 -0.94916987 0.064206578
+-0.048250001 0.16748001 -0.00015191 -0.16683987 -0.35772914 -0.91880053 0.042867519
+-0.037250001 0.16147 -7.2627998e-05 -0.50185984 -0.16620237 -0.84883064 0.034010869
+-0.066428997 0.15783 -0.0085672997 0.2064072 -0.93305117 -0.29463804 0.015543612
+-0.071284004 0.15839 -0.005998 0.2064072 -0.93305117 -0.29463804 0.015543612
+-0.065978996 0.16288 -0.017792 0.19103831 -0.86545706 -0.46312907 0.031841654
+-0.071622998 0.16384 -0.015760001 0.19091524 -0.86551589 -0.46306965 0.031785745
+-0.066068001 0.16051 -0.013567 0.18612653 -0.91787857 -0.350508 0.025151037
+-0.073307 0.16049001 -0.011832 0.23880717 -0.85915995 -0.45256522 0.034051653
+-0.077 0.16204 -0.019241 0.22228232 -0.8792305 -0.42136011 0.03356503
+-0.077179 0.15851 -0.01495 0.18266542 -0.91310471 -0.36451766 0.025214965
+-0.073691003 0.17286 -0.037944 -0.025542269 -0.91648245 -0.39925876 0.019174108
+-0.077550001 0.17220999 -0.039175 -0.025476603 -0.91650516 -0.39921072 0.019112799
+-0.065921001 0.16586 -0.025022 0.15715888 -0.88196146 -0.44434786 0.02887252
+-0.072094999 0.16784 -0.024724999 0.1767879 -0.88837665 -0.42371312 0.027746443
+-0.066 0.16808 -0.030916 0.054610178 -0.91556865 -0.39843675 0.014395946
+-0.073448002 0.17050999 -0.032044999 0.098481193 -0.88386661 -0.45725399 0.010965914
+-0.077770002 0.16434 -0.025938001 0.21544355 -0.87500358 -0.43353528 0.026826311
+-0.077892996 0.16039 -0.021299001 0.2222922 -0.87915635 -0.42150956 0.033694346
+-0.078211002 0.169 -0.034566 0.18613139 -0.85368556 -0.4863908 0.012735247
+-0.034667 0.15131 -0.00071028998 -0.39771387 -0.18987525 -0.89764744 0.019185906
+-0.066117004 0.17353 -0.047453001 -0.15292998 -0.88486844 -0.44002312 0.012427152
+-0.071985997 0.17612 -0.045384001 -0.1860113 -0.85304689 -0.48755604 0.015654244
+-0.069250003 0.182 -0.055025999 -0.18646584 -0.85284752 -0.48773098 0.015722066
+-0.064992003 0.17802 -0.054644998 -0.18632874 -0.85291398 -0.48766747 0.015678046
+-0.069935001 0.17983 -0.051987998 -0.18646584 -0.85284752 -0.48773098 0.015722066
+-0.077930003 0.17516001 -0.044399999 -0.18629308 -0.85293317 -0.48764735 0.01566753
set(SUBSYS_NAME tests_common)
set(SUBSYS_DESC "Point cloud library common module unit tests")
PCL_SET_TEST_DEPENDENCIES(SUBSYS_DEPS common)
-set(OPT_DEPS io features search kdtree octree)
+set(OPT_DEPS io search kdtree octree)
set(DEFAULT ON)
set(build TRUE)
PCL_ADD_TEST(common_common test_common FILES test_common.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_geometry test_geometry FILES test_geometry.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_copy_point test_copy_point FILES test_copy_point.cpp LINK_WITH pcl_gtest pcl_common)
- PCL_ADD_TEST(common_centroid test_centroid FILES test_centroid.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_transforms test_transforms FILES test_transforms.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_int test_plane_intersection FILES test_plane_intersection.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_pca test_pca FILES test_pca.cpp LINK_WITH pcl_gtest pcl_common)
#PCL_ADD_TEST(common_spring test_spring FILES test_spring.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_bearing_angle_image test_bearing_angle_image FILES test_bearing_angle_image.cpp LINK_WITH pcl_gtest pcl_common)
PCL_ADD_TEST(common_point_type_conversion test_common_point_type_conversion FILES test_point_type_conversion.cpp LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(common_colors test_colors FILES test_colors.cpp LINK_WITH pcl_gtest pcl_common)
- if (BUILD_io AND BUILD_features)
- PCL_ADD_TEST(a_transforms_test test_transforms
- FILES test_transforms.cpp
+ if(BUILD_io)
+ PCL_ADD_TEST(common_centroid test_centroid
+ FILES test_centroid.cpp
LINK_WITH pcl_gtest pcl_io
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
- endif (BUILD_io AND BUILD_features)
+ endif()
endif (build)
#include <pcl/common/eigen.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
+#include <pcl/io/pcd_io.h>
#include <pcl/pcl_tests.h>
#include <pcl/common/centroid.h>
using namespace pcl;
using pcl::test::EXPECT_EQ_VECTORS;
+pcl::PCLPointCloud2 cloud_blob;
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, compute3DCentroidFloat)
{
EXPECT_FLOAT_EQ (-500, centroid.curvature);
}
+TEST (PCL, demeanPointCloud)
+{
+ PointCloud<PointXYZ> cloud, cloud_demean;
+ fromPCLPointCloud2 (cloud_blob, cloud);
+
+ Eigen::Vector4f centroid;
+ compute3DCentroid (cloud, centroid);
+ EXPECT_NEAR (centroid[0], -0.0290809, 1e-4);
+ EXPECT_NEAR (centroid[1], 0.102653, 1e-4);
+ EXPECT_NEAR (centroid[2], 0.027302, 1e-4);
+ EXPECT_NEAR (centroid[3], 1, 1e-4);
+
+ // Check standard demean
+ demeanPointCloud (cloud, centroid, cloud_demean);
+ EXPECT_METADATA_EQ (cloud_demean, cloud);
+
+ EXPECT_XYZ_NEAR (cloud_demean[0], PointXYZ (0.034503, 0.010837, 0.013447), 1e-4);
+ EXPECT_XYZ_NEAR (cloud_demean[cloud_demean.size () - 1], PointXYZ (-0.048849, 0.072507, -0.071702), 1e-4);
+
+ std::vector<int> indices (cloud.size ());
+ for (int i = 0; i < static_cast<int> (indices.size ()); ++i) { indices[i] = i; }
+
+ // Check standard demean w/ indices
+ demeanPointCloud (cloud, indices, centroid, cloud_demean);
+ EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
+ EXPECT_EQ (cloud_demean.width, indices.size ());
+ EXPECT_EQ (cloud_demean.height, 1);
+ EXPECT_EQ (cloud_demean.size (), cloud.size ());
+
+ EXPECT_XYZ_NEAR (cloud_demean[0], PointXYZ (0.034503, 0.010837, 0.013447), 1e-4);
+ EXPECT_XYZ_NEAR (cloud_demean[cloud_demean.size () - 1], PointXYZ (-0.048849, 0.072507, -0.071702), 1e-4);
+
+ // Check eigen demean
+ Eigen::MatrixXf mat_demean;
+ demeanPointCloud (cloud, centroid, mat_demean);
+ EXPECT_EQ (mat_demean.cols (), int (cloud.size ()));
+ EXPECT_EQ (mat_demean.rows (), 4);
+
+ EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
+ EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
+ EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
+
+ EXPECT_NEAR (mat_demean (0, cloud_demean.size () - 1), -0.048849, 1e-4);
+ EXPECT_NEAR (mat_demean (1, cloud_demean.size () - 1), 0.072507, 1e-4);
+ EXPECT_NEAR (mat_demean (2, cloud_demean.size () - 1), -0.071702, 1e-4);
+
+ // Check eigen demean + indices
+ demeanPointCloud (cloud, indices, centroid, mat_demean);
+ EXPECT_EQ (mat_demean.cols (), int (cloud.size ()));
+ EXPECT_EQ (mat_demean.rows (), 4);
+
+ EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
+ EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
+ EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
+
+ EXPECT_NEAR (mat_demean (0, cloud_demean.size () - 1), -0.048849, 1e-4);
+ EXPECT_NEAR (mat_demean (1, cloud_demean.size () - 1), 0.072507, 1e-4);
+ EXPECT_NEAR (mat_demean (2, cloud_demean.size () - 1), -0.071702, 1e-4);
+}
+
int
main (int argc, char** argv)
{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+ if (io::loadPCDFile (argv[1], cloud_blob) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
}
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2018-, Open Perception, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <gtest/gtest.h>
+
+#include <pcl/pcl_tests.h>
+#include <pcl/common/colors.h>
+
+TEST (ColorLUT, Glasbey)
+{
+ ASSERT_EQ (pcl::GlasbeyLUT::size (), 256);
+ ASSERT_RGB_EQ (pcl::GlasbeyLUT::at (0), pcl::RGB (77, 175, 74));
+ ASSERT_RGB_EQ (pcl::GlasbeyLUT::at (255), pcl::RGB (117, 143, 207));
+}
+
+TEST (ColorLUT, Viridis)
+{
+ ASSERT_EQ (pcl::ViridisLUT::size (), 256);
+ ASSERT_RGB_EQ (pcl::ViridisLUT::at (0), pcl::RGB (68, 1, 84));
+ ASSERT_RGB_EQ (pcl::ViridisLUT::at (255), pcl::RGB (254, 231, 36));
+}
+
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+
PointXYZRGB p;
uint8_t r = 127, g = 64, b = 254;
- uint32_t rgb = (static_cast<uint32_t> (r) << 16 |
- static_cast<uint32_t> (g) << 8 |
- static_cast<uint32_t> (b));
- p.rgb = *reinterpret_cast<float*>(&rgb);
+ p.r = r;
+ p.g = g;
+ p.b = b;
- rgb = *reinterpret_cast<int*>(&p.rgb);
- uint8_t rr = (rgb >> 16) & 0x0000ff;
- uint8_t gg = (rgb >> 8) & 0x0000ff;
- uint8_t bb = (rgb) & 0x0000ff;
+ uint8_t rr = (p.rgba >> 16) & 0x0000ff;
+ uint8_t gg = (p.rgba >> 8) & 0x0000ff;
+ uint8_t bb = (p.rgba) & 0x0000ff;
EXPECT_EQ (r, rr);
EXPECT_EQ (g, gg);
EXPECT_EQ (bb, 254);
p.r = 0; p.g = 127; p.b = 0;
- rgb = *reinterpret_cast<int*>(&p.rgb);
+ uint32_t rgb = p.rgba;
rr = (rgb >> 16) & 0x0000ff;
gg = (rgb >> 8) & 0x0000ff;
bb = (rgb) & 0x0000ff;
uint32_t rgb = (static_cast<uint32_t> (r) << 16 |
static_cast<uint32_t> (g) << 8 |
static_cast<uint32_t> (b));
- p.rgb = *reinterpret_cast<float*>(&rgb);
+ p.rgba = rgb;
- rgb = *reinterpret_cast<int*>(&p.rgb);
- uint8_t rr = (rgb >> 16) & 0x0000ff;
- uint8_t gg = (rgb >> 8) & 0x0000ff;
- uint8_t bb = (rgb) & 0x0000ff;
+ uint8_t rr = (p.rgba >> 16) & 0x0000ff;
+ uint8_t gg = (p.rgba >> 8) & 0x0000ff;
+ uint8_t bb = (p.rgba) & 0x0000ff;
EXPECT_EQ (r, rr);
EXPECT_EQ (g, gg);
EXPECT_EQ (bb, 254);
p.r = 0; p.g = 127; p.b = 0;
- rgb = *reinterpret_cast<int*>(&p.rgb);
+ rgb = p.rgba;
rr = (rgb >> 16) & 0x0000ff;
gg = (rgb >> 8) & 0x0000ff;
bb = (rgb) & 0x0000ff;
EXPECT_EQ (z_val, 3.0);
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "rgb", is_rgb, rgb_val));
EXPECT_EQ (is_rgb, true);
- int rgb = *reinterpret_cast<int*>(&rgb_val);
+ uint32_t rgb;
+ std::memcpy (&rgb, &rgb_val, sizeof(rgb_val));
EXPECT_EQ (rgb, 0xff7f40fe); // alpha is 255
pcl::for_each_type<FieldList> (CopyIfFieldExists<PointXYZRGBNormal, float> (p, "normal_x", is_normal_x, normal_x_val));
EXPECT_EQ (is_normal_x, true);
// test if U * V * U^T = M
r_result = r_vectors * r_eigenvalues.asDiagonal () * r_vectors.transpose ();
r_error = r_result - r_matrix;
- diff = r_error.cwiseAbs (). sum ();
+ diff = r_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
// test if the eigenvalues are orthonormal
g_result = r_vectors * r_vectors.transpose ();
g_error = g_result - RMatrix::Identity ();
- diff = g_error.cwiseAbs (). sum ();
+ diff = g_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
// test if column major matrices are also calculated correctly
eigen22 (c_matrix, c_vectors, c_eigenvalues);
c_result = c_vectors * c_eigenvalues.asDiagonal () * c_vectors.transpose ();
c_error = c_result - c_matrix;
- diff = c_error.cwiseAbs (). sum ();
+ diff = c_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
g_result = c_vectors * c_vectors.transpose ();
g_error = g_result - CMatrix::Identity ();
- diff = g_error.cwiseAbs (). sum ();
+ diff = g_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
}
}
// test if U * V * U^T = M
r_result = r_vectors * r_eigenvalues.asDiagonal () * r_vectors.transpose ();
r_error = r_result - r_matrix;
- diff = r_error.cwiseAbs (). sum ();
+ diff = r_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
// test if the eigenvalues are orthonormal
g_result = r_vectors * r_vectors.transpose ();
g_error = g_result - RMatrix::Identity ();
- diff = g_error.cwiseAbs (). sum ();
+ diff = g_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
// test if column major matrices are also calculated correctly
eigen22 (c_matrix, c_vectors, c_eigenvalues);
c_result = c_vectors * c_eigenvalues.asDiagonal () * c_vectors.transpose ();
c_error = c_result - c_matrix;
- diff = c_error.cwiseAbs (). sum ();
+ diff = c_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
g_result = c_vectors * c_vectors.transpose ();
g_error = g_result - CMatrix::Identity ();
- diff = g_error.cwiseAbs (). sum ();
+ diff = g_error.cwiseAbs ().maxCoeff ();
EXPECT_LE (diff, epsilon);
}
}
val2 = val1;
val3 = val1;
}
- // 1%: 2 values are equal but none is set explicitely to 0
+ // 1%: 2 values are equal but none is set explicitly to 0
else if (test_case == 1)
{
val2 = val3;
transformationd.linear() = (Eigen::Matrix3d) Eigen::AngleAxisd(M_PI/7, Eigen::Vector3d::UnitY())
* Eigen::AngleAxisd(M_PI/4, Eigen::Vector3d::UnitZ());
test << 5.35315, 2.89914, 0.196848, -49.2788;
+ test /= test.head<3> ().norm ();
+
tolerance = 1e-4;
plane->values[0] = 5.4;
EXPECT_LE ((transformationd.matrix())(i,j) - test(i,j), tolerance);
}
+TEST (PCL, getTransFromUnitVectors)
+{
+ Eigen::Vector3f xaxis (1, 0, 0), yaxis (0, 1, 0), zaxis (0, 0, 1);
+ Eigen::Affine3f trans;
+
+ trans = pcl::getTransFromUnitVectorsZY (zaxis, yaxis);
+ Eigen::Vector3f xaxistrans = trans * xaxis, yaxistrans = trans * yaxis, zaxistrans = trans * zaxis;
+ EXPECT_NEAR ((xaxistrans - xaxis).norm (), 0.0f, 1e-6);
+ EXPECT_NEAR ((yaxistrans - yaxis).norm (), 0.0f, 1e-6);
+ EXPECT_NEAR ((zaxistrans - zaxis).norm (), 0.0f, 1e-6);
+
+ trans = pcl::getTransFromUnitVectorsXY (xaxis, yaxis);
+ xaxistrans = trans * xaxis, yaxistrans = trans * yaxis, zaxistrans = trans * zaxis;
+ EXPECT_NEAR ((xaxistrans-xaxis).norm (), 0.0f, 1e-6);
+ EXPECT_NEAR ((yaxistrans-yaxis).norm (), 0.0f, 1e-6);
+ EXPECT_NEAR ((zaxistrans-zaxis).norm (), 0.0f, 1e-6);
+}
+
+TEST (PCL, getTransformation)
+{
+ const float rot_x = 2.8827f;
+ const float rot_y = -0.31190f;
+ const float rot_z = -0.93058f;
+
+ Eigen::Affine3f affine;
+ pcl::getTransformation (0, 0, 0, rot_x, rot_y, rot_z, affine);
+
+ EXPECT_NEAR (affine (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (affine (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (affine (0, 2), -0.028107658f, 1e-4);
+ EXPECT_NEAR (affine (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (affine (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (affine (1, 2), -0.39082864f, 1e-4);
+ EXPECT_NEAR (affine (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (affine (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (affine (2, 2), -0.920034f, 1e-4);
+}
+
+TEST (PCL, getTranslationAndEulerAngles)
+{
+ const float trans_x = 0.1f;
+ const float trans_y = 0.2f;
+ const float trans_z = 0.3f;
+ const float rot_x = 2.8827f;
+ const float rot_y = -0.31190f;
+ const float rot_z = -0.93058f;
+
+ Eigen::Affine3f affine;
+ pcl::getTransformation (trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, affine);
+
+ float tx, ty, tz, rx, ry, rz;
+ pcl::getTranslationAndEulerAngles (affine, tx, ty, tz, rx, ry, rz);
+ EXPECT_NEAR (tx, trans_x, 1e-4);
+ EXPECT_NEAR (ty, trans_y, 1e-4);
+ EXPECT_NEAR (tz, trans_z, 1e-4);
+ EXPECT_NEAR (rx, rot_x, 1e-4);
+ EXPECT_NEAR (ry, rot_y, 1e-4);
+ EXPECT_NEAR (rz, rot_z, 1e-4);
+}
+
/* ---[ */
int
main (int argc, char** argv)
CloudXYZRGBNormal cloud_xyz_rgb_normal;
pcl::copyPointCloud (cloud_xyz_rgba, cloud_xyz_rgb_normal);
- EXPECT_EQ (int (cloud_xyz_rgb_normal.size ()), 5);
+ ASSERT_METADATA_EQ (cloud_xyz_rgba, cloud_xyz_rgb_normal);
+ ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 5);
for (int i = 0; i < 5; ++i)
{
EXPECT_XYZ_EQ (cloud_xyz_rgba[i], cloud_xyz_rgb_normal[i]);
vector<int> indices;
indices.push_back (0); indices.push_back (1);
pcl::copyPointCloud (cloud_xyz_rgba, indices, cloud_xyz_rgb_normal);
- EXPECT_EQ (int (cloud_xyz_rgb_normal.size ()), 2);
+ ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 2);
for (int i = 0; i < 2; ++i)
{
EXPECT_XYZ_EQ (cloud_xyz_rgba[i], cloud_xyz_rgb_normal[i]);
vector<int, Eigen::aligned_allocator<int> > indices_aligned;
indices_aligned.push_back (1); indices_aligned.push_back (2); indices_aligned.push_back (3);
pcl::copyPointCloud (cloud_xyz_rgba, indices_aligned, cloud_xyz_rgb_normal);
- EXPECT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
+ ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
for (int i = 0; i < 3; ++i)
{
EXPECT_XYZ_EQ (cloud_xyz_rgba[i], cloud_xyz_rgb_normal[i]);
PointIndices pindices;
pindices.indices.push_back (0); pindices.indices.push_back (2); pindices.indices.push_back (4);
- EXPECT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
+ ASSERT_EQ (int (cloud_xyz_rgb_normal.size ()), 3);
for (int i = 0; i < 3; ++i)
{
EXPECT_XYZ_EQ (cloud_xyz_rgba[i], cloud_xyz_rgb_normal[i]);
/*
* Software License Agreement (BSD License)
*
+ * Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010, Willow Garage, Inc.
+ * Copyright (c) 2018-, Open Perception, Inc.
+ *
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*
*
*/
-#include <gtest/gtest.h>
-#include <iostream> // For debug
+#include <gtest/gtest.h>
-#include <pcl/PCLPointCloud2.h>
-#include <pcl/io/pcd_io.h>
-#include <pcl/features/feature.h>
-#include <pcl/common/eigen.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/transforms.h>
+#include <pcl/common/io.h>
#include <pcl/pcl_tests.h>
using namespace pcl;
-using namespace pcl::io;
-using namespace std;
-const float PI = 3.14159265f;
-const float rho = std::sqrt (2.0f) / 2.0f; // cos(PI/4) == sin(PI/4)
+typedef ::testing::Types<Eigen::Transform<float, 3, Eigen::Affine>,
+ Eigen::Transform<double, 3, Eigen::Affine>,
+ Eigen::Matrix<float, 4, 4>,
+ Eigen::Matrix<double, 4,4> > TransformTypes;
-PointCloud<PointXYZ> cloud;
-pcl::PCLPointCloud2 cloud_blob;
-
-void
-init ()
+template <typename Transform>
+class Transforms : public ::testing::Test
{
- PointXYZ p0 (0, 0, 0);
- PointXYZ p1 (1, 0, 0);
- PointXYZ p2 (0, 1, 0);
- PointXYZ p3 (0, 0, 1);
- cloud.points.push_back (p0);
- cloud.points.push_back (p1);
- cloud.points.push_back (p2);
- cloud.points.push_back (p3);
-}
+ public:
+ typedef typename Transform::Scalar Scalar;
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, DeMean)
+ Transforms ()
+ : ABS_ERROR (std::numeric_limits<Scalar>::epsilon () * 10)
+ , CLOUD_SIZE (100)
+ {
+ Eigen::Matrix<Scalar, 6, 1> r = Eigen::Matrix<Scalar, 6, 1>::Random ();
+ Eigen::Transform<Scalar, 3, Eigen::Affine> transform;
+ pcl::getTransformation (r[0], r[1], r[2], r[3], r[4], r[5], transform);
+ tf = transform.matrix ();
+
+ p_xyz_normal.resize (CLOUD_SIZE);
+ p_xyz_normal_trans.resize (CLOUD_SIZE);
+ for (size_t i = 0; i < CLOUD_SIZE; ++i)
+ {
+ Eigen::Vector3f xyz = Eigen::Vector3f::Random ();
+ Eigen::Vector3f normal = Eigen::Vector3f::Random ().normalized ();
+ p_xyz_normal[i].getVector3fMap () = xyz;
+ p_xyz_normal_trans[i].getVector3fMap () = (transform * xyz.template cast<typename Transform::Scalar> ()).template cast<float> ();
+ p_xyz_normal[i].getNormalVector3fMap () = normal;
+ p_xyz_normal_trans[i].getNormalVector3fMap () = (transform.rotation () * normal.template cast<typename Transform::Scalar> ()).template cast<float> ();
+ p_xyz_normal[i].rgb = Eigen::Matrix<float, 1, 1>::Random ()[0];
+ p_xyz_normal_trans[i].rgb = p_xyz_normal[i].rgb;
+ }
+
+ pcl::copyPointCloud(p_xyz_normal, p_xyz);
+ pcl::copyPointCloud(p_xyz_normal_trans, p_xyz_trans);
+
+ indices.resize (CLOUD_SIZE / 2);
+ for (size_t i = 0; i < indices.size(); ++i)
+ indices[i] = i * 2;
+ }
+
+ const Scalar ABS_ERROR;
+ const size_t CLOUD_SIZE;
+
+ Transform tf;
+
+ // Random point clouds and their expected transformed versions
+ pcl::PointCloud<pcl::PointXYZ> p_xyz, p_xyz_trans;
+ pcl::PointCloud<pcl::PointXYZRGBNormal> p_xyz_normal, p_xyz_normal_trans;
+
+ // Indices, every second point
+ std::vector<int> indices;
+
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
+};
+
+TYPED_TEST_CASE (Transforms, TransformTypes);
+
+TYPED_TEST (Transforms, PointCloudXYZDense)
{
- PointCloud<PointXYZ> cloud, cloud_demean;
- fromPCLPointCloud2 (cloud_blob, cloud);
-
- Eigen::Vector4f centroid;
- compute3DCentroid (cloud, centroid);
- EXPECT_NEAR (centroid[0], -0.0290809, 1e-4);
- EXPECT_NEAR (centroid[1], 0.102653, 1e-4);
- EXPECT_NEAR (centroid[2], 0.027302, 1e-4);
- EXPECT_NEAR (centroid[3], 1, 1e-4);
-
- // Check standard demean
- demeanPointCloud (cloud, centroid, cloud_demean);
- EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud_demean.width, cloud.width);
- EXPECT_EQ (cloud_demean.height, cloud.height);
- EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
-
- EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
-
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
-
- vector<int> indices (cloud.points.size ());
- for (int i = 0; i < static_cast<int> (indices.size ()); ++i) { indices[i] = i; }
-
- // Check standard demean w/ indices
- demeanPointCloud (cloud, indices, centroid, cloud_demean);
- EXPECT_EQ (cloud_demean.is_dense, cloud.is_dense);
- EXPECT_EQ (cloud_demean.width, cloud.width);
- EXPECT_EQ (cloud_demean.height, cloud.height);
- EXPECT_EQ (cloud_demean.points.size (), cloud.points.size ());
-
- EXPECT_NEAR (cloud_demean.points[0].x, 0.034503, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].y, 0.010837, 1e-4);
- EXPECT_NEAR (cloud_demean.points[0].z, 0.013447, 1e-4);
-
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].x, -0.048849, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].y, 0.072507, 1e-4);
- EXPECT_NEAR (cloud_demean.points[cloud_demean.points.size () - 1].z, -0.071702, 1e-4);
-
- // Check eigen demean
- Eigen::MatrixXf mat_demean;
- demeanPointCloud (cloud, centroid, mat_demean);
- EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
- EXPECT_EQ (mat_demean.rows (), 4);
-
- EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
- EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
- EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
-
- EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
- EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
- EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
-
- // Check eigen demean + indices
- demeanPointCloud (cloud, indices, centroid, mat_demean);
- EXPECT_EQ (mat_demean.cols (), int (cloud.points.size ()));
- EXPECT_EQ (mat_demean.rows (), 4);
-
- EXPECT_NEAR (mat_demean (0, 0), 0.034503, 1e-4);
- EXPECT_NEAR (mat_demean (1, 0), 0.010837, 1e-4);
- EXPECT_NEAR (mat_demean (2, 0), 0.013447, 1e-4);
-
- EXPECT_NEAR (mat_demean (0, cloud_demean.points.size () - 1), -0.048849, 1e-4);
- EXPECT_NEAR (mat_demean (1, cloud_demean.points.size () - 1), 0.072507, 1e-4);
- EXPECT_NEAR (mat_demean (2, cloud_demean.points.size () - 1), -0.071702, 1e-4);
+ pcl::PointCloud<pcl::PointXYZ> p;
+ pcl::transformPointCloud (this->p_xyz, p, this->tf);
+ ASSERT_METADATA_EQ (p, this->p_xyz);
+ ASSERT_EQ (p.size (), this->p_xyz.size ());
+ for (size_t i = 0; i < p.size (); ++i)
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_trans[i], this->ABS_ERROR);
}
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, Transform)
+TYPED_TEST (Transforms, PointCloudXYZDenseIndexed)
{
- Eigen::Vector3f offset (100, 0, 0);
- float angle = PI/4;
- Eigen::Quaternionf rotation (cos (angle / 2), 0, 0, sin (angle / 2));
-
- PointCloud<PointXYZ> cloud_out;
- const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points (cloud_out.points);
- transformPointCloud (cloud, cloud_out, offset, rotation);
-
- EXPECT_EQ (cloud.points.size (), cloud_out.points.size ());
- EXPECT_EQ (100, points[0].x);
- EXPECT_EQ (0, points[0].y);
- EXPECT_EQ (0, points[0].z);
- EXPECT_NEAR (100+rho, points[1].x, 1e-4);
- EXPECT_NEAR (rho, points[1].y, 1e-4);
- EXPECT_EQ (0, points[1].z);
- EXPECT_NEAR (100-rho, points[2].x, 1e-4);
- EXPECT_NEAR (rho, points[2].y, 1e-4);
- EXPECT_EQ (0, points[2].z);
- EXPECT_EQ (100, points[3].x);
- EXPECT_EQ (0, points[3].y);
- EXPECT_EQ (1, points[3].z);
-
- PointCloud<PointXYZ> cloud_out2;
- const vector<PointXYZ, Eigen::aligned_allocator<PointXYZ> > &points2 (cloud_out2.points);
- Eigen::Translation3f translation (offset);
- Eigen::Affine3f transform;
- transform = translation * rotation;
- transformPointCloud (cloud, cloud_out2, transform);
-
- EXPECT_EQ (cloud.points.size (), cloud_out2.points.size ());
- EXPECT_EQ (100, points2[0].x);
- EXPECT_EQ (0, points2[0].y);
- EXPECT_EQ (0, points2[0].z);
- EXPECT_NEAR (100+rho, points2[1].x, 1e-4);
- EXPECT_NEAR (rho, points2[1].y, 1e-4);
- EXPECT_EQ (0, points2[1].z);
- EXPECT_NEAR (100-rho, points2[2].x, 1e-4);
- EXPECT_NEAR (rho, points2[2].y, 1e-4);
- EXPECT_EQ (0, points2[2].z);
- EXPECT_EQ (100, points2[3].x);
- EXPECT_EQ (0, points2[3].y);
- EXPECT_EQ (1, points2[3].z);
+ pcl::PointCloud<pcl::PointXYZ> p;
+ pcl::transformPointCloud (this->p_xyz, this->indices, p, this->tf);
+ ASSERT_EQ (p.size (), this->indices.size ());
+ ASSERT_EQ (p.width, this->indices.size ());
+ ASSERT_EQ (p.height, 1);
+ for (size_t i = 0; i < p.size (); ++i)
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_trans[i * 2], this->ABS_ERROR);
}
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, TransformCopyFields)
+TYPED_TEST (Transforms, PointCloudXYZSparse)
{
- Eigen::Affine3f transform;
- transform = Eigen::Translation3f (100, 0, 0);
-
- PointXYZRGBNormal empty_point;
- std::vector<int> indices (1);
-
- PointCloud<PointXYZRGBNormal> cloud (2, 1);
- cloud.points[0].rgba = 0xFF0000;
- cloud.points[1].rgba = 0x00FF00;
-
- // Preserve data in all fields
+ // Make first point infinite and point cloud not dense
+ this->p_xyz.is_dense = false;
+ this->p_xyz[0].x = std::numeric_limits<float>::quiet_NaN();
+
+ pcl::PointCloud<pcl::PointXYZ> p;
+ pcl::transformPointCloud (this->p_xyz, p, this->tf);
+ ASSERT_METADATA_EQ (p, this->p_xyz);
+ ASSERT_EQ (p.size (), this->p_xyz.size ());
+ ASSERT_FALSE (pcl::isFinite (p[0]));
+ for (size_t i = 1; i < p.size (); ++i)
{
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, cloud_out, transform, true);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
+ ASSERT_TRUE (pcl::isFinite (p[i]));
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_trans[i], this->ABS_ERROR);
}
- // Preserve data in all fields (with indices)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, indices, cloud_out, transform, true);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- }
- // Do not preserve data in all fields
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, cloud_out, transform, false);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
- }
- // Do not preserve data in all fields (with indices)
- {
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloud (cloud, indices, cloud_out, transform, false);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- }
- // Preserve data in all fields (with normals version)
+}
+
+TYPED_TEST (Transforms, PointCloudXYZRGBNormalDense)
+{
+ // Copy all fields
{
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, cloud_out, transform, true);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
- EXPECT_RGBA_EQ (cloud.points[1], cloud_out.points[1]);
+ pcl::PointCloud<pcl::PointXYZRGBNormal> p;
+ pcl::transformPointCloudWithNormals (this->p_xyz_normal, p, this->tf, true);
+ ASSERT_METADATA_EQ (p, this->p_xyz_normal);
+ ASSERT_EQ (p.size (), this->p_xyz_normal.size ());
+ for (size_t i = 0; i < p.size (); ++i)
+ {
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_normal_trans[i], this->ABS_ERROR);
+ ASSERT_NORMAL_NEAR (p[i], this->p_xyz_normal_trans[i], this->ABS_ERROR);
+ ASSERT_RGBA_EQ (p[i], this->p_xyz_normal_trans[i]);
+ }
}
- // Preserve data in all fields (with normals and indices version)
+ // Do not copy all fields
{
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, indices, cloud_out, transform, true);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (cloud.points[0], cloud_out.points[0]);
+ pcl::PointCloud<pcl::PointXYZRGBNormal> p;
+ pcl::transformPointCloudWithNormals (this->p_xyz_normal, p, this->tf, false);
+ ASSERT_METADATA_EQ (p, this->p_xyz_normal);
+ ASSERT_EQ (p.size (), this->p_xyz_normal.size ());
+ for (size_t i = 0; i < p.size (); ++i)
+ {
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_normal_trans[i], this->ABS_ERROR);
+ ASSERT_NORMAL_NEAR (p[i], this->p_xyz_normal_trans[i], this->ABS_ERROR);
+ ASSERT_NE (p[i].rgba, this->p_xyz_normal_trans[i].rgba);
+ }
}
- // Do not preserve data in all fields (with normals version)
+}
+
+TYPED_TEST (Transforms, PointCloudXYZRGBNormalDenseIndexed)
+{
+ // Copy all fields
{
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, cloud_out, transform, false);
- ASSERT_EQ (cloud.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[1]);
+ pcl::PointCloud<pcl::PointXYZRGBNormal> p;
+ pcl::transformPointCloudWithNormals (this->p_xyz_normal, this->indices, p, this->tf, true);
+ ASSERT_EQ (p.size (), this->indices.size ());
+ ASSERT_EQ (p.width, this->indices.size ());
+ ASSERT_EQ (p.height, 1);
+ for (size_t i = 0; i < p.size (); ++i)
+ {
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_normal_trans[i * 2], this->ABS_ERROR);
+ ASSERT_NORMAL_NEAR (p[i], this->p_xyz_normal_trans[i * 2], this->ABS_ERROR);
+ ASSERT_RGBA_EQ (p[i], this->p_xyz_normal_trans[i * 2]);
+ }
}
- // Do not preserve data in all fields (with normals and indices version)
+ // Do not copy all fields
{
- PointCloud<PointXYZRGBNormal> cloud_out;
- transformPointCloudWithNormals (cloud, indices, cloud_out, transform, false);
- ASSERT_EQ (indices.size (), cloud_out.size ());
- EXPECT_RGBA_EQ (empty_point, cloud_out.points[0]);
+ pcl::PointCloud<pcl::PointXYZRGBNormal> p;
+ pcl::transformPointCloudWithNormals (this->p_xyz_normal, this->indices, p, this->tf, false);
+ ASSERT_EQ (p.size (), this->indices.size ());
+ ASSERT_EQ (p.width, this->indices.size ());
+ ASSERT_EQ (p.height, 1);
+ for (size_t i = 0; i < p.size (); ++i)
+ {
+ ASSERT_XYZ_NEAR (p[i], this->p_xyz_normal_trans[i * 2], this->ABS_ERROR);
+ ASSERT_NORMAL_NEAR (p[i], this->p_xyz_normal_trans[i * 2], this->ABS_ERROR);
+ ASSERT_NE (p[i].rgba, this->p_xyz_normal_trans[i * 2].rgba);
+ }
}
}
float rot_y = -0.31190f;
float rot_z = -0.93058f;
Eigen::Affine3f affine;
- pcl::getTransformation (0, 0, 0, rot_x, rot_y, rot_z, affine);
-
- EXPECT_NEAR (affine (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (affine (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (affine (0, 2), -0.028107658f, 1e-4);
- EXPECT_NEAR (affine (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (affine (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (affine (1, 2), -0.39082864f, 1e-4);
- EXPECT_NEAR (affine (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (affine (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (affine (2, 2), -0.920034f, 1e-4);
-
- // Approximative!!! Uses SVD internally! See http://eigen.tuxfamily.org/dox/Transform_8h_source.html
- Eigen::Matrix3f rotation = affine.rotation ();
-
- EXPECT_NEAR (rotation (0, 0), 0.56854731f, 1e-4); EXPECT_NEAR (rotation (0, 1), -0.82217032f, 1e-4); EXPECT_NEAR (rotation (0, 2), -0.028107658f, 1e-4);
- EXPECT_NEAR (rotation (1, 0), -0.76327348f, 1e-4); EXPECT_NEAR (rotation (1, 1), -0.51445758f, 1e-4); EXPECT_NEAR (rotation (1, 2), -0.39082864f, 1e-4);
- EXPECT_NEAR (rotation (2, 0), 0.30686751f, 1e-4); EXPECT_NEAR (rotation (2, 1), 0.24365838f, 1e-4); EXPECT_NEAR (rotation (2, 2), -0.920034f, 1e-4);
-
- float trans_x, trans_y, trans_z;
pcl::getTransformation (0.1f, 0.2f, 0.3f, rot_x, rot_y, rot_z, affine);
- pcl::getTranslationAndEulerAngles (affine, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z);
- EXPECT_FLOAT_EQ (trans_x, 0.1f);
- EXPECT_FLOAT_EQ (trans_y, 0.2f);
- EXPECT_FLOAT_EQ (trans_z, 0.3f);
- EXPECT_FLOAT_EQ (rot_x, 2.8827f);
- EXPECT_FLOAT_EQ (rot_y, -0.31190f);
- EXPECT_FLOAT_EQ (rot_z, -0.93058f);
-
- Eigen::Matrix4f transformation (Eigen::Matrix4f::Identity ());
- transformation.block<3, 3> (0, 0) = affine.rotation ();
- transformation.block<3, 1> (0, 3) = affine.translation ();
+ Eigen::Matrix4f transformation = affine.matrix ();
PointXYZ p (1.f, 2.f, 3.f);
Eigen::Vector3f v3 = p.getVector3fMap ();
EXPECT_NEAR (pt.z, ct[0].z, 1e-4);
}
-//////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, commonTransform)
-{
- Eigen::Vector3f xaxis (1,0,0), yaxis (0,1,0), zaxis (0,0,1);
- Eigen::Affine3f trans = pcl::getTransFromUnitVectorsZY (zaxis, yaxis);
- Eigen::Vector3f xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
- //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
- EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
-
- trans = pcl::getTransFromUnitVectorsXY (xaxis, yaxis);
- xaxistrans=trans*xaxis, yaxistrans=trans*yaxis, zaxistrans=trans*zaxis;
- //std::cout << xaxistrans<<"\n"<<yaxistrans<<"\n"<<zaxistrans<<"\n";
- EXPECT_NEAR ((xaxistrans-xaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((yaxistrans-yaxis).norm(), 0.0f, 1e-6);
- EXPECT_NEAR ((zaxistrans-zaxis).norm(), 0.0f, 1e-6);
-}
-
/* ---[ */
int
- main (int argc, char** argv)
+main (int argc, char** argv)
{
- if (argc < 2)
- {
- std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
- if (loadPCDFile (argv[1], cloud_blob) < 0)
- {
- std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
- }
-
testing::InitGoogleTest (&argc, argv);
- init();
return (RUN_ALL_TESTS ());
}
/* ]--- */
TEST (PointCloud, constructor_with_allocation_valued)
{
- PointXYZ nan_point (0.1f, 0.2f, 0.3f);
+ PointXYZ nan_point (0.1f, 0.2f, 0.3f);
PointCloud<PointXYZ> cloud2 (5, 80, nan_point);
EXPECT_EQ (cloud2.width, 5);
EXPECT_EQ (cloud2.height, 80);
EXPECT_EQ (cloud2.size (), 5*80);
- for (PointCloud<PointXYZ>::const_iterator pit = cloud2.begin ();
- pit != cloud2.end ();
- ++pit)
+ for (PointCloud<PointXYZ>::const_iterator pit = cloud2.begin (); pit != cloud2.end (); ++pit)
{
EXPECT_NEAR (pit->x, 0.1, 1e-3);
EXPECT_NEAR (pit->y, 0.2, 1e-3);
EXPECT_NEAR (pit->z, 0.3, 1e-3);
}
-
}
TEST (PointCloud, iterators)
{
- EXPECT_EQ_VECTORS (cloud.begin ()->getVector3fMap (),
+ EXPECT_EQ_VECTORS (cloud.begin ()->getVector3fMap (),
cloud.points.begin ()->getVector3fMap ());
- EXPECT_EQ_VECTORS (cloud.end ()->getVector3fMap (),
- cloud.points.end ()->getVector3fMap ());
+ EXPECT_EQ_VECTORS ((--cloud.end ())->getVector3fMap (),
+ (--cloud.points.end ())->getVector3fMap ());
PointCloud<PointXYZ>::const_iterator pit = cloud.begin ();
PointCloud<PointXYZ>::VectorType::const_iterator pit2 = cloud.points.begin ();
for (; pit < cloud.end (); ++pit2, ++pit)
FILES test_board_estimation.cpp
LINK_WITH pcl_gtest pcl_features pcl_io
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
+ PCL_ADD_TEST(feature_flare_estimation test_flare_estimation
+ FILES test_flare_estimation.cpp
+ LINK_WITH pcl_gtest pcl_features pcl_io
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
PCL_ADD_TEST(feature_shot_lrf_estimation test_shot_lrf_estimation
FILES test_shot_lrf_estimation.cpp
LINK_WITH pcl_gtest pcl_features pcl_io
FILES test_rops_estimation.cpp
LINK_WITH pcl_gtest pcl_io pcl_features
ARGUMENTS "${PCL_SOURCE_DIR}/test/rops_cloud.pcd" "${PCL_SOURCE_DIR}/test/rops_indices.txt" "${PCL_SOURCE_DIR}/test/rops_triangles.txt")
+ PCL_ADD_TEST(feature_gasd_estimation test_gasd_estimation
+ FILES test_gasd_estimation.cpp
+ LINK_WITH pcl_gtest pcl_io pcl_features
+ ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd")
if (BUILD_keypoints)
PCL_ADD_TEST(feature_brisk test_brisk
FILES test_brisk.cpp
endif (BUILD_keypoints)
endif (BUILD_io)
endif (build)
-
#include <gtest/gtest.h>
#include <pcl/point_cloud.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/features/cppf.h>
#include <pcl/io/pcd_io.h>
-using namespace pcl;
-using namespace pcl::io;
-using namespace std;
+typedef pcl::PointXYZRGBNormal PointT;
+static pcl::PointCloud<PointT>::Ptr cloud (new pcl::PointCloud<PointT> ());
-typedef search::KdTree<PointXYZRGBA>::Ptr KdTreePtr;
-
-PointCloud<PointXYZRGBA> cloud;
-vector<int> indices;
-KdTreePtr tree;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, CPPFEstimation)
{
- // Estimate normals
- NormalEstimation<PointXYZRGBA, Normal> normal_estimation;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- normal_estimation.setInputCloud (cloud.makeShared ());
- boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices));
- normal_estimation.setIndices (indicesptr);
- normal_estimation.setSearchMethod (tree);
- normal_estimation.setKSearch (10); // Use 10 nearest neighbors to estimate the normals
- normal_estimation.compute (*normals);
-
- CPPFEstimation <PointXYZRGBA, Normal, CPPFSignature> cppf_estimation;
- cppf_estimation.setInputCloud (cloud.makeShared ());
- cppf_estimation.setInputNormals (normals);
- PointCloud<CPPFSignature>::Ptr feature_cloud (new PointCloud<CPPFSignature> ());
+ pcl::CPPFEstimation <PointT, PointT, pcl::CPPFSignature> cppf_estimation;
+ cppf_estimation.setInputCloud (cloud);
+ cppf_estimation.setInputNormals (cloud);
+ pcl::PointCloud<pcl::CPPFSignature>::Ptr feature_cloud (new pcl::PointCloud<pcl::CPPFSignature> ());
cppf_estimation.compute (*feature_cloud);
// Check for size of output
- EXPECT_EQ (feature_cloud->points.size (), indices.size () * cloud.points.size ());
+ EXPECT_EQ (feature_cloud->size (), cloud->size () * cloud->size ());
// Now check for a few values in the feature cloud
EXPECT_TRUE (pcl_isnan (feature_cloud->points[0].f1));
EXPECT_TRUE (pcl_isnan (feature_cloud->points[0].f10));
EXPECT_TRUE (pcl_isnan (feature_cloud->points[0].alpha_m));
- EXPECT_NEAR (feature_cloud->points[2572].f1, 0.0669282, 1e-4);
+ EXPECT_NEAR (feature_cloud->points[2572].f1, 0.0568356, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f2, -0.1988939, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].f3, 0.7856042, 1e-4);
+ EXPECT_NEAR (feature_cloud->points[2572].f3, 0.7854938, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f4, 0.0533117, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f5, 0.1875000, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f6, 0.0733944, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f8, 0.2380952, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f9, 0.0619469, 1e-4);
EXPECT_NEAR (feature_cloud->points[2572].f10, 0.4431372, 1e-4);
- EXPECT_NEAR (feature_cloud->points[2572].alpha_m, -1.852997, 1e-4);
-
+ EXPECT_NEAR (feature_cloud->points[2572].alpha_m, -1.847514, 1e-4);
}
/* ---[ */
if (argc < 2)
{
std::cerr << "No test file given. Please download `colored_cloud.pcd` and pass its path to the test." << std::endl;
- return (-1);
+ return (1);
}
- if (loadPCDFile<PointXYZRGBA> (argv[1], cloud) < 0)
+ if (pcl::io::loadPCDFile<PointT> (argv[1], *cloud) < 0)
{
std::cerr << "Failed to read test file. Please download `colored_cloud.pcd` and pass its path to the test." << std::endl;
- return (-1);
+ return (1);
}
- indices.resize (cloud.points.size ());
- for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
- indices[i] = i;
-
- tree.reset (new search::KdTree<PointXYZRGBA> (false));
- tree->setInputCloud (cloud.makeShared ());
-
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
}
--- /dev/null
+/*
+* Software License Agreement (BSD License)
+*
+* Point Cloud Library (PCL) - www.pointclouds.org
+* Copyright (c) 2016-, Open Perception, Inc.
+*
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+* * Neither the name of the copyright holder(s) nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+* $Id$
+*
+*/
+
+#include <gtest/gtest.h>
+#include <pcl/point_cloud.h>
+#include <pcl/pcl_tests.h>
+#include <pcl/features/normal_3d.h>
+#include <pcl/io/pcd_io.h>
+#include <pcl/features/flare.h>
+
+typedef pcl::search::KdTree<pcl::PointXYZ>::Ptr KdTreePtr;
+typedef pcl::PointCloud<pcl::PointXYZ>::Ptr PointCloudPtr;
+
+PointCloudPtr cloud;
+KdTreePtr tree;
+
+//sampled surface for the computation of tangent X axis
+PointCloudPtr sampled_cloud;
+KdTreePtr sampled_tree;
+
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, FLARELocalReferenceFrameEstimation)
+{
+ pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal> ());
+ pcl::PointCloud<pcl::ReferenceFrame> bunny_LRF;
+
+ const float mesh_res = 0.005f;
+
+ // Compute normals
+ pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
+
+ ne.setRadiusSearch (2.0f*mesh_res);
+ ne.setViewPoint (1, 1, 10);
+ ne.setInputCloud (cloud);
+ ne.setSearchMethod (tree);
+
+ ne.compute (*normals);
+
+ // Compute FLARE LRF
+ pcl::FLARELocalReferenceFrameEstimation<pcl::PointXYZ, pcl::Normal, pcl::ReferenceFrame> lrf_estimator;
+
+ lrf_estimator.setRadiusSearch (5 * mesh_res);
+ lrf_estimator.setTangentRadius (20 * mesh_res);
+
+ lrf_estimator.setInputCloud (cloud);
+ lrf_estimator.setSearchSurface (cloud);
+ lrf_estimator.setInputNormals (normals);
+ lrf_estimator.setSearchMethod (tree);
+ lrf_estimator.setSearchMethodForSampledSurface (sampled_tree);
+ lrf_estimator.setSearchSampledSurface (sampled_cloud);
+
+ lrf_estimator.compute (bunny_LRF);
+
+ // TESTS
+ EXPECT_TRUE (bunny_LRF.is_dense);
+
+ // Expected Results
+ float score_15 = -0.0059431493f;
+ Eigen::Vector3f point_15_x (-0.46138301f, 0.75752199f, -0.46182927f);
+ Eigen::Vector3f point_15_y (-0.78785944f, -0.11049186f, 0.60586232f);
+ Eigen::Vector3f point_15_z (0.40792558f, 0.64339107f, 0.64779979f);
+ float score_45 = 0.018918669f;
+ Eigen::Vector3f point_45_x (0.63724411f, -0.74846953f, -0.18361199f);
+ Eigen::Vector3f point_45_y (0.76468521f, 0.58447874f, 0.27136898f);
+ Eigen::Vector3f point_45_z (-0.095794097f, -0.31333363f, 0.94479918f);
+ float score_163 = -0.050190225f;
+ Eigen::Vector3f point_163_x (-0.67064381f, 0.45722002f, 0.58411193f);
+ Eigen::Vector3f point_163_y (-0.58332449f, -0.81150508f, -0.034525186f);
+ Eigen::Vector3f point_163_z (0.45822418f, -0.36388087f, 0.81093854f);
+ float score_253 = -0.025943652f;
+ Eigen::Vector3f point_253_x (0.88240892f, -0.26585102f, 0.38817233f);
+ Eigen::Vector3f point_253_y (0.19853911f, 0.95840079f, 0.20506138f);
+ Eigen::Vector3f point_253_z (-0.42654046f, -0.10388060f, 0.89848322f);
+
+
+ //Test Results
+ for (int d = 0; d < 3; ++d)
+ {
+ EXPECT_NEAR (point_15_x[d], bunny_LRF.at (15).x_axis[d], 1E-3);
+ EXPECT_NEAR (point_15_y[d], bunny_LRF.at (15).y_axis[d], 1E-3);
+ EXPECT_NEAR (point_15_z[d], bunny_LRF.at (15).z_axis[d], 1E-3);
+
+ EXPECT_NEAR (point_45_x[d], bunny_LRF.at (45).x_axis[d], 1E-3);
+ EXPECT_NEAR (point_45_y[d], bunny_LRF.at (45).y_axis[d], 1E-3);
+ EXPECT_NEAR (point_45_z[d], bunny_LRF.at (45).z_axis[d], 1E-3);
+
+ EXPECT_NEAR (point_163_x[d], bunny_LRF.at (163).x_axis[d], 1E-3);
+ EXPECT_NEAR (point_163_y[d], bunny_LRF.at (163).y_axis[d], 1E-3);
+ EXPECT_NEAR (point_163_z[d], bunny_LRF.at (163).z_axis[d], 1E-3);
+
+ EXPECT_NEAR (point_253_x[d], bunny_LRF.at (253).x_axis[d], 1E-3);
+ EXPECT_NEAR (point_253_y[d], bunny_LRF.at (253).y_axis[d], 1E-3);
+ EXPECT_NEAR (point_253_z[d], bunny_LRF.at (253).z_axis[d], 1E-3);
+ }
+ EXPECT_NEAR (score_15, lrf_estimator.getSignedDistancesFromHighestPoints ()[15], 1E-4);
+ EXPECT_NEAR (score_45, lrf_estimator.getSignedDistancesFromHighestPoints ()[45], 1E-4);
+ EXPECT_NEAR (score_163, lrf_estimator.getSignedDistancesFromHighestPoints ()[163], 1E-4);
+ EXPECT_NEAR (score_253, lrf_estimator.getSignedDistancesFromHighestPoints ()[253], 1E-4);
+}
+
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ cloud.reset (new pcl::PointCloud<pcl::PointXYZ> ());
+
+ if (pcl::io::loadPCDFile<pcl::PointXYZ> (argv[1], *cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ tree.reset (new pcl::search::KdTree<pcl::PointXYZ> (false));
+ tree->setInputCloud (cloud);
+
+ //create and set sampled point cloud for computation of X axis
+ const float sampling_perc = 0.2f;
+ const float sampling_incr = 1.0f / sampling_perc;
+
+ sampled_cloud.reset (new pcl::PointCloud<pcl::PointXYZ> ());
+
+ std::vector<int> sampled_indices;
+ for (float sa = 0.0f; sa < (float)cloud->points.size (); sa += sampling_incr)
+ sampled_indices.push_back (static_cast<int> (sa));
+ copyPointCloud (*cloud, sampled_indices, *sampled_cloud);
+
+ sampled_tree.reset (new pcl::search::KdTree<pcl::PointXYZ> (false));
+ sampled_tree->setInputCloud (sampled_cloud);
+
+ //start tests
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2016-, Open Perception, Inc.
+ * Copyright (c) 2016, Voxar Labs, CIn-UFPE / DEINFO-UFRPE
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/point_cloud.h>
+#include <pcl/features/gasd.h>
+#include <pcl/io/pcd_io.h>
+
+pcl::PointCloud<pcl::PointXYZ>::Ptr cloud;
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+void
+createColorCloud (pcl::PointCloud<pcl::PointXYZRGBA> &colorCloud)
+{
+ for (size_t i = 0; i < cloud->points.size (); ++i)
+ {
+ pcl::PointXYZRGBA p;
+ p.getVector3fMap () = cloud->points[i].getVector3fMap ();
+
+ p.rgba = ( (i % 255) << 16) + ( ( (255 - i) % 255) << 8) + ( (i * 37) % 255);
+ colorCloud.push_back (p);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST(PCL, GASDTransformEstimation)
+{
+ pcl::GASDEstimation<pcl::PointXYZ, pcl::GASDSignature512> gasd;
+ gasd.setInputCloud (cloud);
+
+ pcl::PointCloud<pcl::GASDSignature512> descriptor;
+ gasd.compute (descriptor);
+
+ Eigen::Matrix4f trans = gasd.getTransform ();
+
+ Eigen::Matrix4f ref_trans;
+ ref_trans << 0.661875, -0.704840, 0.255192, 0.0846344,
+ -0.748769, -0.605475, 0.269713, 0.0330151,
+ -0.035592, -0.369596, -0.928511, 0.0622551,
+ 0, 0, 0, 1;
+
+ for (int i = 0; i < trans.rows(); ++i)
+ {
+ for (int j = 0; j < trans.cols (); ++j)
+ {
+ EXPECT_NEAR (trans (i, j), ref_trans (i, j), 1e-5);
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, GASDShapeEstimationNoInterp)
+{
+ pcl::GASDEstimation<pcl::PointXYZ, pcl::GASDSignature512> gasd;
+ gasd.setInputCloud (cloud);
+ gasd.setShapeHistsInterpMethod (pcl::INTERP_NONE);
+
+ pcl::PointCloud<pcl::GASDSignature512> descriptor;
+ gasd.compute (descriptor);
+
+ const float ref_values[512] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.0202, 0, 0, 0, 0, 0, 0, 1.76768, 1.26263, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2.77778, 0, 0, 0, 0, 0, 0, 1.0101, 2.0202, 0, 0, 0, 0, 0, 0, 2.0202, 0.757576, 0, 0,
+ 0, 0, 0, 1.0101, 3.28283, 0, 0, 0, 0, 0, 0, 0, 1.51515, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.51515, 0, 0, 0, 0, 0, 0, 0, 1.51515, 0, 0, 0, 0, 0, 0,
+ 2.27273, 0.757576, 0, 0, 0, 0, 0, 2.0202, 2.0202, 0, 0, 0, 0, 0, 0, 1.0101, 2.27273, 2.27273, 0, 0, 0, 0, 0,
+ 0, 0, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1.0101, 0, 0, 0, 0, 0, 0, 0, 2.77778, 0.252525, 0, 0, 0, 0, 0, 0, 2.77778, 0, 0, 0, 0, 0, 0, 0, 2.0202,
+ 1.0101, 0.252525, 0, 0, 0, 0, 0, 0, 2.0202, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.757576, 0, 0, 0, 0, 0, 0, 0, 3.0303, 0, 0, 0, 0, 0, 0, 0, 2.77778, 0, 0, 0, 0, 0, 0, 0, 2.52525, 0, 0, 0, 0,
+ 0, 0, 0, 1.0101, 4.29293, 1.26263, 0, 0, 0, 0, 0, 0, 1.0101, 1.76768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0.757576, 1.26263, 0, 0, 0, 0, 0, 0, 2.77778, 0.252525, 0, 0, 0, 0, 0, 0, 2.52525, 0,
+ 0, 0, 0, 0, 0, 0, 2.77778, 0.252525, 0, 0, 0, 0, 0, 0, 0.50505, 3.53535, 0, 0, 0, 0, 0, 0, 0, 1.0101, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.51515, 3.53535, 0, 0, 0, 0,
+ 0, 0, 2.0202, 1.51515, 0, 0, 0, 0, 0, 0, 1.76768, 1.26263, 0, 0, 0, 0, 0, 0, 0.757576, 0.757576, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2.0202, 0.252525, 0, 0, 0, 0, 0, 0, 2.52525, 0, 0, 0, 0, 0, 0, 0, 0.50505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ EXPECT_EQ (descriptor.points.size (), 1);
+ for (size_t i = 0; i < size_t (descriptor.points[0].descriptorSize ()); ++i)
+ {
+ EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST(PCL, GASDShapeEstimationTrilinearInterp)
+{
+ pcl::GASDEstimation<pcl::PointXYZ, pcl::GASDSignature512> gasd;
+ gasd.setInputCloud (cloud);
+
+ pcl::PointCloud<pcl::GASDSignature512> descriptor;
+ gasd.compute (descriptor);
+
+ const float ref_values[512] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00836128, 0.00588401, 0, 0, 0, 0, 0, 0.107094, 0.662119, 0.175875, 0,
+ 0, 0, 0, 0.00971758, 0.839464, 0.58815, 0.0212094, 0, 0, 0, 0, 0.231226, 0.0734101, 0.0136519, 0, 0, 0, 0, 0,
+ 0.0148243, 0.00481199, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0175941,
+ 0.00421349, 0, 0, 0, 0, 0, 0.283015, 1.10961, 0.0363255, 0, 0, 0, 0, 0, 0.330514, 0.765326, 0.0400337, 0, 0,
+ 0, 0, 0.113547, 0.602405, 0.760736, 0.162908, 0, 0, 0, 0, 0.19996, 0.568878, 0.0862251, 0.00529312, 0, 0, 0,
+ 0, 0.029714, 0.0622333, 0.0943459, 0, 0, 0, 0, 0, 0, 0.000478281, 0.0027764, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0.0274657, 0.00816464, 0, 0, 0, 0, 0, 0.0368769, 0.609568, 0.0643177, 0, 0, 0, 0, 0, 0.231449,
+ 0.577855, 0.00143416, 0, 0, 0, 0, 0.150154, 1.03347, 0.499773, 0, 0, 0, 0, 0, 0.856571, 1.09567, 0.0753022, 0,
+ 0, 0, 0, 0, 0.589158, 1.39603, 1.21675, 0.000193459, 0, 0, 0, 0, 0, 0.0955702, 0.30635, 0.000289646, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.030262, 0.0172037, 0, 0, 0, 0, 0, 0.00457827,
+ 0.375081, 0.115732, 0, 0, 0, 0, 0, 0.0289331, 1.70761, 0.498999, 0, 0, 0, 0, 0, 0.0748183, 1.59429, 0.1418, 0,
+ 0, 0, 0, 0, 0.0890234, 1.12343, 1.32057, 0.192363, 2.03302e-005, 0, 0, 0, 0, 0.310746, 1.158, 0.215904,
+ 0.00137463, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.000985736, 0.525969, 0.173892, 0, 0, 0,
+ 0, 0, 0.03184, 1.10013, 0.177279, 0, 0, 0, 0, 0, 0.157355, 1.27302, 0.0196858, 0, 0, 0, 0, 0, 0.0968957,
+ 1.44605, 0.111663, 0, 0, 0, 0, 0, 0.00114074, 0.414239, 1.5723, 0.589681, 0.000551233, 0, 0, 0, 0, 0.0759757,
+ 0.837368, 0.721145, 0.0346183, 0, 0, 0, 0, 0, 0.0230347, 0.00829717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.000667462, 0.265466, 0.211503, 0.0281879, 0, 0, 0, 0, 0.130565, 1.38486, 0.168072, 0.020685, 0, 0, 0, 0,
+ 0.242921, 0.93304, 0, 0, 0, 0, 0, 0, 0.0796441, 1.00082, 0.289627, 0, 0, 0, 0, 0, 0.000704473, 0.596791,
+ 1.22236, 0.015159, 1.08117e-005, 0, 0, 0, 0, 0.0205081, 0.214601, 0.0495967, 0.000464472, 0, 0, 0, 0, 0,
+ 0.0129949, 0.0046808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00173052, 0.319008, 0.0513479, 0, 0, 0, 0,
+ 0.0156562, 0.232644, 1.43675, 0.106155, 0, 0, 0, 0, 0.0809344, 0.605434, 0.803232, 0.0557288, 0, 0, 0, 0,
+ 0.0272915, 0.607169, 0.366215, 0.0111251, 0, 0, 0, 0, 4.34266e-005, 0.141203, 0.0975747, 0, 0, 0, 0, 0, 0,
+ 0.000786102, 0.00152033, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.000134866,
+ 0.0595355, 0.00785339, 0, 0, 0, 0, 0, 0.0386555, 0.582852, 0.0295824, 0, 0, 0, 0, 0, 0.118361, 1.33305,
+ 0.209958, 0, 0, 0, 0, 0, 0.0603114, 0.412731, 0.0294292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ EXPECT_EQ (descriptor.points.size (), 1);
+ for (size_t i = 0; i < size_t (descriptor.points[0].descriptorSize ()); ++i)
+ {
+ EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, GASDShapeAndColorEstimationNoInterp)
+{
+ // Create fake point cloud with colors
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloudWithColors (new pcl::PointCloud<pcl::PointXYZRGBA>);
+ createColorCloud (*cloudWithColors);
+
+ pcl::GASDColorEstimation<pcl::PointXYZRGBA, pcl::GASDSignature984> gasd;
+ gasd.setInputCloud (cloudWithColors);
+
+ pcl::PointCloud<pcl::GASDSignature984> descriptor;
+ gasd.compute (descriptor);
+
+ const float ref_values[984] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0101, 0, 0, 0, 0, 3.53535, 1.26263, 0, 0, 0,
+ 1.51515, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.757576, 0, 0, 0, 0, 0.757576, 6.06061, 0,
+ 0, 0, 0, 4.0404, 1.51515, 0, 0, 0, 0, 6.81818, 0, 0, 0, 0, 0, 1.76768, 1.76768, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0.50505, 0, 0, 0, 0, 0, 3.78788, 0.50505, 0, 0, 0, 0, 5.80808, 0, 0, 0, 0, 0, 2.0202,
+ 4.29293, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.78788, 0.252525, 0, 0, 0, 0, 4.79798, 0, 0, 0, 0,
+ 0, 4.0404, 1.26263, 0, 0, 0, 0, 0.50505, 6.56566, 2.77778, 0, 0, 0, 0, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2.27273, 3.0303, 0, 0, 0, 0, 4.79798, 0, 0, 0, 0, 0, 4.0404, 2.0202, 0, 0, 0, 0, 0.757576, 1.51515, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.27273, 0, 0, 0, 0, 0.50505, 5.80808, 0, 0, 0, 0, 0, 1.26263,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1.26263, 0.252525, 0.50505, 0.50505, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.757576, 1.0101, 0.757576, 0.50505, 0, 0, 0, 0, 0,
+ 0.252525, 1.51515, 1.51515, 0.252525, 0, 0.252525, 0.757576, 1.76768, 1.26263, 0, 0, 0.252525, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.50505, 1.26263, 0.757576, 1.51515, 0.757576,
+ 1.0101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.252525,
+ 0.252525, 0, 0.252525, 0.50505, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1.51515, 1.76768, 1.26263, 1.26263, 0.252525, 0, 0, 0, 0, 0, 0, 0.757576, 0.50505, 0,
+ 0, 0.757576, 0.252525, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0.252525, 0, 0.252525, 1.51515, 3.28283, 2.0202, 1.76768, 0.252525, 0, 0.50505, 1.26263, 1.0101, 0, 0, 0,
+ 0.757576, 0.757576, 1.51515, 0.50505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.50505, 0.50505, 0.757576, 0.50505, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.252525, 0.252525, 0.757576,
+ 0, 0, 0, 0.252525, 0, 0, 0, 0, 0, 0, 0.252525, 0, 0, 0, 0, 0.252525, 0.50505, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0101, 1.26263, 1.0101, 0.252525, 2.27273, 1.51515, 0,
+ 0.50505, 1.26263, 0.757576, 1.0101, 0.252525, 0.252525, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.50505, 0.252525, 0, 0, 0.50505, 0.252525, 0, 0, 1.0101,
+ 0.757576, 1.76768, 1.76768, 0, 0, 0, 1.76768, 2.77778, 2.52525, 2.27273, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0101, 1.26263,
+ 1.0101, 0.50505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.50505, 0.50505, 0.252525, 0.252525, 0, 0, 0.252525, 0.50505, 0, 0.757576, 0,
+ 0.50505, 2.52525, 0, 0, 0, 0, 0, 0, 0, 0, 1.26263, 2.52525, 3.53535, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.252525, 0.757576, 0, 0, 0.252525, 0.252525, 0, 0, 0.252525, 0.252525, 0.252525,
+ 0.252525, 0.50505, 0, 0, 0, 0, 0, 0, 0, 0, 1.26263, 0.757576, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ EXPECT_EQ (descriptor.points.size (), 1);
+ for (size_t i = 0; i < size_t (descriptor.points[0].descriptorSize ()); ++i)
+ {
+ EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST(PCL, GASDShapeAndColorEstimationQuadrilinearInterp)
+{
+ // Create fake point cloud with colors
+ pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloudWithColors (new pcl::PointCloud<pcl::PointXYZRGBA>);
+ createColorCloud (*cloudWithColors);
+
+ pcl::GASDColorEstimation<pcl::PointXYZRGBA, pcl::GASDSignature984> gasd;
+ gasd.setInputCloud (cloudWithColors);
+ gasd.setColorHistsInterpMethod (pcl::INTERP_QUADRILINEAR);
+
+ pcl::PointCloud<pcl::GASDSignature984> descriptor;
+ gasd.compute (descriptor);
+
+ const float ref_values[984] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0101, 0, 0, 0, 0, 3.53535, 1.26263, 0, 0, 0,
+ 1.51515, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.757576, 0, 0, 0, 0, 0.757576, 6.06061, 0,
+ 0, 0, 0, 4.0404, 1.51515, 0, 0, 0, 0, 6.81818, 0, 0, 0, 0, 0, 1.76768, 1.76768, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0.50505, 0, 0, 0, 0, 0, 3.78788, 0.50505, 0, 0, 0, 0, 5.80808, 0, 0, 0, 0, 0, 2.0202,
+ 4.29293, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.78788, 0.252525, 0, 0, 0, 0, 4.79798, 0, 0, 0,
+ 0, 0, 4.0404, 1.26263, 0, 0, 0, 0, 0.50505, 6.56566, 2.77778, 0, 0, 0, 0, 0.252525, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2.27273, 3.0303, 0, 0, 0, 0, 4.79798, 0, 0, 0, 0, 0, 4.0404, 2.0202, 0, 0, 0, 0, 0.757576,
+ 1.51515, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.27273, 0, 0, 0, 0, 0.50505, 5.80808, 0, 0,
+ 0, 0, 0, 1.26263, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.0895643, 0.143288, 0.0720839, 0.125029, 0.171768, 0.140765, 0.00434639, 0.00200408, 0, 0, 0.00597724,
+ 0.0525044, 0.193269, 0.429334, 0.207777, 0.24761, 0.268739, 0.237959, 0.0413768, 0.00387127, 0,
+ 0.000931569, 0.00385942, 0.0404541, 0.00418768, 0, 0, 0, 0, 0.00142713, 0.0162888, 0, 0, 0.000403183,
+ 0, 0, 0, 0.00116143, 0.00354532, 0.00280258, 0.00148538, 0, 0, 0, 0, 0, 0, 0.0123895, 0.248346, 0.591086,
+ 0.636957, 0.597111, 0.656083, 0.378095, 0.0377745, 0.0129163, 0, 0, 0.130372, 0.659749, 0.927181, 0.498144,
+ 0.27824, 0.426372, 0.616247, 1.04577, 0.643063, 0.0249504, 0, 0.0606673, 0.0426988, 0.156632, 0.0643944, 0,
+ 0, 0, 0, 0.0177954, 0.0985798, 0, 0, 0.0262568, 0, 0, 0, 0.00271963, 0.0233101, 0.0331309, 0.0160384, 0,
+ 0, 0, 0, 0, 0, 0.00471289, 0.3579, 0.851654, 0.893212, 1.1593, 0.81276, 0.451746, 0.00573487,
+ 0.00665285, 0.0159345, 0.0197507, 0.0100332, 0.116356, 0.258904, 0.285916, 0.137536, 0.329754, 0.201282,
+ 0.276937, 0.0504019, 0.00140554, 0.00331221, 0.00406454, 0.00205266, 0.00332975, 0.00195687, 0, 0, 0, 0,
+ 3.03073e-005, 3.03073e-005, 0, 0, 0, 0, 0, 0, 0, 0.000609614, 0.000500804, 1.35678e-005, 0, 0, 0, 0, 0, 0,
+ 0, 0.0354586, 0.0530443, 0.0349205, 0.0647915, 0.0347517, 0.0334093, 0.000258137, 0.00104701, 0.00399398,
+ 0.00607182, 0, 0, 0.0145624, 0.0392641, 0.0252404, 0.0542372, 0.0210596, 0.0219414, 0.000380611,
+ 0.000221201, 0.000827126, 0.00124953, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0.000831429, 0, 0.0802922, 0.12617, 0.158971, 0.151476, 0.134574, 0.0953864, 0.00562285, 0.00363446,
+ 0.000513492, 0.000831429, 0.000155639, 0, 0.175686, 0.321095, 0.233626, 0.252716, 0.239946, 0.190063, 0.00987096,
+ 0.00659357, 0.000123905, 0.000155639, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0012772, 0.00426605, 0.00325023,
+ 0.00231641, 0, 0, 0, 0, 0, 0.0507473, 0.0557701, 0.296648, 0.941834, 1.30216, 1.22476, 1.00798, 0.328648,
+ 0.0418844, 0.0566177, 0.0853042, 0.107112, 0.0125218, 0.012783, 0.350977, 0.584441, 0.549991, 0.690193,
+ 0.674605, 0.354125, 0.0598277, 0.0514012, 0.0274164, 0.0305356, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0.0044642, 0.045621, 0.0482378, 0.0215847, 0, 0, 0, 0, 0, 0.149713, 0.0196222, 0.265814, 1.27681,
+ 2.13404, 2.0593, 1.40056, 0.405919, 0.00728233, 0.233705, 0.784621, 0.679261, 0.0656672, 0.00416686,
+ 0.167507, 0.862586, 1.01845, 1.07935, 0.827779, 0.200586, 0.00256227, 0.0732096, 0.320982, 0.275378, 0, 0,
+ 0.0120096, 0.0255571, 0.0166044, 0.0161996, 0.0177707, 0.00366082, 0, 0, 0, 0, 0, 0, 0, 0, 0.0010984,
+ 0.00114305, 1.79879e-005, 0, 0, 0, 0, 0, 0.0228719, 0, 0.0425383, 0.218586, 0.341904, 0.287174, 0.163829,
+ 0.0258371, 0.000214485, 0.0198714, 0.118435, 0.11307, 0.0161313, 0, 0.0829918, 0.553307, 0.643753,
+ 0.581184, 0.474243, 0.0471955, 0.000316249, 0.00722545, 0.0624794, 0.0518481, 0, 0, 0.0144017, 0.0326961,
+ 0.0204908, 0.0169458, 0.0223265, 0.00392622, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.132962,
+ 0.0193449, 0.0551697, 0.105996, 0.407031, 0.35539, 0.0566072, 0.0303696, 0.0243085, 0.144756, 0.15231,
+ 0.130866, 0.147091, 0.0111985, 0.0176582, 0.0786188, 0.24031, 0.179748, 0.0279387, 0.00883565, 0.0148111,
+ 0.134383, 0.172131, 0.180972, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.563728, 0.841891, 0.651939, 0.339866, 1.10015, 1.0062, 0.242513, 0.509944, 0.866722, 0.624862, 0.607641,
+ 0.594272, 0.328508, 0.189046, 0.152908, 0.161903, 0.442358, 0.383888, 0.109291, 0.132222, 0.157232,
+ 0.276749, 0.317518, 0.356862, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.436075, 0.432522, 0.0892119, 0.386335, 0.986563, 0.919872, 0.313085, 0.123551, 0.543859, 0.596885,
+ 0.920122, 1.01302, 0.249186, 0.112221, 0.100269, 0.778301, 1.36733, 1.47131, 0.97278, 0.0962166,
+ 0.12436, 0.241092, 0.422573, 0.432224, 0, 0, 0.0292873, 0.069272, 0.0241701, 0.0450751, 0.0621059,
+ 0.0105498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0145794, 0.000546371, 0.00160651, 0.232425,
+ 0.411777, 0.279927, 0.0993127, 0.00288046, 5.21399e-005, 0.00594067, 0.0722626, 0.0420616, 0.0115545,
+ 0.000641368, 0.101776, 0.773579, 1.03488, 0.850195, 0.654514, 0.0566626, 5.28226e-005, 0.00465405,
+ 0.0493028, 0.0260373, 0, 0, 0.035701, 0.0890744, 0.0369472, 0.0510598, 0.0810702, 0.0114321, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.155581, 0.0081056, 0.0392221, 0.0102823, 0.0121948, 0.0159764,
+ 0.00453647, 0.016245, 0.0122912, 0.113791, 0.0920094, 0.14147, 0.363948, 0.0102655, 0.0146145,
+ 0.00654133, 0.0112985, 0.0106292, 0.00172027, 0.00635388, 0.00840588, 0.165742, 0.242665, 0.403613, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 4.03759e-006, 3.95477e-006, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.718571,
+ 0.431673, 0.22117, 0.0441141, 0.0132298, 0.0152672, 0.0713271, 0.307082, 0.443776, 0.69396, 0.604915,
+ 0.694449, 1.17028, 0.209737, 0.0721899, 0.022148, 0.0104337, 0.0100016, 0.0245223, 0.12181, 0.152934,
+ 0.979424, 1.55304, 1.81121, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.000199128, 0.000195043, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0.486315, 0.325842, 0.0116061, 0.0347715, 0.180407, 0.21691, 0.0546041, 0.0337822,
+ 0.322526, 0.517204, 0.312276, 0.276607, 0.460413, 0.185895, 0.00199094, 0.0450161, 0.180782, 0.233176,
+ 0.0872505, 0.0137579, 0.177266, 0.554998, 0.503438, 0.351193, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00515609, 0.00142381, 0, 0.00913963, 0.0473098, 0.0779652, 0.0204231,
+ 0.000522999, 7.43773e-005, 0.000796905, 0.00721695, 0.00654336, 0.00568307, 0.00167137, 0, 0.0142623,
+ 0.0640188, 0.0967736, 0.0307571, 0.00109069, 7.53513e-005, 0.000807341, 0.007594, 0.00643415, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ EXPECT_EQ (descriptor.points.size (), 1);
+ for (size_t i = 0; i < size_t( descriptor.points[0].descriptorSize ()); ++i)
+ {
+ EXPECT_NEAR (descriptor.points[0].histogram[i], ref_values[i], 1e-5);
+ }
+}
+
+/* ---[ */
+int
+main (int argc,
+ char** argv)
+{
+ if (argc < 2)
+ {
+ std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ cloud.reset (new pcl::PointCloud<pcl::PointXYZ>);
+
+ if (pcl::io::loadPCDFile < pcl::PointXYZ > (argv[1], *cloud) < 0)
+ {
+ std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
+ return (-1);
+ }
+
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
*
*/
+#include <pcl/pcl_config.h>
+#ifdef PCL_ONLY_CORE_POINT_TYPES
+ #define PCL_NO_PRECOMPILE
+#endif
+
#include <gtest/gtest.h>
#include <pcl/point_cloud.h>
-#include <pcl/features/normal_3d.h>
#include <pcl/features/pfh.h>
#include <pcl/features/fpfh.h>
#include <pcl/features/fpfh_omp.h>
#include <pcl/features/gfpfh.h>
#include <pcl/io/pcd_io.h>
-using namespace pcl;
-using namespace pcl::io;
-using namespace std;
-
-typedef search::KdTree<PointXYZ>::Ptr KdTreePtr;
+typedef pcl::PointNormal PointT;
+typedef pcl::search::KdTree<PointT>::Ptr KdTreePtr;
+using pcl::PointCloud;
-PointCloud<PointXYZ> cloud;
-vector<int> indices;
-KdTreePtr tree;
+static PointCloud<PointT>::Ptr cloud (new PointCloud<PointT> ());
+static std::vector<int> indices;
+static KdTreePtr tree;
///////////////////////////////////////////////////////////////////////////////////
-template <typename FeatureEstimation, typename PointT, typename NormalT, typename OutputT> void
+template<template<class, class, class> class FeatureEstimation, typename PointT, typename NormalT, typename OutputT> void
testIndicesAndSearchSurface (const typename PointCloud<PointT>::Ptr & points,
const typename PointCloud<NormalT>::Ptr & normals,
- const boost::shared_ptr<vector<int> > & indices, int ndims)
+ const boost::shared_ptr<std::vector<int> > & indices, int ndims)
+
{
+ typedef pcl::search::KdTree<PointT> KdTreeT;
+ typedef FeatureEstimation<PointT, NormalT, OutputT> FeatureEstimationT;
+
//
// Test setIndices and setSearchSurface
//
PointCloud<OutputT> full_output, output0, output1, output2;
// Compute for all points and then subsample the results
- FeatureEstimation est0;
- est0.setSearchMethod (typename search::KdTree<PointT>::Ptr (new search::KdTree<PointT>));
+ FeatureEstimationT est0;
+ est0.setSearchMethod (typename KdTreeT::Ptr (new KdTreeT));
est0.setKSearch (10);
est0.setInputCloud (points);
est0.setInputNormals (normals);
// Compute with all points as "search surface" and the specified sub-cloud as "input"
typename PointCloud<PointT>::Ptr subpoints (new PointCloud<PointT>);
copyPointCloud (*points, *indices, *subpoints);
- FeatureEstimation est1;
- est1.setSearchMethod (typename search::KdTree<PointT>::Ptr (new search::KdTree<PointT>));
+ FeatureEstimationT est1;
+ est1.setSearchMethod (typename KdTreeT::Ptr (new KdTreeT));
est1.setKSearch (10);
est1.setInputCloud (subpoints);
est1.setSearchSurface (points);
est1.compute (output1);
// Compute with all points as "input" and the specified indices
- FeatureEstimation est2;
- est2.setSearchMethod (typename search::KdTree<PointT>::Ptr (new search::KdTree<PointT>));
+ FeatureEstimationT est2;
+ est2.setSearchMethod (typename KdTreeT::Ptr (new KdTreeT));
est2.setKSearch (10);
est2.setInputCloud (points);
est2.setInputNormals (normals);
//
PointCloud<OutputT> output3, output4;
- boost::shared_ptr<vector<int> > indices2 (new vector<int> (0));
+ boost::shared_ptr<std::vector<int> > indices2 (new std::vector<int> (0));
for (size_t i = 0; i < (indices->size ()/2); ++i)
indices2->push_back (static_cast<int> (i));
// Compute with all points as search surface + the specified sub-cloud as "input" but for only a subset of indices
- FeatureEstimation est3;
- est3.setSearchMethod (typename search::KdTree<PointT>::Ptr (new search::KdTree<PointT>));
+ FeatureEstimationT est3;
+ est3.setSearchMethod (typename KdTreeT::Ptr (new KdTreeT));
est3.setKSearch (10);
est3.setSearchSurface (points);
est3.setInputNormals (normals);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, PFHEstimation)
{
- float f1, f2, f3, f4;
+ using pcl::PFHSignature125;
- // Estimate normals first
- NormalEstimation<PointXYZ, Normal> n;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- // set parameters
- n.setInputCloud (cloud.makeShared ());
- boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices));
- n.setIndices (indicesptr);
- n.setSearchMethod (tree);
- n.setKSearch (10); // Use 10 nearest neighbors to estimate the normals
- // estimate
- n.compute (*normals);
+ float f1, f2, f3, f4;
- PFHEstimation<PointXYZ, Normal, PFHSignature125> pfh;
- pfh.setInputNormals (normals);
- EXPECT_EQ (pfh.getInputNormals (), normals);
+ pcl::PFHEstimation<PointT, PointT, PFHSignature125> pfh;
+ pfh.setInputNormals (cloud);
+ EXPECT_EQ (pfh.getInputNormals (), cloud);
// computePairFeatures
- pfh.computePairFeatures (cloud, *normals, 0, 12, f1, f2, f3, f4);
+ pfh.computePairFeatures (*cloud, *cloud, 0, 12, f1, f2, f3, f4);
EXPECT_NEAR (f1, -0.072575, 1e-4);
EXPECT_NEAR (f2, -0.040221, 1e-4);
EXPECT_NEAR (f3, 0.068133, 1e-4);
// computePointPFHSignature
int nr_subdiv = 3;
Eigen::VectorXf pfh_histogram (nr_subdiv * nr_subdiv * nr_subdiv);
- pfh.computePointPFHSignature (cloud, *normals, indices, nr_subdiv, pfh_histogram);
+ pfh.computePointPFHSignature (*cloud, *cloud, indices, nr_subdiv, pfh_histogram);
EXPECT_NEAR (pfh_histogram[0], 0.932506, 1e-2);
EXPECT_NEAR (pfh_histogram[1], 2.32429 , 1e-2);
EXPECT_NEAR (pfh_histogram[2], 0.357477, 1e-2);
// Object
PointCloud<PFHSignature125>::Ptr pfhs (new PointCloud<PFHSignature125> ());
+ boost::shared_ptr<std::vector<int> > indicesptr (new std::vector<int> (indices));
// set parameters
- pfh.setInputCloud (cloud.makeShared ());
+ pfh.setInputCloud (cloud);
pfh.setIndices (indicesptr);
pfh.setSearchMethod (tree);
pfh.setKSearch (static_cast<int> (indices.size ()));
// Test results when setIndices and/or setSearchSurface are used
- boost::shared_ptr<vector<int> > test_indices (new vector<int> (0));
- for (size_t i = 0; i < cloud.size (); i+=3)
+ boost::shared_ptr<std::vector<int> > test_indices (new std::vector<int> (0));
+ for (size_t i = 0; i < cloud->size (); i+=3)
test_indices->push_back (static_cast<int> (i));
- testIndicesAndSearchSurface<PFHEstimation<PointXYZ, Normal, PFHSignature125>, PointXYZ, Normal, PFHSignature125>
- (cloud.makeShared (), normals, test_indices, 125);
+ testIndicesAndSearchSurface<pcl::PFHEstimation, PointT, PointT, PFHSignature125>
+ (cloud, cloud, test_indices, 125);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, FPFHEstimation)
+
+using pcl::FPFHEstimation;
+using pcl::FPFHEstimationOMP;
+using pcl::FPFHSignature33;
+
+// "Placeholder" for the type specialized test fixture
+template<typename T>
+struct FPFHTest;
+
+// Template specialization test for FPFHEstimation
+template<>
+struct FPFHTest<FPFHEstimation<PointT, PointT, FPFHSignature33> >
+ : public ::testing::Test
{
- // Estimate normals first
- NormalEstimation<PointXYZ, Normal> n;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- // set parameters
- n.setInputCloud (cloud.makeShared ());
- boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices));
- n.setIndices (indicesptr);
- n.setSearchMethod (tree);
- n.setKSearch (10); // Use 10 nearest neighbors to estimate the normals
- // estimate
- n.compute (*normals);
+ FPFHEstimation<PointT, PointT, FPFHSignature33> fpfh;
+};
- FPFHEstimation<PointXYZ, Normal, FPFHSignature33> fpfh;
- fpfh.setInputNormals (normals);
- EXPECT_EQ (fpfh.getInputNormals (), normals);
+// Template specialization test for FPFHEstimationOMP
+template<>
+struct FPFHTest<FPFHEstimationOMP<PointT, PointT, FPFHSignature33> >
+ : public ::testing::Test
+{
+ // Default Constructor is defined to instantiate 4 threads
+ FPFHTest<FPFHEstimationOMP<PointT, PointT, FPFHSignature33> > ()
+ : fpfh (4)
+ {}
+
+ FPFHEstimationOMP<PointT, PointT, FPFHSignature33> fpfh;
+};
+
+// Types which will be instantiated
+typedef ::testing::Types<FPFHEstimation<PointT, PointT, FPFHSignature33>,
+ FPFHEstimationOMP<PointT, PointT, FPFHSignature33> > FPFHEstimatorTypes;
+TYPED_TEST_CASE (FPFHTest, FPFHEstimatorTypes);
+
+// This is a copy of the old FPFHEstimation test which will now
+// be applied to both FPFHEstimation and FPFHEstimationOMP
+TYPED_TEST (FPFHTest, Estimation)
+{
+ // Create reference
+ TypeParam& fpfh = this->fpfh;
+ fpfh.setInputNormals (cloud);
+ EXPECT_EQ (fpfh.getInputNormals (), cloud);
// computePointSPFHSignature
int nr_subdiv = 11; // use the same number of bins for all three angular features
Eigen::MatrixXf hist_f1 (indices.size (), nr_subdiv), hist_f2 (indices.size (), nr_subdiv), hist_f3 (indices.size (), nr_subdiv);
hist_f1.setZero (); hist_f2.setZero (); hist_f3.setZero ();
for (int i = 0; i < static_cast<int> (indices.size ()); ++i)
- fpfh.computePointSPFHSignature (cloud, *normals, i, i, indices, hist_f1, hist_f2, hist_f3);
+ fpfh.computePointSPFHSignature (*cloud, *cloud, i, i, indices, hist_f1, hist_f2, hist_f3);
EXPECT_NEAR (hist_f1 (0, 0), 0.757576, 1e-4);
EXPECT_NEAR (hist_f1 (0, 1), 0.757576, 1e-4);
// weightPointSPFHSignature
Eigen::VectorXf fpfh_histogram (nr_subdiv + nr_subdiv + nr_subdiv);
fpfh_histogram.setZero ();
- vector<float> dists (indices.size ());
+ std::vector<float> dists (indices.size ());
for (size_t i = 0; i < dists.size (); ++i) dists[i] = static_cast<float> (i);
fpfh.weightPointSPFHSignature (hist_f1, hist_f2, hist_f3, indices, dists, fpfh_histogram);
EXPECT_NEAR (fpfh_histogram[15], 16.8062, 1e-2);
EXPECT_NEAR (fpfh_histogram[16], 16.2767, 1e-2);
EXPECT_NEAR (fpfh_histogram[17], 12.251 , 1e-2);
- //EXPECT_NEAR (fpfh_histogram[18], 10.354, 1e-1);
- //EXPECT_NEAR (fpfh_histogram[19], 6.65578, 1e-2);
+ EXPECT_NEAR (fpfh_histogram[18], 10.354, 1e-2);
+ EXPECT_NEAR (fpfh_histogram[19], 6.65578, 1e-2);
EXPECT_NEAR (fpfh_histogram[20], 6.1437 , 1e-2);
EXPECT_NEAR (fpfh_histogram[21], 5.83341, 1e-2);
EXPECT_NEAR (fpfh_histogram[22], 1.08809, 1e-2);
// Object
PointCloud<FPFHSignature33>::Ptr fpfhs (new PointCloud<FPFHSignature33> ());
+ boost::shared_ptr<std::vector<int> > indicesptr (new std::vector<int> (indices));
// set parameters
- fpfh.setInputCloud (cloud.makeShared ());
+ fpfh.setInputCloud (cloud);
fpfh.setNrSubdivisions (11, 11, 11);
fpfh.setIndices (indicesptr);
fpfh.setSearchMethod (tree);
EXPECT_NEAR (fpfhs->points[0].histogram[15], 17.963 , 1e-2);
EXPECT_NEAR (fpfhs->points[0].histogram[16], 18.2801, 1e-2);
EXPECT_NEAR (fpfhs->points[0].histogram[17], 14.2766, 1e-2);
- //EXPECT_NEAR (fpfhs->points[0].histogram[18], 10.8542, 1e-2);
- //EXPECT_NEAR (fpfhs->points[0].histogram[19], 6.07925, 1e-2);
+ EXPECT_NEAR (fpfhs->points[0].histogram[18], 10.8542, 1e-2);
+ EXPECT_NEAR (fpfhs->points[0].histogram[19], 6.07925, 1e-2);
EXPECT_NEAR (fpfhs->points[0].histogram[20], 5.28565, 1e-2);
EXPECT_NEAR (fpfhs->points[0].histogram[21], 4.73887, 1e-2);
EXPECT_NEAR (fpfhs->points[0].histogram[22], 0.56984, 1e-2);
// Test results when setIndices and/or setSearchSurface are used
- boost::shared_ptr<vector<int> > test_indices (new vector<int> (0));
- for (size_t i = 0; i < cloud.size (); i+=3)
+ boost::shared_ptr<std::vector<int> > test_indices (new std::vector<int> (0));
+ for (size_t i = 0; i < cloud->size (); i+=3)
test_indices->push_back (static_cast<int> (i));
- testIndicesAndSearchSurface<FPFHEstimation<PointXYZ, Normal, FPFHSignature33>, PointXYZ, Normal, FPFHSignature33>
- (cloud.makeShared (), normals, test_indices, 33);
-
+ testIndicesAndSearchSurface<FPFHEstimation, PointT, PointT, FPFHSignature33>
+ (cloud, cloud, test_indices, 33);
}
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, FPFHEstimationOpenMP)
-{
- // Estimate normals first
- NormalEstimation<PointXYZ, Normal> n;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- // set parameters
- n.setInputCloud (cloud.makeShared ());
- boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices));
- n.setIndices (indicesptr);
- n.setSearchMethod (tree);
- n.setKSearch (10); // Use 10 nearest neighbors to estimate the normals
- // estimate
- n.compute (*normals);
- FPFHEstimationOMP<PointXYZ, Normal, FPFHSignature33> fpfh (4); // instantiate 4 threads
- fpfh.setInputNormals (normals);
-
- // Object
- PointCloud<FPFHSignature33>::Ptr fpfhs (new PointCloud<FPFHSignature33> ());
-
- // set parameters
- fpfh.setInputCloud (cloud.makeShared ());
- fpfh.setNrSubdivisions (11, 11, 11);
- fpfh.setIndices (indicesptr);
- fpfh.setSearchMethod (tree);
- fpfh.setKSearch (static_cast<int> (indices.size ()));
-
- // estimate
- fpfh.compute (*fpfhs);
- EXPECT_EQ (fpfhs->points.size (), indices.size ());
-
- EXPECT_NEAR (fpfhs->points[0].histogram[0], 1.58591, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[1], 1.68365, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[2], 6.71 , 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[3], 23.073, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[4], 33.3828, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[5], 20.4002, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[6], 7.31067, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[7], 1.02635, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[8], 0.48591, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[9], 1.47069, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[10], 2.87061, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[11], 1.78321, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[12], 4.30795, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[13], 7.05514, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[14], 9.37615, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[15], 17.963 , 1e-3);
- //EXPECT_NEAR (fpfhs->points[0].histogram[16], 18.2801, 1e-3);
- //EXPECT_NEAR (fpfhs->points[0].histogram[17], 14.2766, 1e-3);
- //EXPECT_NEAR (fpfhs->points[0].histogram[18], 10.8542, 1e-3);
- //EXPECT_NEAR (fpfhs->points[0].histogram[19], 6.07925, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[20], 5.28991, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[21], 4.73438, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[22], 0.56984, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[23], 3.29826, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[24], 5.28156, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[25], 5.26939, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[26], 3.13191, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[27], 1.74453, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[28], 9.41971, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[29], 21.5894, 1e-2);
- EXPECT_NEAR (fpfhs->points[0].histogram[30], 24.6302, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[31], 17.7764, 1e-3);
- EXPECT_NEAR (fpfhs->points[0].histogram[32], 7.28878, 1e-3);
-
- // Test results when setIndices and/or setSearchSurface are used
-
- boost::shared_ptr<vector<int> > test_indices (new vector<int> (0));
- for (size_t i = 0; i < cloud.size (); i+=3)
- test_indices->push_back (static_cast<int> (i));
-
- testIndicesAndSearchSurface<FPFHEstimationOMP<PointXYZ, Normal, FPFHSignature33>, PointXYZ, Normal, FPFHSignature33>
- (cloud.makeShared (), normals, test_indices, 33);
-}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, VFHEstimation)
{
- // Estimate normals first
- NormalEstimation<PointXYZ, Normal> n;
- PointCloud<Normal>::Ptr normals (new PointCloud<Normal> ());
- // set parameters
- n.setInputCloud (cloud.makeShared ());
- boost::shared_ptr<vector<int> > indicesptr (new vector<int> (indices));
- n.setIndices (indicesptr);
- n.setSearchMethod (tree);
- n.setKSearch (10); // Use 10 nearest neighbors to estimate the normals
- // estimate
- n.compute (*normals);
- VFHEstimation<PointXYZ, Normal, VFHSignature308> vfh;
- vfh.setInputNormals (normals);
-
- // PointCloud<PointNormal> cloud_normals;
- // concatenateFields (cloud, normals, cloud_normals);
- // savePCDFile ("bun0_n.pcd", cloud_normals);
+ using pcl::VFHSignature308;
// Object
+ pcl::VFHEstimation<PointT, PointT, VFHSignature308> vfh;
PointCloud<VFHSignature308>::Ptr vfhs (new PointCloud<VFHSignature308> ());
+ boost::shared_ptr<std::vector<int> > indicesptr (new std::vector<int> (indices));
// set parameters
- vfh.setInputCloud (cloud.makeShared ());
+ vfh.setInputCloud (cloud);
+ vfh.setInputNormals (cloud);
vfh.setIndices (indicesptr);
vfh.setSearchMethod (tree);
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (PCL, GFPFH)
{
+ using pcl::PointXYZL;
+ using pcl::GFPFHSignature16;
+
PointCloud<PointXYZL>::Ptr cloud (new PointCloud<PointXYZL>());
const unsigned num_classes = 3;
cloud->width = static_cast<uint32_t> (cloud->points.size ());
cloud->height = 1;
- GFPFHEstimation<PointXYZL, PointXYZL, GFPFHSignature16> gfpfh;
+ pcl::GFPFHEstimation<PointXYZL, PointXYZL, GFPFHSignature16> gfpfh;
gfpfh.setNumberOfClasses (num_classes);
gfpfh.setOctreeLeafSize (2);
gfpfh.setInputCloud (cloud);
if (argc < 2)
{
std::cerr << "No test file given. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
+ return (1);
}
- if (loadPCDFile<PointXYZ> (argv[1], cloud) < 0)
+ if (pcl::io::loadPCDFile<PointT> (argv[1], *cloud) < 0)
{
std::cerr << "Failed to read test file. Please download `bun0.pcd` and pass its path to the test." << std::endl;
- return (-1);
+ return (1);
}
- indices.resize (cloud.points.size ());
- for (size_t i = 0; i < indices.size (); ++i)
- indices[i] = static_cast<int> (i);
- tree.reset (new search::KdTree<PointXYZ> (false));
- tree->setInputCloud (cloud.makeShared ());
+ indices.reserve (cloud->size ());
+ for (size_t i = 0; i < cloud->size (); ++i)
+ indices.push_back (static_cast<int> (i));
+
+ tree.reset (new pcl::search::KdTree<PointT> (false));
+ tree->setInputCloud (cloud);
testing::InitGoogleTest (&argc, argv);
return (RUN_ALL_TESTS ());
// Compare to independently verified values
const RIFTDescriptor &rift = rift_output.points[220];
- const float correct_rift_feature_values[32] = {0.0187f, 0.0349f, 0.0647f, 0.0881f, 0.0042f, 0.0131f, 0.0346f, 0.0030f,
- 0.0076f, 0.0218f, 0.0463f, 0.0030f, 0.0087f, 0.0288f, 0.0920f, 0.0472f,
- 0.0076f, 0.0420f, 0.0726f, 0.0669f, 0.0090f, 0.0901f, 0.1274f, 0.2185f,
- 0.0147f, 0.1222f, 0.3568f, 0.4348f, 0.0149f, 0.0806f, 0.2787f, 0.6864f};
+ float correct_rift_feature_values[32];
+
+ unsigned major, minor, patch;
+ std::sscanf (FLANN_VERSION_, "%u.%u.%u", &major, &minor, &patch);
+ if (PCL_VERSION_CALC (major, minor, patch) > PCL_VERSION_CALC (1, 8, 4))
+ {
+ const float data[32] = {0.0052f, 0.0349f, 0.0647f, 0.0881f, 0.0042f, 0.0131f, 0.0346f, 0.0030f,
+ 0.0076f, 0.0218f, 0.0463f, 0.0030f, 0.0087f, 0.0288f, 0.0920f, 0.0472f,
+ 0.0211f, 0.0420f, 0.0726f, 0.0669f, 0.0090f, 0.0901f, 0.1274f, 0.2185f,
+ 0.0147f, 0.1222f, 0.3568f, 0.4348f, 0.0149f, 0.0806f, 0.2787f, 0.6864f};
+ std::copy (data, data + 32, correct_rift_feature_values);
+ }
+ else
+ {
+ const float data[32] = {0.0187f, 0.0349f, 0.0647f, 0.0881f, 0.0042f, 0.0131f, 0.0346f, 0.0030f,
+ 0.0076f, 0.0218f, 0.0463f, 0.0030f, 0.0087f, 0.0288f, 0.0920f, 0.0472f,
+ 0.0076f, 0.0420f, 0.0726f, 0.0669f, 0.0090f, 0.0901f, 0.1274f, 0.2185f,
+ 0.0147f, 0.1222f, 0.3568f, 0.4348f, 0.0149f, 0.0806f, 0.2787f, 0.6864f};
+ std::copy (data, data + 32, correct_rift_feature_values);
+ }
for (int i = 0; i < 32; ++i)
EXPECT_NEAR (rift.histogram[i], correct_rift_feature_values[i], 1e-4);
}
EXPECT_TRUE (pcl_isnan (bunny_LRF.at (24).x_axis[0]));
// Expected Results
- // point 15: tanget disambiguation
+ // point 15: tangent disambiguation
//float point_15_conf = 0;
Eigen::Vector3f point_15_x (-0.849213f, 0.528016f, 0.00593846f);
Eigen::Vector3f point_15_y (0.274564f, 0.451135f, -0.849171f);
FILES test_filters.cpp
LINK_WITH pcl_gtest pcl_filters pcl_io pcl_sample_consensus pcl_kdtree pcl_features pcl_segmentation
ARGUMENTS "${PCL_SOURCE_DIR}/test/bun0.pcd" "${PCL_SOURCE_DIR}/test/milk_cartoon_all_small_clorox.pcd")
+ PCL_ADD_TEST(filters_clipper test_clipper
+ FILES test_clipper.cpp
+ LINK_WITH pcl_gtest pcl_filters)
endif (BUILD_segmentation)
endif (BUILD_features)
endif (BUILD_io)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2009-2012, Willow Garage, Inc.
+ * Copyright (c) 2012-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+#include <gtest/gtest.h>
+#include <pcl/point_types.h>
+#include <pcl/filters/box_clipper3D.h>
+#include <pcl/filters/crop_box.h>
+#include <pcl/filters/extract_indices.h>
+
+#include <pcl/common/transforms.h>
+#include <pcl/common/eigen.h>
+
+using namespace pcl;
+using namespace std;
+using namespace Eigen;
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (BoxClipper3D, Filters)
+{
+ // PointCloud
+ // -------------------------------------------------------------------------
+
+ // Create cloud with center point and corner points
+ PointCloud<PointXYZ>::Ptr input (new PointCloud<PointXYZ> ());
+ input->push_back (PointXYZ (0.0f, 0.0f, 0.0f));
+ input->push_back (PointXYZ (0.9f, 0.9f, 0.9f));
+ input->push_back (PointXYZ (0.9f, 0.9f, -0.9f));
+ input->push_back (PointXYZ (0.9f, -0.9f, 0.9f));
+ input->push_back (PointXYZ (-0.9f, 0.9f, 0.9f));
+ input->push_back (PointXYZ (0.9f, -0.9f, -0.9f));
+ input->push_back (PointXYZ (-0.9f, -0.9f, 0.9f));
+ input->push_back (PointXYZ (-0.9f, 0.9f, -0.9f));
+ input->push_back (PointXYZ (-0.9f, -0.9f, -0.9f));
+
+ ExtractIndices<PointXYZ> extract_indices;
+ vector<int> indices;
+
+ BoxClipper3D<PointXYZ> boxClipper3D (Affine3f::Identity ());
+ boxClipper3D.clipPointCloud3D (*input, indices);
+
+ PointCloud<PointXYZ> cloud_out;
+
+ extract_indices.setInputCloud (input);
+ extract_indices.setIndices (boost::make_shared<vector<int> > (indices));
+ extract_indices.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 9);
+ EXPECT_EQ (int (cloud_out.size ()), 9);
+ EXPECT_EQ (int (cloud_out.width), 9);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ // Translate points by 1 in Y-axis ...
+ Affine3f t (Translation3f (0.0f, 1.0f, 0.0f));
+ boxClipper3D.setTransformation (t);
+ boxClipper3D.clipPointCloud3D (*input, indices);
+
+ EXPECT_EQ (int (indices.size ()), 5);
+
+ // ... then rotate points +45 in Y-Axis
+ t.rotate (AngleAxisf (45.0f * float (M_PI) / 180.0f, Vector3f::UnitY ()));
+ boxClipper3D.setTransformation (t);
+ boxClipper3D.clipPointCloud3D (*input, indices);
+ EXPECT_EQ (int (indices.size ()), 1);
+
+ // ... then rotate points -45 in Z-axis
+ t.rotate (AngleAxisf (-45.0f * float (M_PI) / 180.0f, Vector3f::UnitZ ()));
+ boxClipper3D.setTransformation (t);
+ boxClipper3D.clipPointCloud3D (*input, indices);
+ EXPECT_EQ (int (indices.size ()), 3);
+
+ // ... then scale points by 2
+ t.scale (2.0f);
+ boxClipper3D.setTransformation (t);
+ boxClipper3D.clipPointCloud3D (*input, indices);
+ EXPECT_EQ (int (indices.size ()), 1);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (CropBox, Filters)
+{
+
+ // PointT
+ // -------------------------------------------------------------------------
+
+ // Create cloud with center point and corner points
+ PointCloud<PointXYZ>::Ptr input (new PointCloud<PointXYZ> ());
+
+ input->push_back (PointXYZ (0.0f, 0.0f, 0.0f));
+ input->push_back (PointXYZ (0.9f, 0.9f, 0.9f));
+ input->push_back (PointXYZ (0.9f, 0.9f, -0.9f));
+ input->push_back (PointXYZ (0.9f, -0.9f, 0.9f));
+ input->push_back (PointXYZ (-0.9f, 0.9f, 0.9f));
+ input->push_back (PointXYZ (0.9f, -0.9f, -0.9f));
+ input->push_back (PointXYZ (-0.9f, -0.9f, 0.9f));
+ input->push_back (PointXYZ (-0.9f, 0.9f, -0.9f));
+ input->push_back (PointXYZ (-0.9f, -0.9f, -0.9f));
+
+ // Test the PointCloud<PointT> method
+ CropBox<PointXYZ> cropBoxFilter (true);
+ cropBoxFilter.setInputCloud (input);
+ Eigen::Vector4f min_pt (-1.0f, -1.0f, -1.0f, 1.0f);
+ Eigen::Vector4f max_pt (1.0f, 1.0f, 1.0f, 1.0f);
+
+ // Cropbox slightly bigger then bounding box of points
+ cropBoxFilter.setMin (min_pt);
+ cropBoxFilter.setMax (max_pt);
+
+ // Indices
+ vector<int> indices;
+ cropBoxFilter.filter (indices);
+
+ // Cloud
+ PointCloud<PointXYZ> cloud_out;
+ cropBoxFilter.filter (cloud_out);
+
+ // Should contain all
+ EXPECT_EQ (int (indices.size ()), 9);
+ EXPECT_EQ (int (cloud_out.size ()), 9);
+ EXPECT_EQ (int (cloud_out.width), 9);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ IndicesConstPtr removed_indices;
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 0);
+
+ // Test setNegative
+ PointCloud<PointXYZ> cloud_out_negative;
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 0);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 0);
+
+ cropBoxFilter.setNegative (false);
+ cropBoxFilter.filter (cloud_out);
+
+ // Translate crop box up by 1
+ cropBoxFilter.setTranslation(Eigen::Vector3f(0, 1, 0));
+ cropBoxFilter.filter (indices);
+ cropBoxFilter.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 5);
+ EXPECT_EQ (int (cloud_out.size ()), 5);
+
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 4);
+
+ // Test setNegative
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 4);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 4);
+
+ cropBoxFilter.setNegative (false);
+ cropBoxFilter.filter (cloud_out);
+
+ // Rotate crop box up by 45
+ cropBoxFilter.setRotation (Eigen::Vector3f (0.0f, 45.0f * float (M_PI) / 180.0f, 0.0f));
+ cropBoxFilter.filter (indices);
+ cropBoxFilter.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 1);
+ EXPECT_EQ (int (cloud_out.size ()), 1);
+ EXPECT_EQ (int (cloud_out.width), 1);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 8);
+
+ // Test setNegative
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 8);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 8);
+
+ cropBoxFilter.setNegative (false);
+ cropBoxFilter.filter (cloud_out);
+
+ // Rotate point cloud by -45
+ cropBoxFilter.setTransform (getTransformation (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
+ cropBoxFilter.filter (indices);
+ cropBoxFilter.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 3);
+ EXPECT_EQ (int (cloud_out.size ()), 3);
+ EXPECT_EQ (int (cloud_out.width), 3);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 6);
+
+ // Test setNegative
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 6);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 6);
+
+ cropBoxFilter.setNegative (false);
+ cropBoxFilter.filter (cloud_out);
+
+ // Translate point cloud down by -1
+ cropBoxFilter.setTransform (getTransformation(0, -1, 0, 0, 0, -45.0 * float (M_PI) / 180.0));
+ cropBoxFilter.filter (indices);
+ cropBoxFilter.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 2);
+ EXPECT_EQ (int (cloud_out.size ()), 2);
+ EXPECT_EQ (int (cloud_out.width), 2);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 7);
+
+ // Test setNegative
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 7);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 7);
+
+ cropBoxFilter.setNegative (false);
+ cropBoxFilter.filter (cloud_out);
+
+ // Remove point cloud rotation
+ cropBoxFilter.setTransform (getTransformation(0, -1, 0, 0, 0, 0));
+ cropBoxFilter.filter (indices);
+ cropBoxFilter.filter (cloud_out);
+
+ EXPECT_EQ (int (indices.size ()), 0);
+ EXPECT_EQ (int (cloud_out.size ()), 0);
+ EXPECT_EQ (int (cloud_out.width), 0);
+ EXPECT_EQ (int (cloud_out.height), 1);
+
+ removed_indices = cropBoxFilter.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices->size ()), 9);
+
+ // Test setNegative
+ cropBoxFilter.setNegative (true);
+ cropBoxFilter.filter (cloud_out_negative);
+ EXPECT_EQ (int (cloud_out_negative.size ()), 9);
+
+ cropBoxFilter.filter (indices);
+ EXPECT_EQ (int (indices.size ()), 9);
+
+ // PCLPointCloud2
+ // -------------------------------------------------------------------------
+
+ // Create cloud with center point and corner points
+ PCLPointCloud2::Ptr input2 (new PCLPointCloud2);
+ pcl::toPCLPointCloud2 (*input, *input2);
+
+ // Test the PointCloud<PointT> method
+ CropBox<PCLPointCloud2> cropBoxFilter2(true);
+ cropBoxFilter2.setInputCloud (input2);
+
+ // Cropbox slightly bigger then bounding box of points
+ cropBoxFilter2.setMin (min_pt);
+ cropBoxFilter2.setMax (max_pt);
+
+ // Indices
+ vector<int> indices2;
+ cropBoxFilter2.filter (indices2);
+
+ // Cloud
+ PCLPointCloud2 cloud_out2;
+ cropBoxFilter2.filter (cloud_out2);
+
+ // Should contain all
+ EXPECT_EQ (int (indices2.size ()), 9);
+ EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
+
+ IndicesConstPtr removed_indices2;
+ removed_indices2 = cropBoxFilter2.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices2->size ()), 0);
+
+ // Test setNegative
+ PCLPointCloud2 cloud_out2_negative;
+ cropBoxFilter2.setNegative (true);
+ cropBoxFilter2.filter (cloud_out2_negative);
+ EXPECT_EQ (int (cloud_out2_negative.width), 0);
+
+ cropBoxFilter2.filter (indices2);
+ EXPECT_EQ (int (indices2.size ()), 0);
+
+ cropBoxFilter2.setNegative (false);
+ cropBoxFilter2.filter (cloud_out2);
+
+ // Translate crop box up by 1
+ cropBoxFilter2.setTranslation (Eigen::Vector3f(0, 1, 0));
+ cropBoxFilter2.filter (indices2);
+ cropBoxFilter2.filter (cloud_out2);
+
+ EXPECT_EQ (int (indices2.size ()), 5);
+ EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
+
+ removed_indices2 = cropBoxFilter2.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices2->size ()), 4);
+
+ // Test setNegative
+ cropBoxFilter2.setNegative (true);
+ cropBoxFilter2.filter (cloud_out2_negative);
+ EXPECT_EQ (int (cloud_out2_negative.width), 4);
+
+ cropBoxFilter2.filter (indices2);
+ EXPECT_EQ (int (indices2.size ()), 4);
+
+ cropBoxFilter2.setNegative (false);
+ cropBoxFilter2.filter (cloud_out2);
+
+ // Rotate crop box up by 45
+ cropBoxFilter2.setRotation (Eigen::Vector3f (0.0f, 45.0f * float (M_PI) / 180.0f, 0.0f));
+ cropBoxFilter2.filter (indices2);
+ cropBoxFilter2.filter (cloud_out2);
+
+ EXPECT_EQ (int (indices2.size ()), 1);
+ EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
+
+ // Rotate point cloud by -45
+ cropBoxFilter2.setTransform (getTransformation (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
+ cropBoxFilter2.filter (indices2);
+ cropBoxFilter2.filter (cloud_out2);
+
+ EXPECT_EQ (int (indices2.size ()), 3);
+ EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 3);
+
+ removed_indices2 = cropBoxFilter2.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices2->size ()), 6);
+
+ // Test setNegative
+ cropBoxFilter2.setNegative (true);
+ cropBoxFilter2.filter (cloud_out2_negative);
+ EXPECT_EQ (int (cloud_out2_negative.width), 6);
+
+ cropBoxFilter2.filter (indices2);
+ EXPECT_EQ (int (indices2.size ()), 6);
+
+ cropBoxFilter2.setNegative (false);
+ cropBoxFilter2.filter (cloud_out2);
+
+ // Translate point cloud down by -1
+ cropBoxFilter2.setTransform (getTransformation (0.0f, -1.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
+ cropBoxFilter2.filter (indices2);
+ cropBoxFilter2.filter (cloud_out2);
+
+ EXPECT_EQ (int (indices2.size ()), 2);
+ EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 2);
+
+ removed_indices2 = cropBoxFilter2.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices2->size ()), 7);
+
+ // Test setNegative
+ cropBoxFilter2.setNegative (true);
+ cropBoxFilter2.filter (cloud_out2_negative);
+ EXPECT_EQ (int (cloud_out2_negative.width), 7);
+
+ cropBoxFilter2.filter (indices2);
+ EXPECT_EQ (int (indices2.size ()), 7);
+
+ cropBoxFilter2.setNegative (false);
+ cropBoxFilter2.filter (cloud_out2);
+
+ // Remove point cloud rotation
+ cropBoxFilter2.setTransform (getTransformation(0, -1, 0, 0, 0, 0));
+ cropBoxFilter2.filter (indices2);
+ cropBoxFilter2.filter (cloud_out2);
+
+ EXPECT_EQ (int (indices2.size ()), 0);
+ EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 0);
+
+ removed_indices2 = cropBoxFilter2.getRemovedIndices ();
+ EXPECT_EQ (int (removed_indices2->size ()), 9);
+
+ // Test setNegative
+ cropBoxFilter2.setNegative (true);
+ cropBoxFilter2.filter (cloud_out2_negative);
+ EXPECT_EQ (int (cloud_out2_negative.width), 9);
+
+ cropBoxFilter2.filter (indices2);
+ EXPECT_EQ (int (indices2.size ()), 9);
+
+ cropBoxFilter2.setNegative (false);
+ cropBoxFilter2.filter (cloud_out2);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/filters/conditional_removal.h>
-#include <pcl/filters/crop_box.h>
#include <pcl/filters/median_filter.h>
#include <pcl/filters/normal_refinement.h>
for (int i = 0; i < 10; ++i)
{
PointXYZRGB pt;
- int rgb = (col_r[i] << 16) | (col_g[i] << 8) | col_b[i];
- pt.x = 0.0f;
- pt.y = 0.0f;
- pt.z = 0.0f;
- pt.rgb = *reinterpret_cast<float*> (&rgb);
+ pt.r = col_r[i];
+ pt.g = col_g[i];
+ pt.b = col_b[i];
cloud_rgb_.points.push_back (pt);
}
PointCloud<PointNormal> output;
input->reserve (16);
input->is_dense = false;
-
+
PointNormal point;
PointNormal ground_truth[2][2][2];
for (unsigned zIdx = 0; zIdx < 2; ++zIdx)
// y = 1, z = 0 -> orthogonal normals
// y = 1, z = 1 -> random normals
PointNormal& voxel = ground_truth [xIdx][yIdx][zIdx];
-
+
point.x = xIdx * 1.99;
point.y = yIdx * 1.99;
point.z = zIdx * 1.99;
point.normal_x = getRandomNumber (1.0, -1.0);
point.normal_y = getRandomNumber (1.0, -1.0);
point.normal_z = getRandomNumber (1.0, -1.0);
-
+
float norm = 1.0f / sqrt (point.normal_x * point.normal_x + point.normal_y * point.normal_y + point.normal_z * point.normal_z );
point.normal_x *= norm;
point.normal_y *= norm;
point.normal_z *= norm;
-
+
// std::cout << "adding point: " << point.x << " , " << point.y << " , " << point.z
// << " -- " << point.normal_x << " , " << point.normal_y << " , " << point.normal_z << std::endl;
input->push_back (point);
-
+
voxel = point;
-
+
if (xIdx != 0)
{
point.x = getRandomNumber (0.99) + float (xIdx);
voxel.x += point.x;
voxel.y += point.y;
voxel.z += point.z;
-
+
voxel.x *= 0.5;
voxel.y *= 0.5;
voxel.z *= 0.5;
-
+
if (yIdx == 0 && zIdx == 0)
{
voxel.normal_x = std::numeric_limits<float>::quiet_NaN ();
point.normal_x *= norm;
point.normal_y *= norm;
point.normal_z *= norm;
-
+
voxel.normal_x += point.normal_x;
voxel.normal_y += point.normal_y;
voxel.normal_z += point.normal_z;
-
+
norm = 1.0f / sqrt (voxel.normal_x * voxel.normal_x + voxel.normal_y * voxel.normal_y + voxel.normal_z * voxel.normal_z );
-
+
voxel.normal_x *= norm;
voxel.normal_y *= norm;
voxel.normal_z *= norm;
input->push_back (point);
// std::cout << "voxel: " << voxel.x << " , " << voxel.y << " , " << voxel.z
// << " -- " << voxel.normal_x << " , " << voxel.normal_y << " , " << voxel.normal_z << std::endl;
-
+
}
}
}
-
+
VoxelGrid<PointNormal> grid;
grid.setLeafSize (1.0f, 1.0f, 1.0f);
grid.setFilterLimits (0.0, 2.0);
grid.setInputCloud (input);
grid.filter (output);
-
+
// check the output
for (unsigned idx = 0, zIdx = 0; zIdx < 2; ++zIdx)
{
EXPECT_EQ (voxel.x, point.x);
EXPECT_EQ (voxel.y, point.y);
EXPECT_EQ (voxel.z, point.z);
-
+
if (pcl_isfinite(voxel.normal_x) || pcl_isfinite (point.normal_x))
{
EXPECT_EQ (voxel.normal_x, point.normal_x);
}
}
}
-
+
toPCLPointCloud2 (*input, cloud_blob_);
cloud_blob_ptr_.reset (new PCLPointCloud2 (cloud_blob_));
-
+
VoxelGrid<PCLPointCloud2> grid2;
PCLPointCloud2 output_blob;
EXPECT_EQ (voxel.x, point.x);
EXPECT_EQ (voxel.y, point.y);
EXPECT_EQ (voxel.z, point.z);
-
+
if (pcl_isfinite(voxel.normal_x) || pcl_isfinite (point.normal_x))
{
EXPECT_EQ (voxel.normal_x, point.normal_x);
EXPECT_LE (fabs (output.points[neighbors.at (0)].y - output.points[centroidIdx].y), 0.02);
EXPECT_LE (output.points[neighbors.at (0)].z - output.points[centroidIdx].z, 0.02 * 2);
- // testing seach functions
+ // testing search functions
grid.setSaveLeafLayout (false);
grid.filter (output, true);
EXPECT_NEAR (cloud_out.points[cloud_out.points.size () - 1].z, -0.021299, 1e-4);
}
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (CropBox, Filters)
-{
-
- // PointT
- // -------------------------------------------------------------------------
-
- // Create cloud with center point and corner points
- PointCloud<PointXYZ>::Ptr input (new PointCloud<PointXYZ> ());
-
- input->push_back (PointXYZ (0.0f, 0.0f, 0.0f));
- input->push_back (PointXYZ (0.9f, 0.9f, 0.9f));
- input->push_back (PointXYZ (0.9f, 0.9f, -0.9f));
- input->push_back (PointXYZ (0.9f, -0.9f, 0.9f));
- input->push_back (PointXYZ (-0.9f, 0.9f, 0.9f));
- input->push_back (PointXYZ (0.9f, -0.9f, -0.9f));
- input->push_back (PointXYZ (-0.9f, -0.9f, 0.9f));
- input->push_back (PointXYZ (-0.9f, 0.9f, -0.9f));
- input->push_back (PointXYZ (-0.9f, -0.9f, -0.9f));
-
- // Test the PointCloud<PointT> method
- CropBox<PointXYZ> cropBoxFilter (true);
- cropBoxFilter.setInputCloud (input);
- Eigen::Vector4f min_pt (-1.0f, -1.0f, -1.0f, 1.0f);
- Eigen::Vector4f max_pt (1.0f, 1.0f, 1.0f, 1.0f);
-
- // Cropbox slighlty bigger then bounding box of points
- cropBoxFilter.setMin (min_pt);
- cropBoxFilter.setMax (max_pt);
-
- // Indices
- vector<int> indices;
- cropBoxFilter.filter (indices);
-
- // Cloud
- PointCloud<PointXYZ> cloud_out;
- cropBoxFilter.filter (cloud_out);
-
- // Should contain all
- EXPECT_EQ (int (indices.size ()), 9);
- EXPECT_EQ (int (cloud_out.size ()), 9);
- EXPECT_EQ (int (cloud_out.width), 9);
- EXPECT_EQ (int (cloud_out.height), 1);
-
- IndicesConstPtr removed_indices;
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 0);
-
- // Test setNegative
- PointCloud<PointXYZ> cloud_out_negative;
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 0);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 0);
-
- cropBoxFilter.setNegative (false);
- cropBoxFilter.filter (cloud_out);
-
- // Translate crop box up by 1
- cropBoxFilter.setTranslation(Eigen::Vector3f(0, 1, 0));
- cropBoxFilter.filter (indices);
- cropBoxFilter.filter (cloud_out);
-
- EXPECT_EQ (int (indices.size ()), 5);
- EXPECT_EQ (int (cloud_out.size ()), 5);
-
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 4);
-
- // Test setNegative
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 4);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 4);
-
- cropBoxFilter.setNegative (false);
- cropBoxFilter.filter (cloud_out);
-
- // Rotate crop box up by 45
- cropBoxFilter.setRotation (Eigen::Vector3f (0.0f, 45.0f * float (M_PI) / 180.0f, 0.0f));
- cropBoxFilter.filter (indices);
- cropBoxFilter.filter (cloud_out);
-
- EXPECT_EQ (int (indices.size ()), 1);
- EXPECT_EQ (int (cloud_out.size ()), 1);
- EXPECT_EQ (int (cloud_out.width), 1);
- EXPECT_EQ (int (cloud_out.height), 1);
-
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 8);
-
- // Test setNegative
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 8);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 8);
-
- cropBoxFilter.setNegative (false);
- cropBoxFilter.filter (cloud_out);
-
- // Rotate point cloud by -45
- cropBoxFilter.setTransform (getTransformation (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
- cropBoxFilter.filter (indices);
- cropBoxFilter.filter (cloud_out);
-
- EXPECT_EQ (int (indices.size ()), 3);
- EXPECT_EQ (int (cloud_out.size ()), 3);
- EXPECT_EQ (int (cloud_out.width), 3);
- EXPECT_EQ (int (cloud_out.height), 1);
-
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 6);
-
- // Test setNegative
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 6);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 6);
-
- cropBoxFilter.setNegative (false);
- cropBoxFilter.filter (cloud_out);
-
- // Translate point cloud down by -1
- cropBoxFilter.setTransform (getTransformation(0, -1, 0, 0, 0, -45.0 * float (M_PI) / 180.0));
- cropBoxFilter.filter (indices);
- cropBoxFilter.filter (cloud_out);
-
- EXPECT_EQ (int (indices.size ()), 2);
- EXPECT_EQ (int (cloud_out.size ()), 2);
- EXPECT_EQ (int (cloud_out.width), 2);
- EXPECT_EQ (int (cloud_out.height), 1);
-
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 7);
-
- // Test setNegative
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 7);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 7);
-
- cropBoxFilter.setNegative (false);
- cropBoxFilter.filter (cloud_out);
-
- // Remove point cloud rotation
- cropBoxFilter.setTransform (getTransformation(0, -1, 0, 0, 0, 0));
- cropBoxFilter.filter (indices);
- cropBoxFilter.filter (cloud_out);
-
- EXPECT_EQ (int (indices.size ()), 0);
- EXPECT_EQ (int (cloud_out.size ()), 0);
- EXPECT_EQ (int (cloud_out.width), 0);
- EXPECT_EQ (int (cloud_out.height), 1);
-
- removed_indices = cropBoxFilter.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices->size ()), 9);
-
- // Test setNegative
- cropBoxFilter.setNegative (true);
- cropBoxFilter.filter (cloud_out_negative);
- EXPECT_EQ (int (cloud_out_negative.size ()), 9);
-
- cropBoxFilter.filter (indices);
- EXPECT_EQ (int (indices.size ()), 9);
-
- // PCLPointCloud2
- // -------------------------------------------------------------------------
-
- // Create cloud with center point and corner points
- PCLPointCloud2::Ptr input2 (new PCLPointCloud2);
- pcl::toPCLPointCloud2 (*input, *input2);
-
- // Test the PointCloud<PointT> method
- CropBox<PCLPointCloud2> cropBoxFilter2(true);
- cropBoxFilter2.setInputCloud (input2);
-
- // Cropbox slighlty bigger then bounding box of points
- cropBoxFilter2.setMin (min_pt);
- cropBoxFilter2.setMax (max_pt);
-
- // Indices
- vector<int> indices2;
- cropBoxFilter2.filter (indices2);
-
- // Cloud
- PCLPointCloud2 cloud_out2;
- cropBoxFilter2.filter (cloud_out2);
-
- // Should contain all
- EXPECT_EQ (int (indices2.size ()), 9);
- EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
-
- IndicesConstPtr removed_indices2;
- removed_indices2 = cropBoxFilter2.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices2->size ()), 0);
-
- // Test setNegative
- PCLPointCloud2 cloud_out2_negative;
- cropBoxFilter2.setNegative (true);
- cropBoxFilter2.filter (cloud_out2_negative);
- EXPECT_EQ (int (cloud_out2_negative.width), 0);
-
- cropBoxFilter2.filter (indices2);
- EXPECT_EQ (int (indices2.size ()), 0);
-
- cropBoxFilter2.setNegative (false);
- cropBoxFilter2.filter (cloud_out2);
-
- // Translate crop box up by 1
- cropBoxFilter2.setTranslation (Eigen::Vector3f(0, 1, 0));
- cropBoxFilter2.filter (indices2);
- cropBoxFilter2.filter (cloud_out2);
-
- EXPECT_EQ (int (indices2.size ()), 5);
- EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
-
- removed_indices2 = cropBoxFilter2.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices2->size ()), 4);
-
- // Test setNegative
- cropBoxFilter2.setNegative (true);
- cropBoxFilter2.filter (cloud_out2_negative);
- EXPECT_EQ (int (cloud_out2_negative.width), 4);
-
- cropBoxFilter2.filter (indices2);
- EXPECT_EQ (int (indices2.size ()), 4);
-
- cropBoxFilter2.setNegative (false);
- cropBoxFilter2.filter (cloud_out2);
-
- // Rotate crop box up by 45
- cropBoxFilter2.setRotation (Eigen::Vector3f (0.0f, 45.0f * float (M_PI) / 180.0f, 0.0f));
- cropBoxFilter2.filter (indices2);
- cropBoxFilter2.filter (cloud_out2);
-
- EXPECT_EQ (int (indices2.size ()), 1);
- EXPECT_EQ (int (indices2.size ()), int (cloud_out2.width * cloud_out2.height));
-
- // Rotate point cloud by -45
- cropBoxFilter2.setTransform (getTransformation (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
- cropBoxFilter2.filter (indices2);
- cropBoxFilter2.filter (cloud_out2);
-
- EXPECT_EQ (int (indices2.size ()), 3);
- EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 3);
-
- removed_indices2 = cropBoxFilter2.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices2->size ()), 6);
-
- // Test setNegative
- cropBoxFilter2.setNegative (true);
- cropBoxFilter2.filter (cloud_out2_negative);
- EXPECT_EQ (int (cloud_out2_negative.width), 6);
-
- cropBoxFilter2.filter (indices2);
- EXPECT_EQ (int (indices2.size ()), 6);
-
- cropBoxFilter2.setNegative (false);
- cropBoxFilter2.filter (cloud_out2);
-
- // Translate point cloud down by -1
- cropBoxFilter2.setTransform (getTransformation (0.0f, -1.0f, 0.0f, 0.0f, 0.0f, -45.0f * float (M_PI) / 180.0f));
- cropBoxFilter2.filter (indices2);
- cropBoxFilter2.filter (cloud_out2);
-
- EXPECT_EQ (int (indices2.size ()), 2);
- EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 2);
-
- removed_indices2 = cropBoxFilter2.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices2->size ()), 7);
-
- // Test setNegative
- cropBoxFilter2.setNegative (true);
- cropBoxFilter2.filter (cloud_out2_negative);
- EXPECT_EQ (int (cloud_out2_negative.width), 7);
-
- cropBoxFilter2.filter (indices2);
- EXPECT_EQ (int (indices2.size ()), 7);
-
- cropBoxFilter2.setNegative (false);
- cropBoxFilter2.filter (cloud_out2);
-
- // Remove point cloud rotation
- cropBoxFilter2.setTransform (getTransformation(0, -1, 0, 0, 0, 0));
- cropBoxFilter2.filter (indices2);
- cropBoxFilter2.filter (cloud_out2);
-
- EXPECT_EQ (int (indices2.size ()), 0);
- EXPECT_EQ (int (cloud_out2.width * cloud_out2.height), 0);
-
- removed_indices2 = cropBoxFilter2.getRemovedIndices ();
- EXPECT_EQ (int (removed_indices2->size ()), 9);
-
- // Test setNegative
- cropBoxFilter2.setNegative (true);
- cropBoxFilter2.filter (cloud_out2_negative);
- EXPECT_EQ (int (cloud_out2_negative.width), 9);
-
- cropBoxFilter2.filter (indices2);
- EXPECT_EQ (int (indices2.size ()), 9);
-
- cropBoxFilter2.setNegative (false);
- cropBoxFilter2.filter (cloud_out2);
-}
-
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST (StatisticalOutlierRemoval, Filters)
{
//Creating a point cloud on the XY plane
PointCloud<PointXYZ>::Ptr input (new PointCloud<PointXYZ> ());
- for (int i = 0; i < 5; i++)
+ for (int i = 0; i < 5; i++)
{
- for (int j = 0; j < 5; j++)
+ for (int j = 0; j < 5; j++)
{
- for (int k = 0; k < 5; k++)
+ for (int k = 0; k < 5; k++)
{
pcl::PointXYZ pt;
pt.x = float (i);
pcl::PointCloud <pcl::PointXYZ>::Ptr output (new pcl::PointCloud <pcl::PointXYZ>);
fc.filter (*output);
-
+
// Should filter all points in the input cloud
EXPECT_EQ (output->points.size (), input->points.size ());
pcl::IndicesConstPtr removed;
EXPECT_EQ (output->size (), input->size ());
for (size_t i = 0; i < output->size (); i++)
{
- EXPECT_TRUE (pcl_isnan (output->at (i).x));
+ EXPECT_TRUE (pcl_isnan (output->at (i).x));
EXPECT_TRUE (pcl_isnan (output->at (i).y));
EXPECT_TRUE (pcl_isnan (output->at (i).z));
}
removed = fc.getRemovedIndices ();
EXPECT_EQ (removed->size (), input->size ());
-
+
}
cyl_comp->setComparisonMatrix (planeMatrix);
cyl_comp->setComparisonVector (planeVector);
cyl_comp->setComparisonScalar (-2 * 5.0);
- cyl_comp->setComparisonOperator (ComparisonOps::LT);
+ cyl_comp->setComparisonOperator (ComparisonOps::LT);
condrem.filter (output);
/*
* Initialization of parameters
*/
-
+
// Input without NaN
pcl::PointCloud<pcl::PointXYZRGB> cloud_organized_nonan;
std::vector<int> dummy;
pcl::removeNaNFromPointCloud<pcl::PointXYZRGB> (*cloud_organized, cloud_organized_nonan, dummy);
-
+
// Viewpoint
const float vp_x = cloud_organized_nonan.sensor_origin_[0];
const float vp_y = cloud_organized_nonan.sensor_origin_[1];
const float vp_z = cloud_organized_nonan.sensor_origin_[2];
-
+
// Search parameters
const int k = 5;
std::vector<std::vector<int> > k_indices;
std::vector<std::vector<float> > k_sqr_distances;
-
+
// Estimated and refined normal containers
pcl::PointCloud<pcl::PointXYZRGBNormal> cloud_organized_normal;
pcl::PointCloud<pcl::PointXYZRGBNormal> cloud_organized_normal_refined;
-
+
/*
* Neighbor search
*/
-
+
// Search for neighbors
pcl::search::KdTree<pcl::PointXYZRGB> kdtree;
kdtree.setInputCloud (cloud_organized_nonan.makeShared ());
kdtree.nearestKSearch (cloud_organized_nonan, std::vector<int> (), k, k_indices, k_sqr_distances);
-
+
/*
* Estimate normals
*/
-
+
// Run estimation
pcl::NormalEstimation<pcl::PointXYZRGB, pcl::PointXYZRGBNormal> ne;
cloud_organized_normal.reserve (cloud_organized_nonan.size ());
// Store
cloud_organized_normal.push_back (normali);
}
-
+
/*
* Refine normals
*/
-
+
// Run refinement
pcl::NormalRefinement<pcl::PointXYZRGBNormal> nr (k_indices, k_sqr_distances);
nr.setInputCloud (cloud_organized_normal.makeShared());
nr.setMaxIterations (15);
nr.setConvergenceThreshold (0.00001f);
nr.filter (cloud_organized_normal_refined);
-
+
/*
* Find dominant plane in the scene
*/
-
+
// Calculate SAC model
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
seg.setDistanceThreshold (0.005);
seg.setInputCloud (cloud_organized_normal.makeShared ());
seg.segment (*inliers, *coefficients);
-
+
// Read out SAC model
const std::vector<int>& idx_table = inliers->indices;
float a = coefficients->values[0];
float b = coefficients->values[1];
float c = coefficients->values[2];
const float d = coefficients->values[3];
-
+
// Find a point on the plane [0 0 z] => z = -d / c
pcl::PointXYZ p_table;
p_table.x = 0.0f;
p_table.y = 0.0f;
p_table.z = -d / c;
-
+
// Use the point to orient the SAC normal correctly
- pcl::flipNormalTowardsViewpoint (p_table, vp_x, vp_y, vp_z, a, b, c);
-
+ pcl::flipNormalTowardsViewpoint (p_table, vp_x, vp_y, vp_z, a, b, c);
+
/*
* Test: check that the refined table normals are closer to the SAC model normal
*/
-
+
// Errors for the two normal sets and their means
std::vector<float> errs_est;
float err_est_mean = 0.0f;
std::vector<float> errs_refined;
float err_refined_mean = 0.0f;
-
+
// Number of zero or NaN vectors produced by refinement
int num_zeros = 0;
int num_nans = 0;
-
+
// Loop
for (unsigned int i = 0; i < idx_table.size (); ++i)
{
float tmp;
-
+
// Estimated (need to avoid zeros and NaNs)
const pcl::PointXYZRGBNormal& calci = cloud_organized_normal[idx_table[i]];
if ((fabsf (calci.normal_x) + fabsf (calci.normal_y) + fabsf (calci.normal_z)) > 0.0f)
err_est_mean += tmp;
}
}
-
+
// Refined
const pcl::PointXYZRGBNormal& refinedi = cloud_organized_normal_refined[idx_table[i]];
if ((fabsf (refinedi.normal_x) + fabsf (refinedi.normal_y) + fabsf (refinedi.normal_z)) > 0.0f)
++num_zeros;
}
}
-
+
// Mean errors
err_est_mean /= static_cast<float> (errs_est.size ());
err_refined_mean /= static_cast<float> (errs_refined.size ());
-
+
// Error variance of estimated
float err_est_var = 0.0f;
for (unsigned int i = 0; i < errs_est.size (); ++i)
err_est_var = (errs_est[i] - err_est_mean) * (errs_est[i] - err_est_mean);
err_est_var /= static_cast<float> (errs_est.size () - 1);
-
+
// Error variance of refined
float err_refined_var = 0.0f;
for (unsigned int i = 0; i < errs_refined.size (); ++i)
err_refined_var = (errs_refined[i] - err_refined_mean) * (errs_refined[i] - err_refined_mean);
err_refined_var /= static_cast<float> (errs_refined.size () - 1);
-
+
// Refinement should not produce any zeros and NaNs
EXPECT_EQ(num_zeros, 0);
EXPECT_EQ(num_nans, 0);
-
+
// Expect mean/variance of error of refined to be smaller, i.e. closer to SAC model
EXPECT_LT(err_refined_mean, err_est_mean);
EXPECT_LT(err_refined_var, err_est_var);
covariance_sampling.setNormals (cloud_walls_normals);
covariance_sampling.setNumberOfSamples (static_cast<unsigned int> (cloud_walls_normals->size ()) / 4);
double cond_num_walls = covariance_sampling.computeConditionNumber ();
- EXPECT_NEAR (113.29773, cond_num_walls, 1e-1);
+
+ // Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
+ EXPECT_NEAR (113.29773, cond_num_walls, 10.);
IndicesPtr walls_indices (new std::vector<int> ());
covariance_sampling.filter (*walls_indices);
-
covariance_sampling.setIndices (walls_indices);
double cond_num_walls_sampled = covariance_sampling.computeConditionNumber ();
- EXPECT_NEAR (22.11506, cond_num_walls_sampled, 1e-1);
- EXPECT_EQ (686, (*walls_indices)[0]);
- EXPECT_EQ (1900, (*walls_indices)[walls_indices->size () / 4]);
- EXPECT_EQ (1278, (*walls_indices)[walls_indices->size () / 2]);
- EXPECT_EQ (2960, (*walls_indices)[walls_indices->size () * 3 / 4]);
- EXPECT_EQ (2060, (*walls_indices)[walls_indices->size () - 1]);
+ // Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
+ EXPECT_NEAR (22.11506, cond_num_walls_sampled, 2.);
+
+ // Ensure it respects the requested sampling size
+ EXPECT_EQ (static_cast<unsigned int> (cloud_walls_normals->size ()) / 4, walls_indices->size ());
covariance_sampling.setInputCloud (cloud_turtle_normals);
covariance_sampling.setNormals (cloud_turtle_normals);
covariance_sampling.setIndices (IndicesPtr ());
covariance_sampling.setNumberOfSamples (static_cast<unsigned int> (cloud_turtle_normals->size ()) / 8);
double cond_num_turtle = covariance_sampling.computeConditionNumber ();
- EXPECT_NEAR (cond_num_turtle, 20661.7663, 0.5);
+
+ // Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
+ EXPECT_NEAR (cond_num_turtle, 20661.7663, 2e4);
IndicesPtr turtle_indices (new std::vector<int> ());
covariance_sampling.filter (*turtle_indices);
covariance_sampling.setIndices (turtle_indices);
double cond_num_turtle_sampled = covariance_sampling.computeConditionNumber ();
- EXPECT_NEAR (cond_num_turtle_sampled, 5795.5057, 0.5);
- EXPECT_EQ ((*turtle_indices)[0], 80344);
- EXPECT_EQ ((*turtle_indices)[turtle_indices->size () / 4], 145982);
- EXPECT_EQ ((*turtle_indices)[turtle_indices->size () / 2], 104557);
- EXPECT_EQ ((*turtle_indices)[turtle_indices->size () * 3 / 4], 41512);
- EXPECT_EQ ((*turtle_indices)[turtle_indices->size () - 1], 136885);
+ // Conditioning number should be loosely close to the expected number. Adopting 10% of the reference value
+ EXPECT_NEAR (cond_num_turtle_sampled, 5795.5057, 5e3);
+
+ // Ensure it respects the requested sampling size
+ EXPECT_EQ (static_cast<unsigned int> (cloud_turtle_normals->size ()) / 8, turtle_indices->size ());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
FILES test_grabbers.cpp
LINK_WITH pcl_gtest pcl_io
ARGUMENTS "${PCL_SOURCE_DIR}/test/grabber_sequences")
+
+ PCL_ADD_TEST(io_ply_io test_ply_io
+ FILES test_ply_io.cpp
+ LINK_WITH pcl_gtest pcl_io)
+
# Uses VTK readers to verify
if (VTK_FOUND AND NOT ANDROID)
include_directories(${VTK_INCLUDE_DIRS})
grabber.registerCallback (fxn);
grabber.start ();
// 1 second should be /plenty/ of time
- boost::this_thread::sleep (boost::posix_time::microseconds (1E6));
+ boost::this_thread::sleep (boost::posix_time::seconds (1));
grabber.stop ();
//// Make sure they match
boost::this_thread::sleep (boost::posix_time::microseconds (10000));
if (++niter > 100)
{
- ASSERT_TRUE (false);
+ #ifdef PCL_BUILT_WITH_VTK
+ FAIL ();
+ #endif
return;
}
}
boost::this_thread::sleep (boost::posix_time::microseconds (10000));
if (++niter > 100)
{
- ASSERT_TRUE (false);
+ #ifdef PCL_BUILT_WITH_VTK
+ FAIL ();
+ #endif
return;
}
}
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/io/ascii_io.h>
+#include <pcl/io/obj_io.h>
#include <fstream>
#include <locale>
#include <stdexcept>
EXPECT_EQ (blob.fields[1].name, "_");
EXPECT_EQ (blob.fields[1].offset, 4 * 33);
EXPECT_EQ (blob.fields[1].count, 10);
- EXPECT_EQ (blob.fields[1].datatype, pcl::PCLPointField::FLOAT32);
+ EXPECT_EQ (blob.fields[1].datatype, (uint8_t) -1);
EXPECT_EQ (blob.fields[2].name, "x");
EXPECT_EQ (blob.fields[2].offset, 4 * 33 + 10 * 1);
EXPECT_EQ (val[30], 0);
EXPECT_EQ (val[31], 0);
EXPECT_EQ (val[32], 0);
+
+ remove ("complex_ascii.pcd");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
EXPECT_FLOAT_EQ (float (b8), -250.05f);
memcpy (&b8, &blob.data[blob.fields[7].offset + sizeof (double)], sizeof (double));
EXPECT_FLOAT_EQ (float (b8), -251.05f);
+
+ remove ("all_types.pcd");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
EXPECT_FLOAT_EQ (cloud.points[0].y, first.y); // test for fromPCLPointCloud2 ()
EXPECT_FLOAT_EQ (cloud.points[0].z, first.z); // test for fromPCLPointCloud2 ()
EXPECT_FLOAT_EQ (cloud.points[0].intensity, first.intensity); // test for fromPCLPointCloud2 ()
+
+ remove ("test_pcl_io_ascii.pcd");
+ remove ("test_pcl_io_binary.pcd");
+ remove ("test_pcl_io.pcd");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].y, last.y); // test for fromPCLPointCloud2 ()
EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].z, last.z); // test for fromPCLPointCloud2 ()
EXPECT_FLOAT_EQ (cloud.points[nr_p - 1].intensity, last.intensity); // test for fromPCLPointCloud2 ()
+
+ remove ("test_pcl_io.pcd");
}
TEST (PCL, PCDReaderWriterASCIIColorPrecision)
EXPECT_EQ (cloud[i].g, cloud_ascii[i].g);
EXPECT_EQ (cloud[i].b, cloud_ascii[i].b);
}
+
+ remove ("temp_binary_color.pcd");
+ remove ("temp_ascii_color.pcd");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, ASCIIReader)
+TEST (PCL, ASCIIRead)
{
PointCloud<PointXYZI> cloud, rcloud;
EXPECT_FLOAT_EQ(cloud.points[i].intensity, rcloud.points[i].intensity);
}
+ remove ("test_pcd.txt");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST (PCL, PLYReaderWriter)
+TEST(PCL, OBJRead)
{
- pcl::PCLPointCloud2 cloud_blob, cloud_blob2;
- PointCloud<PointXYZI> cloud, cloud2;
-
- cloud.width = 640;
- cloud.height = 480;
- cloud.resize (cloud.width * cloud.height);
- cloud.is_dense = true;
-
- srand (static_cast<unsigned int> (time (NULL)));
- size_t nr_p = cloud.size ();
- // Randomly create a new point cloud
- for (size_t i = 0; i < nr_p; ++i)
- {
- cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
- cloud[i].intensity = static_cast<float> (i);
- }
-
- // Convert from data type to blob
- toPCLPointCloud2 (cloud, cloud_blob);
-
- EXPECT_EQ (cloud_blob.width, cloud.width); // test for toPCLPointCloud2 ()
- EXPECT_EQ (cloud_blob.height, cloud.height); // test for toPCLPointCloud2 ()
- EXPECT_EQ (cloud_blob.is_dense, cloud.is_dense); // test for toPCLPointCloud2 ()
- EXPECT_EQ (cloud_blob.data.size (),
- cloud_blob.width * cloud_blob.height * sizeof (PointXYZI));
+ std::ofstream fs;
+ fs.open ("test_obj.obj");
+ fs << "# Blender v2.79 (sub 4) OBJ File: ''\n"
+ "mtllib test_obj.mtl\n"
+ "o Cube_Cube.001\n"
+ "v -1.000000 -1.000000 1.000000\n"
+ "v -1.000000 1.000000 1.000000\n"
+ "v -1.000000 -1.000000 -1.000000\n"
+ "v -1.000000 1.000000 -1.000000\n"
+ "v 1.000000 -1.000000 1.000000\n"
+ "v 1.000000 1.000000 1.000000\n"
+ "v 1.000000 -1.000000 -1.000000\n"
+ "v 1.000000 1.000000 -1.000000\n"
+ "vn -1.0000 0.0000 0.0000\n"
+ "vn 0.0000 0.0000 -1.0000\n"
+ "vn 1.0000 0.0000 0.0000\n"
+ "vn 0.0000 0.0000 1.0000\n"
+ "vn 0.0000 -1.0000 0.0000\n"
+ "vn 0.0000 1.0000 0.0000\n"
+ "# Redundant vertex normal to test error handling\n"
+ "vn 0.0000 0.0000 0.0000\n"
+ "usemtl None\n"
+ "s off\n"
+ "f 1//1 2//1 4//1 3//1\n"
+ "f 3//2 4//2 8//2 7//2\n"
+ "f 7//3 8//3 6//3 5//3\n"
+ "f 5//4 6//4 2//4 1//4\n"
+ "f 3//5 7//5 5//5 1//5\n"
+ "f 8//6 4//6 2//6 6//6\n";
- // test for toPCLPointCloud2 ()
- PLYWriter writer;
- writer.write ("test_pcl_io.ply", cloud_blob, Eigen::Vector4f::Zero (), Eigen::Quaternionf::Identity (), true, true);
+ fs.close ();
+ fs.open ("test_obj.mtl");
+ fs << "# Blender MTL File: 'None'\n"
+ "# Material Count: 1\n"
+ "newmtl None\n"
+ "Ns 0\n"
+ "Ka 0.000000 0.000000 0.000000\n"
+ "Kd 0.8 0.8 0.8\n"
+ "Ks 0.8 0.8 0.8\n"
+ "d 1\n"
+ "illum 2\n";
- PLYReader reader;
- reader.read ("test_pcl_io.ply", cloud_blob2);
- //PLY DOES preserve organiziation
- EXPECT_EQ (cloud_blob.width * cloud_blob.height, cloud_blob2.width * cloud_blob2.height);
- EXPECT_EQ (cloud_blob.is_dense, cloud.is_dense);
- EXPECT_EQ (size_t (cloud_blob2.data.size ()), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
- cloud_blob2.width * cloud_blob2.height * sizeof (PointXYZ)); // test for loadPLYFile ()
+ fs.close ();
- // Convert from blob to data type
- fromPCLPointCloud2 (cloud_blob2, cloud2);
+ pcl::PCLPointCloud2 blob;
+ pcl::OBJReader objreader = pcl::OBJReader();
+ int res = objreader.read ("test_obj.obj", blob);
+ EXPECT_NE (int (res), -1);
+ EXPECT_EQ (blob.width, 8);
+ EXPECT_EQ (blob.height, 1);
+ EXPECT_EQ (blob.is_dense, true);
+ EXPECT_EQ (blob.data.size (), 8 * 6 * 4); // 8 verts, 6 values per vertex (vx,vy,vz,vnx,vny,vnz), 4 byte per value
- // EXPECT_EQ (cloud.width, cloud2.width); // test for fromPCLPointCloud2 ()
- // EXPECT_EQ (cloud.height, cloud2.height); // test for fromPCLPointCloud2 ()
- // EXPECT_EQ (cloud.is_dense, cloud2.is_dense); // test for fromPCLPointCloud2 ()
- EXPECT_EQ (cloud.size (), cloud2.size ()); // test for fromPCLPointCloud2 ()
+ // Check fields
+ EXPECT_EQ (blob.fields[0].name, "x");
+ EXPECT_EQ (blob.fields[0].offset, 0);
+ EXPECT_EQ (blob.fields[0].count, 1);
+ EXPECT_EQ (blob.fields[0].datatype, pcl::PCLPointField::FLOAT32);
- for (uint32_t counter = 0; counter < cloud.size (); ++counter)
- {
- EXPECT_FLOAT_EQ (cloud[counter].x, cloud2[counter].x); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud[counter].y, cloud2[counter].y); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud[counter].z, cloud2[counter].z); // test for fromPCLPointCloud2 ()
- EXPECT_FLOAT_EQ (cloud[counter].intensity, cloud2[counter].intensity); // test for fromPCLPointCloud2 ()
- }
-}
+ EXPECT_EQ (blob.fields[1].name, "y");
+ EXPECT_EQ (blob.fields[1].offset, 4 * 1);
+ EXPECT_EQ (blob.fields[1].count, 1);
+ EXPECT_EQ (blob.fields[1].datatype, pcl::PCLPointField::FLOAT32);
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-class PLYTest : public ::testing::Test
-{
- protected:
+ EXPECT_EQ (blob.fields[2].name, "z");
+ EXPECT_EQ (blob.fields[2].offset, 4 * 2);
+ EXPECT_EQ (blob.fields[2].count, 1);
+ EXPECT_EQ (blob.fields[2].datatype, pcl::PCLPointField::FLOAT32);
- PLYTest () : mesh_file_ply_("ply_color_mesh.ply")
- {
- std::ofstream fs;
- fs.open (mesh_file_ply_.c_str ());
- fs << "ply\n"
- "format ascii 1.0\n"
- "element vertex 4\n"
- "property float x\n"
- "property float y\n"
- "property float z\n"
- "property uchar red\n"
- "property uchar green\n"
- "property uchar blue\n"
- "property uchar alpha\n"
- "element face 2\n"
- "property list uchar int vertex_indices\n"
- "end_header\n"
- "4.23607 0 1.61803 255 0 0 255\n"
- "2.61803 2.61803 2.61803 0 255 0 0\n"
- "0 1.61803 4.23607 0 0 255 128\n"
- "0 -1.61803 4.23607 255 255 255 128\n"
- "3 0 1 2\n"
- "3 1 2 3\n";
- fs.close ();
-
- // Test colors from ply_benchmark.ply
- rgba_1_ = static_cast<uint32_t> (255) << 24 | static_cast<uint32_t> (255) << 16 |
- static_cast<uint32_t> (0) << 8 | static_cast<uint32_t> (0);
- rgba_2_ = static_cast<uint32_t> (0) << 24 | static_cast<uint32_t> (0) << 16 |
- static_cast<uint32_t> (255) << 8 | static_cast<uint32_t> (0);
- rgba_3_ = static_cast<uint32_t> (128) << 24 | static_cast<uint32_t> (0) << 16 |
- static_cast<uint32_t> (0) << 8 | static_cast<uint32_t> (255);
- rgba_4_ = static_cast<uint32_t> (128) << 24 | static_cast<uint32_t> (255) << 16 |
- static_cast<uint32_t> (255) << 8 | static_cast<uint32_t> (255);
- }
+ EXPECT_EQ (blob.fields[3].name, "normal_x");
+ EXPECT_EQ (blob.fields[3].offset, 4 * 3);
+ EXPECT_EQ (blob.fields[3].count, 1);
+ EXPECT_EQ (blob.fields[3].datatype, pcl::PCLPointField::FLOAT32);
- virtual
- ~PLYTest () { remove (mesh_file_ply_.c_str ()); }
+ EXPECT_EQ (blob.fields[4].name, "normal_y");
+ EXPECT_EQ (blob.fields[4].offset, 4 * 4);
+ EXPECT_EQ (blob.fields[4].count, 1);
+ EXPECT_EQ (blob.fields[4].datatype, pcl::PCLPointField::FLOAT32);
- std::string mesh_file_ply_;
- uint32_t rgba_1_;
- uint32_t rgba_2_;
- uint32_t rgba_3_;
- uint32_t rgba_4_;
-};
+ EXPECT_EQ (blob.fields[5].name, "normal_z");
+ EXPECT_EQ (blob.fields[5].offset, 4 * 5);
+ EXPECT_EQ (blob.fields[5].count, 1);
+ EXPECT_EQ (blob.fields[5].datatype, pcl::PCLPointField::FLOAT32);
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST_F (PLYTest, LoadPLYFileColoredASCIIIntoBlob)
-{
- int res;
- uint32_t rgba;
-
- PCLPointCloud2 cloud_blob;
- uint32_t ps;
- int32_t offset = -1;
-
- // check if loading is ok
- res = loadPLYFile (mesh_file_ply_, cloud_blob);
- ASSERT_EQ (res, 0);
-
- // blob has proper structure
- EXPECT_EQ (cloud_blob.height, 1);
- EXPECT_EQ (cloud_blob.width, 4);
- EXPECT_EQ (cloud_blob.fields.size(), 4);
- EXPECT_FALSE (cloud_blob.is_bigendian);
- EXPECT_EQ (cloud_blob.point_step, 16);
- EXPECT_EQ (cloud_blob.row_step, 16 * 4);
- EXPECT_EQ (cloud_blob.data.size(), 16 * 4);
- // EXPECT_TRUE (cloud_blob.is_dense); // this is failing and it shouldnt?
-
- // scope blob data
- ps = cloud_blob.point_step;
- for (size_t i = 0; i < cloud_blob.fields.size (); ++i)
- if (cloud_blob.fields[i].name == std::string("rgba"))
- offset = static_cast<int32_t> (cloud_blob.fields[i].offset);
-
- ASSERT_GE (offset, 0);
-
- // 1st point
- rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[offset]);
- ASSERT_EQ (rgba, rgba_1_);
-
- // 2th point
- rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[ps + offset]);
- ASSERT_EQ (rgba, rgba_2_);
-
- // 3th point
- rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[2 * ps + offset]);
- ASSERT_EQ (rgba, rgba_3_);
-
- // 4th point
- rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[3 * ps + offset]);
- ASSERT_EQ (rgba, rgba_4_);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-TEST_F (PLYTest, LoadPLYFileColoredASCIIIntoPolygonMesh)
-{
- int res;
- uint32_t rgba;
- PolygonMesh mesh;
- uint32_t ps;
- int32_t offset = -1;
-
- // check if loading is ok
- res = loadPLYFile (mesh_file_ply_, mesh);
- ASSERT_EQ (res, 0);
-
- // blob has proper structure
- EXPECT_EQ (mesh.cloud.height, 1);
- EXPECT_EQ (mesh.cloud.width, 4);
- EXPECT_EQ (mesh.cloud.fields.size(), 4);
- EXPECT_FALSE (mesh.cloud.is_bigendian);
- EXPECT_EQ (mesh.cloud.point_step, 16);
- EXPECT_EQ (mesh.cloud.row_step, 16 * 4);
- EXPECT_EQ (mesh.cloud.data.size(), 16 * 4);
- // EXPECT_TRUE (mesh.cloud.is_dense); // this is failing and it shouldnt?
-
- // scope blob data
- ps = mesh.cloud.point_step;
- for (size_t i = 0; i < mesh.cloud.fields.size (); ++i)
- if (mesh.cloud.fields[i].name == std::string("rgba"))
- offset = static_cast<int32_t> (mesh.cloud.fields[i].offset);
-
- ASSERT_GE (offset, 0);
-
- // 1st point
- rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[offset]);
- ASSERT_EQ (rgba, rgba_1_);
-
- // 2th point
- rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[ps + offset]);
- ASSERT_EQ (rgba, rgba_2_);
-
- // 3th point
- rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[2 * ps + offset]);
- ASSERT_EQ (rgba, rgba_3_);
-
- // 4th point
- rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[3 * ps + offset]);
- ASSERT_EQ (rgba, rgba_4_);
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-template <typename T> class PLYPointCloudTest : public PLYTest { };
-typedef ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_RGB_POINT_TYPES)> RGBPointTypes;
-TYPED_TEST_CASE (PLYPointCloudTest, RGBPointTypes);
-TYPED_TEST (PLYPointCloudTest, LoadPLYFileColoredASCIIIntoPointCloud)
-{
- int res;
- PointCloud<TypeParam> cloud_rgb;
-
- // check if loading is ok
- res = loadPLYFile (PLYTest::mesh_file_ply_, cloud_rgb);
- ASSERT_EQ (res, 0);
-
- // cloud has proper structure
- EXPECT_EQ (cloud_rgb.height, 1);
- EXPECT_EQ (cloud_rgb.width, 4);
- EXPECT_EQ (cloud_rgb.points.size(), 4);
- // EXPECT_TRUE (cloud_rgb.is_dense); // this is failing and it shouldnt?
-
- // scope cloud data
- ASSERT_EQ (cloud_rgb[0].rgba, PLYTest::rgba_1_);
- ASSERT_EQ (cloud_rgb[1].rgba, PLYTest::rgba_2_);
- ASSERT_EQ (cloud_rgb[2].rgba, PLYTest::rgba_3_);
- ASSERT_EQ (cloud_rgb[3].rgba, PLYTest::rgba_4_);
+ remove ("test_obj.obj");
+ remove ("test_obj.mtl");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ASSERT_EQ (cloud.points[0].histogram[i], i);
ASSERT_EQ (cloud.points[1].histogram[i], 33-i);
}
+
+ remove ("v.pcd");
}
EXPECT_EQ (res, 0);
PCDReader reader;
- reader.read<PointXYZ> ("test_pcl_io_compressed.pcd", cloud2);
+ res = reader.read<PointXYZ> ("test_pcl_io_compressed.pcd", cloud2);
+ EXPECT_EQ (res, 0);
EXPECT_EQ (cloud2.width, cloud.width);
EXPECT_EQ (cloud2.height, cloud.height);
EXPECT_EQ (res, 0);
PCDReader reader;
- reader.read<PointXYZRGBNormal> ("test_pcl_io_compressed.pcd", cloud2);
+ res = reader.read<PointXYZRGBNormal> ("test_pcl_io_compressed.pcd", cloud2);
+ EXPECT_EQ (res, 0);
+
+ EXPECT_EQ (cloud2.width, blob.width);
+ EXPECT_EQ (cloud2.height, blob.height);
+ EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
+ EXPECT_EQ (cloud2.points.size (), cloud.points.size ());
+
+ for (size_t i = 0; i < cloud2.points.size (); ++i)
+ {
+ EXPECT_EQ (cloud2.points[i].x, cloud.points[i].x);
+ EXPECT_EQ (cloud2.points[i].y, cloud.points[i].y);
+ EXPECT_EQ (cloud2.points[i].z, cloud.points[i].z);
+ EXPECT_EQ (cloud2.points[i].normal_x, cloud.points[i].normal_x);
+ EXPECT_EQ (cloud2.points[i].normal_y, cloud.points[i].normal_y);
+ EXPECT_EQ (cloud2.points[i].normal_z, cloud.points[i].normal_z);
+ EXPECT_EQ (cloud2.points[i].rgb, cloud.points[i].rgb);
+ }
+
+ remove ("test_pcl_io_compressed.pcd");
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, LZFInMem)
+{
+ PointCloud<PointXYZRGBNormal> cloud;
+ cloud.width = 640;
+ cloud.height = 480;
+ cloud.points.resize (cloud.width * cloud.height);
+ cloud.is_dense = true;
+
+ srand (static_cast<unsigned int> (time (NULL)));
+ size_t nr_p = cloud.points.size ();
+ // Randomly create a new point cloud
+ for (size_t i = 0; i < nr_p; ++i)
+ {
+ cloud.points[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].normal_x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].normal_y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].normal_z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud.points[i].rgb = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ }
+
+ pcl::PCLPointCloud2 blob;
+ pcl::toPCLPointCloud2 (cloud, blob);
+ std::ostringstream oss;
+ PCDWriter writer;
+ int res = writer.writeBinaryCompressed (oss, blob);
+ EXPECT_EQ (res, 0);
+ std::string pcd_str = oss.str ();
+
+ Eigen::Vector4f origin;
+ Eigen::Quaternionf orientation;
+ int pcd_version = -1;
+ int data_type = -1;
+ unsigned int data_idx = 0;
+ std::istringstream iss (pcd_str, std::ios::binary);
+ PCDReader reader;
+ pcl::PCLPointCloud2 blob2;
+ res = reader.readHeader (iss, blob2, origin, orientation, pcd_version, data_type, data_idx);
+ EXPECT_EQ (res, 0);
+ EXPECT_EQ (blob2.width, blob.width);
+ EXPECT_EQ (blob2.height, blob.height);
+ EXPECT_EQ (data_type, 2); // since it was written by writeBinaryCompressed(), it should be compressed.
+
+ const unsigned char *data = reinterpret_cast<const unsigned char *> (pcd_str.data ());
+ res = reader.readBodyBinary (data, blob2, pcd_version, data_type == 2, data_idx);
+ PointCloud<PointXYZRGBNormal> cloud2;
+ pcl::fromPCLPointCloud2 (blob2, cloud2);
+ EXPECT_EQ (res, 0);
EXPECT_EQ (cloud2.width, blob.width);
EXPECT_EQ (cloud2.height, blob.height);
EXPECT_EQ (cloud2.is_dense, cloud.is_dense);
catch (const std::exception&)
{
}
+
+ remove ("test_pcl_io_ascii.pcd");
#endif
}
EXPECT_EQ(rv, 0) << " loadPCDFile " << pcd_file;
EXPECT_GT((int) input_cloud_ptr->width , 0) << "invalid point cloud width from " << pcd_file;
- EXPECT_GT((int) input_cloud_ptr->height, 0) << "invalid point cloud heigth from " << pcd_file;
+ EXPECT_GT((int) input_cloud_ptr->height, 0) << "invalid point cloud height from " << pcd_file;
// iterate over compression profiles
for (int compression_profile = pcl::io::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2017-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <pcl/io/ply_io.h>
+#include <pcl/conversions.h>
+#include <pcl/PolygonMesh.h>
+#include <pcl/PCLPointCloud2.h>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <gtest/gtest.h>
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, PLYReaderWriter)
+{
+ using pcl::PointXYZI;
+ using pcl::PointXYZ;
+
+ pcl::PCLPointCloud2 cloud_blob, cloud_blob2;
+ pcl::PointCloud<PointXYZI> cloud, cloud2;
+
+ cloud.width = 640;
+ cloud.height = 480;
+ cloud.resize (cloud.width * cloud.height);
+ cloud.is_dense = true;
+
+ srand (static_cast<unsigned int> (time (NULL)));
+ size_t nr_p = cloud.size ();
+ // Randomly create a new point cloud
+ for (size_t i = 0; i < nr_p; ++i)
+ {
+ cloud[i].x = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].y = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].z = static_cast<float> (1024 * rand () / (RAND_MAX + 1.0));
+ cloud[i].intensity = static_cast<float> (i);
+ }
+
+ // Convert from data type to blob
+ toPCLPointCloud2 (cloud, cloud_blob);
+
+ EXPECT_EQ (cloud_blob.width, cloud.width); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.height, cloud.height); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.is_dense, cloud.is_dense); // test for toPCLPointCloud2 ()
+ EXPECT_EQ (cloud_blob.data.size (),
+ cloud_blob.width * cloud_blob.height * sizeof (PointXYZI));
+
+ // test for toPCLPointCloud2 ()
+ pcl::PLYWriter writer;
+ writer.write ("test_pcl_io.ply", cloud_blob, Eigen::Vector4f::Zero (), Eigen::Quaternionf::Identity (), true, true);
+
+ pcl::PLYReader reader;
+ reader.read ("test_pcl_io.ply", cloud_blob2);
+ //PLY DOES preserve organiziation
+ EXPECT_EQ (cloud_blob.width * cloud_blob.height, cloud_blob2.width * cloud_blob2.height);
+ EXPECT_EQ (cloud_blob.is_dense, cloud.is_dense);
+ EXPECT_EQ (size_t (cloud_blob2.data.size ()), // PointXYZI is 16*2 (XYZ+1, Intensity+3)
+ cloud_blob2.width * cloud_blob2.height * sizeof (PointXYZ)); // test for loadPLYFile ()
+
+ // Convert from blob to data type
+ fromPCLPointCloud2 (cloud_blob2, cloud2);
+
+ // EXPECT_EQ (cloud.width, cloud2.width); // test for fromPCLPointCloud2 ()
+ // EXPECT_EQ (cloud.height, cloud2.height); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.is_dense, cloud2.is_dense); // test for fromPCLPointCloud2 ()
+ EXPECT_EQ (cloud.size (), cloud2.size ()); // test for fromPCLPointCloud2 ()
+
+ for (uint32_t counter = 0; counter < cloud.size (); ++counter)
+ {
+ EXPECT_FLOAT_EQ (cloud[counter].x, cloud2[counter].x); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[counter].y, cloud2[counter].y); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[counter].z, cloud2[counter].z); // test for fromPCLPointCloud2 ()
+ EXPECT_FLOAT_EQ (cloud[counter].intensity, cloud2[counter].intensity); // test for fromPCLPointCloud2 ()
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+struct PLYTest : public ::testing::Test
+{
+ PLYTest () : mesh_file_ply_("ply_file.ply")
+ {}
+
+ virtual
+ ~PLYTest () { remove (mesh_file_ply_.c_str ()); }
+
+ std::string mesh_file_ply_;
+};
+
+struct PLYColorTest : public PLYTest
+{
+ void SetUp ()
+ {
+ // Test colors from ply_benchmark.ply
+ clr_1_.r = 255;
+ clr_1_.g = 0;
+ clr_1_.b = 0;
+ clr_1_.a = 255;
+
+ clr_2_.r = 0;
+ clr_2_.g = 255;
+ clr_2_.b = 0;
+ clr_2_.a = 0;
+
+ clr_3_.r = 0;
+ clr_3_.g = 0;
+ clr_3_.b = 255;
+ clr_3_.a = 128;
+
+ clr_4_.r = 255;
+ clr_4_.g = 255;
+ clr_4_.b = 255;
+ clr_4_.a = 128;
+
+ std::ofstream fs;
+ fs.open (mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "property uchar red\n"
+ "property uchar green\n"
+ "property uchar blue\n"
+ "property uchar alpha\n"
+ "element face 2\n"
+ "property list uchar int vertex_indices\n"
+ "end_header\n"
+ "4.23607 0 1.61803 "
+ << unsigned (clr_1_.r) << ' '
+ << unsigned (clr_1_.g) << ' '
+ << unsigned (clr_1_.b) << ' '
+ << unsigned (clr_1_.a) << "\n"
+ "2.61803 2.61803 2.61803 "
+ << unsigned (clr_2_.r) << ' '
+ << unsigned (clr_2_.g) << ' '
+ << unsigned (clr_2_.b) << ' '
+ << unsigned (clr_2_.a) << "\n"
+ "0 1.61803 4.23607 "
+ << unsigned (clr_3_.r) << ' '
+ << unsigned (clr_3_.g) << ' '
+ << unsigned (clr_3_.b) << ' '
+ << unsigned (clr_3_.a) << "\n"
+ "0 -1.61803 4.23607 "
+ << unsigned (clr_4_.r) << ' '
+ << unsigned (clr_4_.g) << ' '
+ << unsigned (clr_4_.b) << ' '
+ << unsigned (clr_4_.a) << "\n"
+ "3 0 1 2\n"
+ "3 1 2 3\n";
+ fs.close ();
+ }
+
+ pcl::RGB clr_1_;
+ pcl::RGB clr_2_;
+ pcl::RGB clr_3_;
+ pcl::RGB clr_4_;
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST_F (PLYColorTest, LoadPLYFileColoredASCIIIntoBlob)
+{
+ int res;
+ uint32_t rgba;
+
+ pcl::PCLPointCloud2 cloud_blob;
+ uint32_t ps;
+ int32_t offset = -1;
+
+ // check if loading is ok
+ res = pcl::io::loadPLYFile (mesh_file_ply_, cloud_blob);
+ ASSERT_EQ (res, 0);
+
+ // blob has proper structure
+ EXPECT_EQ (cloud_blob.height, 1);
+ EXPECT_EQ (cloud_blob.width, 4);
+ EXPECT_EQ (cloud_blob.fields.size(), 4);
+ EXPECT_FALSE (cloud_blob.is_bigendian);
+ EXPECT_EQ (cloud_blob.point_step, 16);
+ EXPECT_EQ (cloud_blob.row_step, 16 * 4);
+ EXPECT_EQ (cloud_blob.data.size(), 16 * 4);
+ EXPECT_TRUE (cloud_blob.is_dense);
+
+ // scope blob data
+ ps = cloud_blob.point_step;
+ for (size_t i = 0; i < cloud_blob.fields.size (); ++i)
+ if (cloud_blob.fields[i].name == std::string("rgba"))
+ offset = static_cast<int32_t> (cloud_blob.fields[i].offset);
+
+ ASSERT_GE (offset, 0);
+
+ // 1st point
+ rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[offset]);
+ ASSERT_EQ (rgba, clr_1_.rgba);
+
+ // 2th point
+ rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[ps + offset]);
+ ASSERT_EQ (rgba, clr_2_.rgba);
+
+ // 3th point
+ rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[2 * ps + offset]);
+ ASSERT_EQ (rgba, clr_3_.rgba);
+
+ // 4th point
+ rgba = *reinterpret_cast<uint32_t *> (&cloud_blob.data[3 * ps + offset]);
+ ASSERT_EQ (rgba, clr_4_.rgba);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST_F (PLYColorTest, LoadPLYFileColoredASCIIIntoPolygonMesh)
+{
+ int res;
+ uint32_t rgba;
+ pcl::PolygonMesh mesh;
+ uint32_t ps;
+ int32_t offset = -1;
+
+ // check if loading is ok
+ res = pcl::io::loadPLYFile (mesh_file_ply_, mesh);
+ ASSERT_EQ (res, 0);
+
+ // blob has proper structure
+ EXPECT_EQ (mesh.cloud.height, 1);
+ EXPECT_EQ (mesh.cloud.width, 4);
+ EXPECT_EQ (mesh.cloud.fields.size(), 4);
+ EXPECT_FALSE (mesh.cloud.is_bigendian);
+ EXPECT_EQ (mesh.cloud.point_step, 16);
+ EXPECT_EQ (mesh.cloud.row_step, 16 * 4);
+ EXPECT_EQ (mesh.cloud.data.size(), 16 * 4);
+ EXPECT_TRUE (mesh.cloud.is_dense);
+
+ // scope blob data
+ ps = mesh.cloud.point_step;
+ for (size_t i = 0; i < mesh.cloud.fields.size (); ++i)
+ if (mesh.cloud.fields[i].name == std::string("rgba"))
+ offset = static_cast<int32_t> (mesh.cloud.fields[i].offset);
+
+ ASSERT_GE (offset, 0);
+
+ // 1st point
+ rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[offset]);
+ ASSERT_EQ (rgba, clr_1_.rgba);
+
+ // 2th point
+ rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[ps + offset]);
+ ASSERT_EQ (rgba, clr_2_.rgba);
+
+ // 3th point
+ rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[2 * ps + offset]);
+ ASSERT_EQ (rgba, clr_3_.rgba);
+
+ // 4th point
+ rgba = *reinterpret_cast<uint32_t *> (&mesh.cloud.data[3 * ps + offset]);
+ ASSERT_EQ (rgba, clr_4_.rgba);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template <typename T> class PLYPointCloudTest : public PLYColorTest { };
+typedef ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_RGB_POINT_TYPES)> RGBPointTypes;
+TYPED_TEST_CASE (PLYPointCloudTest, RGBPointTypes);
+TYPED_TEST (PLYPointCloudTest, LoadPLYFileColoredASCIIIntoPointCloud)
+{
+ int res;
+ pcl::PointCloud<TypeParam> cloud_rgb;
+
+ // check if loading is ok
+ res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud_rgb);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_EQ (cloud_rgb.height, 1);
+ EXPECT_EQ (cloud_rgb.width, 4);
+ EXPECT_EQ (cloud_rgb.points.size(), 4);
+ EXPECT_TRUE (cloud_rgb.is_dense);
+
+ // scope cloud data
+ ASSERT_EQ (cloud_rgb[0].rgba, PLYColorTest::clr_1_.rgba);
+ ASSERT_EQ (cloud_rgb[1].rgba, PLYColorTest::clr_2_.rgba);
+ ASSERT_EQ (cloud_rgb[2].rgba, PLYColorTest::clr_3_.rgba);
+ ASSERT_EQ (cloud_rgb[3].rgba, PLYColorTest::clr_4_.rgba);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template<typename T>
+struct PLYCoordinatesIsDenseTest : public PLYTest {};
+
+typedef ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_XYZ_POINT_TYPES)> XYZPointTypes;
+TYPED_TEST_CASE (PLYCoordinatesIsDenseTest, XYZPointTypes);
+
+TYPED_TEST (PLYCoordinatesIsDenseTest, NaNInCoordinates)
+{
+ // create file
+ std::ofstream fs;
+ fs.open (PLYTest::mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "end_header\n"
+ "4.23607 NaN 1.61803 \n"
+ "2.61803 2.61803 2.61803 \n"
+ "0 1.61803 4.23607 \n"
+ "0 -1.61803 4.23607 \n";
+ fs.close ();
+
+ // Set up cloud
+ pcl::PointCloud<TypeParam> cloud;
+
+ // check if loading is ok
+ const int res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_FALSE (cloud.is_dense);
+}
+
+TYPED_TEST (PLYCoordinatesIsDenseTest, nanInCoordinates)
+{
+ // create file
+ std::ofstream fs;
+ fs.open (PLYTest::mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "end_header\n"
+ "4.23607 0 1.61803 \n"
+ "2.61803 2.61803 2.61803 \n"
+ "nan 1.61803 4.23607 \n"
+ "0 -1.61803 4.23607 \n";
+ fs.close ();
+
+ // Set up cloud
+ pcl::PointCloud<TypeParam> cloud;
+
+ // check if loading is ok
+ const int res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_FALSE (cloud.is_dense);
+}
+
+TYPED_TEST (PLYCoordinatesIsDenseTest, InfInCoordinates)
+{
+ // create file
+ std::ofstream fs;
+ fs.open (PLYTest::mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "end_header\n"
+ "4.23607 0 1.61803 \n"
+ "2.61803 2.61803 Inf \n"
+ "0 1.61803 4.23607 \n"
+ "0 -1.61803 4.23607 \n";
+ fs.close ();
+
+ // Set up cloud
+ pcl::PointCloud<TypeParam> cloud;
+
+ // check if loading is ok
+ const int res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_FALSE (cloud.is_dense);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+template<typename T>
+struct PLYNormalsIsDenseTest : public PLYTest {};
+
+typedef ::testing::Types<BOOST_PP_SEQ_ENUM (PCL_NORMAL_POINT_TYPES)> NormalPointTypes;
+TYPED_TEST_CASE (PLYNormalsIsDenseTest, NormalPointTypes);
+
+TYPED_TEST (PLYNormalsIsDenseTest, NaNInNormals)
+{
+ // create file
+ std::ofstream fs;
+ fs.open (PLYTest::mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float normal_x\n"
+ "property float normal_y\n"
+ "property float normal_z\n"
+ "end_header\n"
+ "4.23607 0 1.61803 \n"
+ "2.61803 2.61803 NaN \n"
+ "0 1.61803 4.23607 \n"
+ "0 -1.61803 4.23607 \n";
+ fs.close ();
+
+ // Set up cloud
+ pcl::PointCloud<TypeParam> cloud;
+
+ // check if loading is ok
+ const int res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_FALSE (cloud.is_dense);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+TEST_F (PLYTest, NaNInIntensity)
+{
+ // create file
+ std::ofstream fs;
+ fs.open (mesh_file_ply_.c_str ());
+ fs << "ply\n"
+ "format ascii 1.0\n"
+ "element vertex 4\n"
+ "property float x\n"
+ "property float y\n"
+ "property float z\n"
+ "property float intensity\n"
+ "end_header\n"
+ "4.23607 0 1.61803 3.13223\n"
+ "2.61803 2.61803 0 3.13223\n"
+ "0 1.61803 4.23607 NaN\n"
+ "0 -1.61803 4.23607 3.13223\n";
+ fs.close ();
+
+ // Set up cloud
+ pcl::PointCloud<pcl::PointXYZI> cloud;
+
+ // check if loading is ok
+ const int res = pcl::io::loadPLYFile (PLYTest::mesh_file_ply_, cloud);
+ ASSERT_EQ (res, 0);
+
+ // cloud has proper structure
+ EXPECT_FALSE (cloud.is_dense);
+}
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
+/* ]--- */
EXPECT_EQ (mesh_binary_pcl.polygons[i].vertices[j], mesh.polygons[i].vertices[j]);
}
}
+
+ remove ("test_mesh_ascii.ply");
+ remove ("test_mesh_binary.ply");
}
TEST (PCL, PLYPolygonMeshColoredIO)
EXPECT_EQ (mesh_rgba_binary_pcl.polygons[i].vertices[j], mesh.polygons[i].vertices[j]);
}
}
+
+ remove ("test_mesh_rgb_ascii.ply");
+ remove ("test_mesh_rgba_ascii.ply");
+ remove ("test_mesh_rgb_binary.ply");
+ remove ("test_mesh_rgba_binary.ply");
}
/* ---[ */
PointCloud<MyPoint> cloud, cloud_big;
-// Includ the implementation so that KdTree<MyPoint> works
+// Include the implementation so that KdTree<MyPoint> works
#include <pcl/kdtree/impl/kdtree_flann.hpp>
void
PCL_ADD_TEST(a_octree_test test_octree
FILES test_octree.cpp
LINK_WITH pcl_gtest pcl_common)
+ PCL_ADD_TEST(a_octree_iterator_test test_octree_iterator
+ FILES test_octree_iterator.cpp
+ LINK_WITH pcl_gtest pcl_common pcl_octree)
endif (build)
unsigned int leaf_node_counter = 0;
// iterate over tree
- OctreePointCloudPointVector<PointXYZ>::LeafNodeIterator it2;
- const OctreePointCloudPointVector<PointXYZ>::LeafNodeIterator it2_end = octree.leaf_end();
- for (it2 = octree.leaf_begin(); it2 != it2_end; ++it2)
+ OctreePointCloudPointVector<PointXYZ>::LeafNodeDepthFirstIterator it2;
+ const OctreePointCloudPointVector<PointXYZ>::LeafNodeDepthFirstIterator it2_end = octree.leaf_depth_end();
+ for (it2 = octree.leaf_depth_begin(); it2 != it2_end; ++it2)
{
++leaf_node_counter;
unsigned int depth = it2.getCurrentOctreeDepth ();
octree.addPointsFromInputCloud ();
// test iterator
- OctreePointCloudPointVector<PointXYZ>::LeafNodeIterator it;
- const OctreePointCloudPointVector<PointXYZ>::LeafNodeIterator it_end = octree.leaf_end();
+ OctreePointCloudPointVector<PointXYZ>::LeafNodeDepthFirstIterator it;
+ const OctreePointCloudPointVector<PointXYZ>::LeafNodeDepthFirstIterator it_end = octree.leaf_depth_end();
unsigned int leaf_count = 0;
std::vector<int> indexVector;
// iterate over tree
- for (it = octree.leaf_begin(); it != it_end; ++it)
+ for (it = octree.leaf_depth_begin(); it != it_end; ++it)
{
OctreeNode* node = it.getCurrentOctreeNode ();
octreeA.addPointsFromInputCloud ();
// instantiate iterator for octreeA
- OctreePointCloud<PointXYZ>::LeafNodeIterator it1;
- OctreePointCloud<PointXYZ>::LeafNodeIterator it1_end = octreeA.leaf_end();
+ OctreePointCloud<PointXYZ>::LeafNodeDepthFirstIterator it1;
+ OctreePointCloud<PointXYZ>::LeafNodeDepthFirstIterator it1_end = octreeA.leaf_depth_end();
std::vector<int> indexVector;
unsigned int leafNodeCounter = 0;
- for (it1 = octreeA.leaf_begin(); it1 != it1_end; ++it1)
+ for (it1 = octreeA.leaf_depth_begin(); it1 != it1_end; ++it1)
{
it1.getLeafContainer().getPointIndices(indexVector);
leafNodeCounter++;
bool idxInResults;
const PointXYZ& pt = cloudIn->points[i];
- inBox = (pt.x > lowerBoxCorner (0)) && (pt.x < upperBoxCorner (0)) &&
- (pt.y > lowerBoxCorner (1)) && (pt.y < upperBoxCorner (1)) &&
- (pt.z > lowerBoxCorner (2)) && (pt.z < upperBoxCorner (2));
+ inBox = (pt.x >= lowerBoxCorner (0)) && (pt.x <= upperBoxCorner (0)) &&
+ (pt.y >= lowerBoxCorner (1)) && (pt.y <= upperBoxCorner (1)) &&
+ (pt.z >= lowerBoxCorner (2)) && (pt.z <= upperBoxCorner (2));
idxInResults = false;
for (j = 0; (j < k_indices.size ()) && (!idxInResults); ++j)
--- /dev/null
+/*
+ * Software License Agreement (BSD License)
+ *
+ * Point Cloud Library (PCL) - www.pointclouds.org
+ * Copyright (c) 2017-, Open Perception, Inc.
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holder(s) nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ */
+#include <pcl/octree/octree_pointcloud_adjacency.h>
+#include <pcl/octree/octree_base.h>
+#include <pcl/octree/octree_iterator.h>
+#include <pcl/common/projection_matrix.h>
+#include <pcl/point_types.h>
+#include <gtest/gtest.h>
+
+using pcl::octree::OctreeBase;
+using pcl::octree::OctreeIteratorBase;
+using pcl::octree::OctreeKey;
+
+////////////////////////////////////////////////////////
+// OctreeIteratorBase
+////////////////////////////////////////////////////////
+
+struct OctreeIteratorBaseTest : public testing::Test
+{
+ // types
+ typedef OctreeBase<int> OctreeBaseT;
+ typedef OctreeIteratorBase<OctreeBaseT> OctreeIteratorBaseT;
+
+
+ // methods
+ virtual void SetUp ()
+ {
+ octree_.setTreeDepth (2); //can have at most 8^2 leaves
+ }
+
+ // members
+ OctreeBaseT octree_;
+};
+
+TEST_F (OctreeIteratorBaseTest, CopyConstructor)
+{
+ OctreeIteratorBaseT it_a;
+ OctreeIteratorBaseT it_b (&octree_, 0);
+ OctreeIteratorBaseT it_c (it_b); //Our copy constructor
+
+ EXPECT_NE (it_a, it_c);
+ EXPECT_EQ (it_b, it_c);
+}
+
+TEST_F (OctreeIteratorBaseTest, CopyAssignment)
+{
+ OctreeIteratorBaseT it_a;
+ OctreeIteratorBaseT it_b (&octree_, 0);
+ OctreeIteratorBaseT it_c;
+
+ EXPECT_EQ (it_a, it_c);
+ EXPECT_NE (it_b, it_c);
+
+ it_c = it_a; //Our copy assignment
+ EXPECT_EQ (it_a, it_c);
+ EXPECT_NE (it_b, it_c);
+
+ it_c = it_b; //Our copy assignment
+ EXPECT_NE (it_a, it_c);
+ EXPECT_EQ (it_b, it_c);
+}
+
+////////////////////////////////////////////////////////
+// Iterator fixture setup
+////////////////////////////////////////////////////////
+
+template<typename T>
+struct OctreeIteratorTest : public OctreeIteratorBaseTest
+{
+ // types
+ typedef OctreeKey OctreeKeyT;
+
+ // methods
+ OctreeIteratorTest () : it_ (&octree_, tree_depth_) {}
+
+ virtual void SetUp ()
+ {
+ // Set up my octree
+ octree_.setTreeDepth (tree_depth_); //can have at most 8 leaves
+
+ // Generate the unique key for our leaves
+ keys_[0] = OctreeKeyT (0b0u, 0b0u, 0b0u);
+ keys_[1] = OctreeKeyT (0b0u, 0b0u, 0b1u);
+ keys_[2] = OctreeKeyT (0b0u, 0b1u, 0b0u);
+ keys_[3] = OctreeKeyT (0b0u, 0b1u, 0b1u);
+ keys_[4] = OctreeKeyT (0b1u, 0b0u, 0b0u);
+ keys_[5] = OctreeKeyT (0b1u, 0b0u, 0b1u);
+ keys_[6] = OctreeKeyT (0b1u, 0b1u, 0b0u);
+ keys_[7] = OctreeKeyT (0b1u, 0b1u, 0b1u);
+
+ // Create the leaves
+ for (uint8_t i = 0; i < 8; ++i)
+ octree_.createLeaf (keys_[i].x, keys_[i].y, keys_[i].z);
+
+ // reset the iterator state
+ it_.reset ();
+
+ // increment the iterator 4 times
+ for (uint8_t i = 0; i < 4; ++it_, ++i);
+ }
+
+ // members
+ const static unsigned tree_depth_ = 1;
+
+ OctreeKeyT keys_[8];
+
+ T it_;
+};
+
+using pcl::octree::OctreeDepthFirstIterator;
+using pcl::octree::OctreeBreadthFirstIterator;
+using pcl::octree::OctreeLeafNodeDepthFirstIterator;
+using pcl::octree::OctreeFixedDepthIterator;
+using pcl::octree::OctreeLeafNodeBreadthFirstIterator;
+
+typedef testing::Types<OctreeDepthFirstIterator<OctreeBase<int> >,
+ OctreeBreadthFirstIterator<OctreeBase<int> >,
+ OctreeLeafNodeDepthFirstIterator<OctreeBase<int> >,
+ OctreeFixedDepthIterator<OctreeBase<int> >,
+ OctreeLeafNodeBreadthFirstIterator<OctreeBase<int> > > OctreeIteratorTypes;
+TYPED_TEST_CASE (OctreeIteratorTest, OctreeIteratorTypes);
+
+TYPED_TEST (OctreeIteratorTest, CopyConstructor)
+{
+ TypeParam it_a;
+ TypeParam it_b (this->it_); // copy ctor
+
+ EXPECT_NE (it_a, it_b);
+ EXPECT_EQ (this->it_, it_b);
+ EXPECT_EQ (*this->it_, *it_b);
+ EXPECT_EQ (this->it_.getNodeID (), it_b.getNodeID ());
+ EXPECT_EQ (this->it_ == it_b, !(this->it_ != it_b));
+
+ EXPECT_EQ (this->it_.getCurrentOctreeKey (), it_b.getCurrentOctreeKey ());
+ EXPECT_EQ (this->it_.getCurrentOctreeDepth (), it_b.getCurrentOctreeDepth ());
+ EXPECT_EQ (this->it_.getCurrentOctreeNode (), it_b.getCurrentOctreeNode ());
+
+ EXPECT_EQ (this->it_.isBranchNode (), it_b.isBranchNode ());
+ EXPECT_EQ (this->it_.isLeafNode (), it_b.isLeafNode ());
+
+ EXPECT_EQ (this->it_.getNodeConfiguration (), it_b.getNodeConfiguration ());
+
+ EXPECT_EQ (this->it_.getNodeID (), it_b.getNodeID ());
+}
+
+TYPED_TEST (OctreeIteratorTest, CopyAssignment)
+{
+ TypeParam it_a;
+ TypeParam it_b (this->it_); // copy ctor
+ TypeParam it_c;
+
+ // Don't modify it
+ EXPECT_EQ (it_a, it_c);
+ EXPECT_NE (it_b, it_c);
+
+ it_c = it_b; //Our copy assignment
+ EXPECT_NE (it_a, it_c);
+ EXPECT_EQ (it_b, it_c);
+ EXPECT_EQ (*it_b, *it_c);
+ EXPECT_EQ (it_b.getNodeID (), it_c.getNodeID ());
+ EXPECT_EQ (it_b == it_c, !(it_b != it_c));
+
+ it_c = it_a; //Our copy assignment
+ EXPECT_EQ (it_a, it_c);
+ EXPECT_NE (it_b, it_c);
+}
+
+////////////////////////////////////////////////////////
+// OctreeBase Begin/End Iterator Construction
+////////////////////////////////////////////////////////
+
+struct OctreeBaseBeginEndIteratorsTest : public testing::Test
+{
+ // Types
+ typedef OctreeBase<int> OctreeT;
+
+ // Methods
+ void SetUp ()
+ {
+ // Set tree depth
+ oct_a_.setTreeDepth (2);
+ oct_b_.setTreeDepth (2);
+
+ // Populate leaves 8^2 = 64 uids
+ // 64 needs 6 bits in total spread among 3 keys (x, y and z).
+ // 2 bits per key
+ // The 3 LSBs of the id match the 1 LSB of the x, y and z keys
+ // The 3 MSBs of the id match the 1 MSB of the x, y and z keys
+ for (size_t i = 0; i < 64u; ++i)
+ {
+ const OctreeKey key (((i >> 4) & 0b10u) | ((i >> 2) & 1u), // x
+ ((i >> 3) & 0b10u) | ((i >> 1) & 1u), // y
+ ((i >> 2) & 0b10u) | (i & 1u));// z
+ oct_a_.createLeaf (key.x, key.y, key.z);
+ oct_b_.createLeaf (key.x, key.y, key.z);
+ }
+ }
+
+ // Members
+ OctreeT oct_a_, oct_b_;
+};
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, Begin)
+{
+ // Useful types
+ typedef typename OctreeT::Iterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.begin ();
+ IteratorT it_a_2 = oct_a_.begin ();
+ IteratorT it_b = oct_b_.begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.begin ();
+ IteratorT it_m_1 = oct_a_.begin (1);
+ IteratorT it_m_2 = oct_a_.begin (2);
+ IteratorT it_m_b_1 = oct_b_.begin (1);
+
+ EXPECT_NE (it_m_1, it_m_2);
+ EXPECT_EQ (it_m_2, it_m); // tree depth is 2
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, End)
+{
+ // Useful types
+ typedef typename OctreeT::Iterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.end ();
+ IteratorT it_a_2 = oct_a_.end ();
+ IteratorT it_b = oct_b_.end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, LeafBegin)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_depth_begin ();
+ IteratorT it_a_2 = oct_a_.leaf_depth_begin ();
+ IteratorT it_b = oct_b_.leaf_depth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.leaf_depth_begin ();
+ IteratorT it_m_1 = oct_a_.leaf_depth_begin (1);
+ IteratorT it_m_2 = oct_a_.leaf_depth_begin (2);
+ IteratorT it_m_b_1 = oct_b_.leaf_depth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_2);
+ EXPECT_EQ (it_m_2, it_m); // tree depth is 2
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, LeafEnd)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_depth_end ();
+ IteratorT it_a_2 = oct_a_.leaf_depth_end ();
+ IteratorT it_b = oct_b_.leaf_depth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, DepthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.depth_begin ();
+ IteratorT it_a_2 = oct_a_.depth_begin ();
+ IteratorT it_b = oct_b_.depth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.depth_begin ();
+ IteratorT it_m_1 = oct_a_.depth_begin (1);
+ IteratorT it_m_2 = oct_a_.depth_begin (2);
+ IteratorT it_m_b_1 = oct_b_.depth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_2);
+ EXPECT_EQ (it_m_2, it_m); // tree depth is 2
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, DepthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.depth_end ();
+ IteratorT it_a_2 = oct_a_.depth_end ();
+ IteratorT it_b = oct_b_.depth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, BreadthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.breadth_begin ();
+ IteratorT it_a_2 = oct_a_.breadth_begin ();
+ IteratorT it_b = oct_b_.breadth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.breadth_begin ();
+ IteratorT it_m_1 = oct_a_.breadth_begin (1);
+ IteratorT it_m_2 = oct_a_.breadth_begin (2);
+ IteratorT it_m_b_1 = oct_b_.breadth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_2);
+ EXPECT_EQ (it_m_2, it_m); // tree depth is 2
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, BreadthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.breadth_end ();
+ IteratorT it_a_2 = oct_a_.breadth_end ();
+ IteratorT it_b = oct_b_.breadth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, LeafBreadthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_breadth_begin ();
+ IteratorT it_a_2 = oct_a_.leaf_breadth_begin ();
+ IteratorT it_b = oct_b_.leaf_breadth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.leaf_breadth_begin ();
+ IteratorT it_m_1 = oct_a_.leaf_breadth_begin (1);
+ IteratorT it_m_2 = oct_a_.leaf_breadth_begin (2);
+ IteratorT it_m_b_1 = oct_b_.leaf_breadth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_2);
+ EXPECT_EQ (it_m_2, it_m); // tree depth is 2
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreeBaseBeginEndIteratorsTest, LeafBreadthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_breadth_end ();
+ IteratorT it_a_2 = oct_a_.leaf_breadth_end ();
+ IteratorT it_b = oct_b_.leaf_breadth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+////////////////////////////////////////////////////////
+// OctreeBase Iterator For Loop Case
+////////////////////////////////////////////////////////
+
+struct OctreeBaseIteratorsForLoopTest : public OctreeBaseBeginEndIteratorsTest
+{
+};
+
+TEST_F (OctreeBaseIteratorsForLoopTest, DefaultIterator)
+{
+ // Useful types
+ typedef typename OctreeT::Iterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.begin (); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int max_depth = 1;
+ for (it_a = oct_a_.begin (max_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+}
+
+TEST_F (OctreeBaseIteratorsForLoopTest, LeafNodeDepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.leaf_depth_end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.leaf_depth_begin (); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 0);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int max_depth = 1;
+ for (it_a = oct_a_.leaf_depth_begin (max_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 0);
+}
+
+TEST_F (OctreeBaseIteratorsForLoopTest, DepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.depth_end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.depth_begin (); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int max_depth = 1;
+ for (it_a = oct_a_.depth_begin (max_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+}
+
+TEST_F (OctreeBaseIteratorsForLoopTest, BreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.breadth_end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.breadth_begin (); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int max_depth = 1;
+ for (it_a = oct_a_.breadth_begin (max_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+}
+
+TEST_F (OctreeBaseIteratorsForLoopTest, FixedDepthIterator)
+{
+ // Useful types
+ typedef typename OctreeT::FixedDepthIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.fixed_depth_end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (unsigned int depth = 0; depth <= oct_a_.getTreeDepth (); ++depth)
+ {
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.fixed_depth_begin (depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 9);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+ ASSERT_EQ (oct_a_.getBranchCount (), branch_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int fixed_depth = 1;
+ for (it_a = oct_a_.fixed_depth_begin (fixed_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 8);
+ ASSERT_EQ (branch_count + leaf_count, node_count);
+ ASSERT_EQ ((oct_a_.getBranchCount () - 1), branch_count);
+}
+
+TEST_F (OctreeBaseIteratorsForLoopTest, LeafNodeBreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a;
+ IteratorT it_a_end = oct_a_.leaf_breadth_end ();
+
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a = oct_a_.leaf_breadth_begin (); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 64);
+ ASSERT_EQ (branch_count, 0);
+ ASSERT_EQ (node_count, 64);
+ ASSERT_EQ (oct_a_.getLeafCount (), leaf_count);
+
+ // Iterate over the octree oct_a_ with a depth max of 1.
+ // As oct_a_ has a depth level of 2, we should only iterate
+ // over 9 branch node: the root node + 8 node at depth 1
+ node_count = 0;
+ branch_count = 0;
+ leaf_count = 0;
+ unsigned int max_depth = 1;
+ for (it_a = oct_a_.leaf_breadth_begin (max_depth); it_a != it_a_end; ++it_a)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it_a.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ // Check the node_count, branch_count and leaf_count values
+ ASSERT_EQ (leaf_count, 0);
+ ASSERT_EQ (branch_count, 0);
+ ASSERT_EQ (node_count, 0);
+}
+
+////////////////////////////////////////////////////////
+// OctreeBase Walk Through Iterator Test
+////////////////////////////////////////////////////////
+
+struct OctreeBaseWalkThroughIteratorsTest : public testing::Test
+{
+ // Types
+ typedef OctreeBase<int> OctreeT;
+
+ // Methods
+ void SetUp ()
+ {
+ // Create manually an irregular octree.
+ // Graphically, this octree appears as follows:
+ // root
+ // ' / \ `
+ // ' / \ `
+ // ' / \ `
+ // 000 010 100 110
+ // | |
+ // | |
+ // 020 220
+ //
+ // The octree key of the different node are represented on this graphic.
+ // This octree is of max_depth 2.
+ // At depth 1, you will find:
+ // - 2 leaf nodes , with the keys 000 and 100,
+ // - 2 branch nodes, with the keys 010 and 110.
+ // At depth 2, you will find:
+ // - 2 leaf nodes , with the keys 000 and 000.
+ // This octree is build to be able to check the order in which the nodes
+ // appear depending on the used iterator.
+
+ // Set the leaf nodes at depth 1
+ oct_a_.setTreeDepth (1);
+
+ oct_a_.createLeaf (0u, 0u, 0u);
+ oct_a_.createLeaf (1u, 0u, 0u);
+
+ // Set the leaf nodes at depth 2. As createLeaf method create recursively
+ // the octree nodes, if the parent node are not present, they will be create.
+ // In this case, at depth 1, the nodes 010 and 110 are created.
+ oct_a_.setTreeDepth (2);
+
+ oct_a_.createLeaf (0u, 2u, 0u);
+ oct_a_.createLeaf (2u, 2u, 0u);
+ }
+
+ // Members
+ OctreeT oct_a_;
+};
+
+TEST_F (OctreeBaseWalkThroughIteratorsTest, LeafNodeDepthFirstIterator)
+{
+ OctreeT::LeafNodeDepthFirstIterator it = oct_a_.leaf_depth_begin ();
+
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (2u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.leaf_depth_end ());
+}
+
+TEST_F (OctreeBaseWalkThroughIteratorsTest, LeafNodeBreadthFirstIterator)
+{
+ OctreeT::LeafNodeBreadthFirstIterator it = oct_a_.leaf_breadth_begin ();
+
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (2u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.leaf_breadth_end ());
+}
+
+TEST_F (OctreeBaseWalkThroughIteratorsTest, DepthFirstIterator)
+{
+ OctreeT::DepthFirstIterator it = oct_a_.depth_begin ();
+
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 0
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 0u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (2u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.depth_end ());
+}
+
+TEST_F (OctreeBaseWalkThroughIteratorsTest, BreadthFirstIterator)
+{
+ OctreeT::BreadthFirstIterator it = oct_a_.breadth_begin ();
+
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 0
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 0u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (2u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.breadth_end ());
+}
+
+TEST_F (OctreeBaseWalkThroughIteratorsTest, FixedDepthIterator)
+{
+ OctreeT::FixedDepthIterator it;
+
+ // Check the default behavior of the iterator
+ it = oct_a_.fixed_depth_begin ();
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 0
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 0u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.fixed_depth_end ());
+
+ // Check the iterator at depth 0
+ it = oct_a_.fixed_depth_begin (0);
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 0
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 0u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.fixed_depth_end ());
+
+ // Check the iterator at depth 1
+ it = oct_a_.fixed_depth_begin (1);
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 0u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (1u, 1u, 0u)); // depth: 1
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 1u);
+ EXPECT_TRUE (it.isBranchNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.fixed_depth_end ());
+
+ // Check the iterator at depth 2
+ it = oct_a_.fixed_depth_begin (2);
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (0u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it.getCurrentOctreeKey (), OctreeKey (2u, 2u, 0u)); // depth: 2
+ EXPECT_EQ (it.getCurrentOctreeDepth (), 2u);
+ EXPECT_TRUE (it.isLeafNode ());
+ ++it;
+ EXPECT_EQ (it, oct_a_.fixed_depth_end ());
+}
+
+////////////////////////////////////////////////////////
+// OctreeBase Iterator Pre/Post increment
+////////////////////////////////////////////////////////
+
+struct OctreeBaseIteratorsPrePostTest : public OctreeBaseBeginEndIteratorsTest
+{
+};
+
+TEST_F (OctreeBaseIteratorsPrePostTest, DefaultIterator)
+{
+ // Useful types
+ typedef typename OctreeT::Iterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.end ();
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a_pre = oct_a_.begin (), it_a_post = oct_a_.begin ();
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+}
+
+TEST_F (OctreeBaseIteratorsPrePostTest, LeafNodeDepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.leaf_depth_end ();
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a_pre = oct_a_.leaf_depth_begin (), it_a_post = oct_a_.leaf_depth_begin ();
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+}
+
+TEST_F (OctreeBaseIteratorsPrePostTest, DepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.depth_end ();
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a_pre = oct_a_.depth_begin (), it_a_post = oct_a_.depth_begin ();
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+}
+
+TEST_F (OctreeBaseIteratorsPrePostTest, BreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.breadth_end ();
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a_pre = oct_a_.breadth_begin (), it_a_post = oct_a_.breadth_begin ();
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+}
+
+TEST_F (OctreeBaseIteratorsPrePostTest, FixedDepthIterator)
+{
+ // Useful types
+ typedef typename OctreeT::FixedDepthIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.fixed_depth_end ();
+
+ for (unsigned int depth = 0; depth <= oct_a_.getTreeDepth (); ++depth)
+ {
+ it_a_pre = oct_a_.fixed_depth_begin (depth);
+ it_a_post = oct_a_.fixed_depth_begin (depth);
+
+
+ // Iterate over every node at a given depth of the octree oct_a_.
+ for (it_a_pre = oct_a_.fixed_depth_begin (depth), it_a_post = oct_a_.fixed_depth_begin (depth);
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+ }
+}
+
+TEST_F (OctreeBaseIteratorsPrePostTest, LeafNodeBreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_pre;
+ IteratorT it_a_post;
+ IteratorT it_a_end = oct_a_.leaf_breadth_end ();
+
+ // Iterate over every node of the octree oct_a_.
+ for (it_a_pre = oct_a_.leaf_breadth_begin (), it_a_post = oct_a_.leaf_breadth_begin ();
+ ((it_a_pre != it_a_end) && (it_a_post != it_a_end)); )
+ {
+ EXPECT_EQ (it_a_pre, it_a_post++);
+ EXPECT_EQ (++it_a_pre, it_a_post);
+ }
+
+ EXPECT_EQ (it_a_pre, it_a_end);
+ EXPECT_EQ (it_a_post, it_a_end);
+}
+
+////////////////////////////////////////////////////////
+// OctreePointCloudAdjacency Begin/End Iterator Construction
+////////////////////////////////////////////////////////
+
+struct OctreePointCloudAdjacencyBeginEndIteratorsTest
+ : public testing::Test
+{
+ // Types
+ typedef pcl::PointXYZ PointT;
+ typedef pcl::PointCloud<PointT> PointCloudT;
+ typedef pcl::octree::OctreePointCloudAdjacency<PointT> OctreeT;
+
+ // Methods
+ OctreePointCloudAdjacencyBeginEndIteratorsTest ()
+ : oct_a_ (1)
+ , oct_b_ (1)
+ {}
+
+ void SetUp ()
+ {
+ // Replicable results
+ std::srand (42);
+
+ // Generate Point Cloud
+ typename PointCloudT::Ptr cloud (new PointCloudT (100, 1));
+ const float max_inv = 1.f / float (RAND_MAX);
+ for (size_t i = 0; i < 100; ++i)
+ {
+ const PointT pt (10.f * (float (std::rand ()) * max_inv - .5f),
+ 10.f * (float (std::rand ()) * max_inv - .5f),
+ 10.f * (float (std::rand ()) * max_inv - .5f));
+ (*cloud)[i] = pt;
+ }
+
+ // Add points to octree
+ oct_a_.setInputCloud (cloud);
+ oct_a_.addPointsFromInputCloud ();
+
+ oct_b_.setInputCloud (cloud);
+ oct_b_.addPointsFromInputCloud ();
+ }
+
+ // Members
+ OctreeT oct_a_, oct_b_;
+};
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, LeafDepthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_depth_begin ();
+ IteratorT it_a_2 = oct_a_.leaf_depth_begin ();
+ IteratorT it_b = oct_b_.leaf_depth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.leaf_depth_begin ();
+ IteratorT it_m_1 = oct_a_.leaf_depth_begin (1);
+ IteratorT it_m_md = oct_a_.leaf_depth_begin (oct_a_.getTreeDepth ());
+ IteratorT it_m_b_1 = oct_b_.leaf_depth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_md);
+ EXPECT_EQ (it_m_md, it_m); // should default to tree depth
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, LeafDepthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_depth_end ();
+ IteratorT it_a_2 = oct_a_.leaf_depth_end ();
+ IteratorT it_b = oct_b_.leaf_depth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, DepthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.depth_begin ();
+ IteratorT it_a_2 = oct_a_.depth_begin ();
+ IteratorT it_b = oct_b_.depth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.depth_begin ();
+ IteratorT it_m_1 = oct_a_.depth_begin (1);
+ IteratorT it_m_md = oct_a_.depth_begin (oct_a_.getTreeDepth ());
+ IteratorT it_m_b_1 = oct_b_.depth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_md);
+ EXPECT_EQ (it_m_md, it_m); // should default to tree depth
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, DepthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.depth_end ();
+ IteratorT it_a_2 = oct_a_.depth_end ();
+ IteratorT it_b = oct_b_.depth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, BreadthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.breadth_begin ();
+ IteratorT it_a_2 = oct_a_.breadth_begin ();
+ IteratorT it_b = oct_b_.breadth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.breadth_begin ();
+ IteratorT it_m_1 = oct_a_.breadth_begin (1);
+ IteratorT it_m_md = oct_a_.breadth_begin (oct_a_.getTreeDepth ());
+ IteratorT it_m_b_1 = oct_b_.breadth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_md);
+ EXPECT_EQ (it_m_md, it_m); // should default to tree depth
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, BreadthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.breadth_end ();
+ IteratorT it_a_2 = oct_a_.breadth_end ();
+ IteratorT it_b = oct_b_.breadth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, FixedDepthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::FixedDepthIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.fixed_depth_begin ();
+ IteratorT it_a_2 = oct_a_.fixed_depth_begin ();
+ IteratorT it_b = oct_b_.fixed_depth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_f = oct_a_.fixed_depth_begin ();
+ IteratorT it_f_1 = oct_a_.fixed_depth_begin (1);
+ IteratorT it_f_fd = oct_a_.fixed_depth_begin (oct_a_.getTreeDepth ());
+ IteratorT it_f_0 = oct_a_.fixed_depth_begin (0);
+ IteratorT it_f_b_1 = oct_b_.fixed_depth_begin (1);
+
+ EXPECT_NE (it_f_1, it_f_fd);
+ EXPECT_NE (it_f_fd, it_f);
+ EXPECT_EQ (it_f_0, it_f); // should default to root tree
+ EXPECT_NE (it_f_1, it_f_b_1);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, FixedDepthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::FixedDepthIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.fixed_depth_end ();
+ IteratorT it_a_2 = oct_a_.fixed_depth_end ();
+ IteratorT it_b = oct_b_.fixed_depth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, LeafBreadthBegin)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_breadth_begin ();
+ IteratorT it_a_2 = oct_a_.leaf_breadth_begin ();
+ IteratorT it_b = oct_b_.leaf_breadth_begin ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+
+ // Different max depths are not the same iterators
+ IteratorT it_m = oct_a_.leaf_breadth_begin ();
+ IteratorT it_m_1 = oct_a_.leaf_breadth_begin (1);
+ IteratorT it_m_md = oct_a_.leaf_breadth_begin (oct_a_.getTreeDepth ());
+ IteratorT it_m_b_1 = oct_b_.leaf_breadth_begin (1);
+
+ EXPECT_NE (it_m_1, it_m_md);
+ EXPECT_EQ (it_m_md, it_m); // should default to tree depth
+ EXPECT_NE (it_m_1, it_m_b_1);
+}
+
+TEST_F (OctreePointCloudAdjacencyBeginEndIteratorsTest, LeafBreadthEnd)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Default initialization
+ IteratorT it_a_1 = oct_a_.leaf_breadth_end ();
+ IteratorT it_a_2 = oct_a_.leaf_breadth_end ();
+ IteratorT it_b = oct_b_.leaf_breadth_end ();
+
+ EXPECT_EQ (it_a_1, it_a_2);
+ EXPECT_NE (it_a_1, it_b);
+ EXPECT_NE (it_a_2, it_b);
+}
+
+////////////////////////////////////////////////////////
+// OctreePointCloudSierpinski Iterator Traversal Test
+////////////////////////////////////////////////////////
+
+struct OctreePointCloudSierpinskiTest
+ : public testing::Test
+{
+ // Types
+ typedef pcl::PointXYZ PointT;
+ typedef pcl::PointCloud<PointT> PointCloudT;
+ typedef pcl::octree::OctreePointCloud<PointT> OctreeT;
+
+ // Methods
+ OctreePointCloudSierpinskiTest ()
+ : oct_ (1)
+ , depth_ (7)
+ {}
+
+ void SetUp ()
+ {
+ // Create a point cloud which points are inside Sierpinski fractal voxel at the deepest level
+ // https://en.wikipedia.org/wiki/Sierpinski_triangle
+ typename PointCloudT::Ptr cloud (new PointCloudT);
+
+ // The voxels will be generate between the points (0, 0, 0) and (1, 1, 1)
+ Eigen::Vector3f v_min (0, 0, 0);
+ Eigen::Vector3f v_max (1, 1, 1);
+
+ // Generate Sierpinski fractal voxel at the deepest level
+ std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > voxels (generateSierpinskiVoxelExtremities (v_min, v_max, depth_));
+
+ // The number of points in each voxel
+ unsigned int total_nb_pt = 100000;
+ unsigned int nb_pt_in_voxel = total_nb_pt / pow (4, depth_);
+
+ // Replicable results
+ std::srand (42);
+
+ // Fill the point cloud
+ for (std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> >::const_iterator it = voxels.begin ();
+ it != voxels.end ();
+ ++it)
+ {
+ const static float eps = std::numeric_limits<float>::epsilon ();
+ double x_min = it->first.x () + eps;
+ double y_min = it->first.y () + eps;
+ double z_min = it->first.z () + eps;
+ double x_max = it->second.x () - eps;
+ double y_max = it->second.y () - eps;
+ double z_max = it->second.z () - eps;
+
+ for (unsigned int i = 0; i < nb_pt_in_voxel; ++i)
+ {
+ float x = x_min + (rand () / ((float)(RAND_MAX) + 1)) * (x_max - x_min);
+ float y = y_min + (rand () / ((float)(RAND_MAX) + 1)) * (y_max - y_min);
+ float z = z_min + (rand () / ((float)(RAND_MAX) + 1)) * (z_max - z_min);
+
+ cloud->points.push_back (PointT (x, y, z));
+ }
+ }
+
+ // Setup the octree
+ double resolution = 1.0 / (pow (2, depth_));
+ oct_.setResolution (resolution);
+
+ // Add points to octree
+ oct_.setInputCloud (cloud);
+
+ // Bounding box
+ oct_.defineBoundingBox (0.0, 0.0, 0.0, 1.0, 1.0, 1.0);
+
+ // Add points from cloud to octree
+ oct_.addPointsFromInputCloud ();
+ }
+
+ // Generate a vector of Sierpinski voxels at a given 'depth_arg'
+ std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> >
+ generateSierpinskiVoxelExtremities (const Eigen::Vector3f & v_min, const Eigen::Vector3f & v_max,
+ const unsigned int & depth_arg)
+ {
+ std::pair<Eigen::Vector3f, Eigen::Vector3f> voxel (v_min, v_max);
+ std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > voxels;
+ voxels.push_back (voxel);
+
+ for (unsigned int i = 0; i < depth_arg; ++i)
+ {
+ voxels = generateSierpinskiVoxelExtremitiesAux (voxels);
+ }
+
+ return voxels;
+ }
+
+ // Apply a recursion step to a vector of macro Sierpinski voxel
+ // For each voxel in the input vector 'voxels_in', 4 sub-voxels are generated
+ std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> >
+ generateSierpinskiVoxelExtremitiesAux (const std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > & voxels_in)
+ {
+ std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> > voxels_out;
+
+ for (std::vector<std::pair<Eigen::Vector3f, Eigen::Vector3f> >::const_iterator it = voxels_in.begin ();
+ it != voxels_in.end ();
+ ++it)
+ {
+ Eigen::Vector3f v_min = it->first;
+ Eigen::Vector3f v_max = it->second;
+ Eigen::Vector3f v_mid = 0.5 * (v_min + v_max);
+
+ std::pair<Eigen::Vector3f, Eigen::Vector3f> voxel_0;
+ std::pair<Eigen::Vector3f, Eigen::Vector3f> voxel_1;
+ std::pair<Eigen::Vector3f, Eigen::Vector3f> voxel_2;
+ std::pair<Eigen::Vector3f, Eigen::Vector3f> voxel_3;
+
+ voxel_0.first = v_min;
+ voxel_0.second = v_mid;
+ voxel_1.first = Eigen::Vector3f (v_min.x (), v_mid.y (), v_mid.z ());
+ voxel_1.second = Eigen::Vector3f (v_mid.x (), v_max.y (), v_max.z ());
+ voxel_2.first = Eigen::Vector3f (v_mid.x (), v_min.y (), v_mid.z ());
+ voxel_2.second = Eigen::Vector3f (v_max.x (), v_mid.y (), v_max.z ());
+ voxel_3.first = Eigen::Vector3f (v_mid.x (), v_mid.y (), v_min.z ());
+ voxel_3.second = Eigen::Vector3f (v_max.x (), v_max.y (), v_mid.z ());
+
+ voxels_out.push_back (voxel_0);
+ voxels_out.push_back (voxel_1);
+ voxels_out.push_back (voxel_2);
+ voxels_out.push_back (voxel_3);
+ }
+
+ return voxels_out;
+ }
+
+ /** \brief Computes the total number of parent nodes at the specified depth
+ *
+ * The octree is built such that the number of the leaf nodes is equal to
+ * 4^depth and the number of branch nodes is egal to (4^depth -1)/(4 - 1),
+ * where depth is the detph of the octree. The details of the expression
+ * provided for the number of branch nodes could be found at:
+ * https://en.wikipedia.org/wiki/Geometric_progression#Geometric_series
+ * \param[in] depth - The depth of the octree
+ * \return The total number of parent nodes at the specified depth level.
+ */
+ static unsigned
+ computeTotalParentNodeCount (const unsigned depth)
+ {
+ return (pow (4, depth) - 1) / (4 - 1);
+ }
+
+ // Members
+ OctreeT oct_;
+
+ const unsigned depth_;
+};
+
+/** \brief Octree test based on a Sierpinski fractal
+ */
+TEST_F (OctreePointCloudSierpinskiTest, DefaultIterator)
+{
+ // Useful types
+ typedef typename OctreeT::Iterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), computeTotalParentNodeCount (depth_));
+
+ IteratorT it;
+ IteratorT it_end = oct_.end ();
+
+ // Check the number of leaf and branch nodes
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (it = oct_.begin (); it != it_end; ++it)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ EXPECT_EQ (leaf_count, pow (4, depth_));
+ EXPECT_EQ (branch_count, computeTotalParentNodeCount (depth_));
+ EXPECT_EQ (node_count, computeTotalParentNodeCount (depth_ + 1));
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.begin (); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ EXPECT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+TEST_F (OctreePointCloudSierpinskiTest, LeafNodeDepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeDepthFirstIterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), computeTotalParentNodeCount (depth_));
+
+ IteratorT it;
+ IteratorT it_end = oct_.leaf_depth_end ();
+
+ // Check the number of leaf and branch nodes
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (it = oct_.leaf_depth_begin (); it != it_end; ++it)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ EXPECT_EQ (leaf_count, pow (4, depth_));
+ EXPECT_EQ (branch_count, 0);
+ EXPECT_EQ (node_count, pow (4, depth_));
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.leaf_depth_begin (); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ EXPECT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+TEST_F (OctreePointCloudSierpinskiTest, DepthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::DepthFirstIterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), computeTotalParentNodeCount (depth_));
+
+ IteratorT it;
+ IteratorT it_end = oct_.depth_end ();
+
+ // Check the number of leaf and branch nodes
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (it = oct_.depth_begin (); it != it_end; ++it)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ EXPECT_EQ (leaf_count, pow (4, depth_));
+ EXPECT_EQ (branch_count, computeTotalParentNodeCount (depth_));
+ EXPECT_EQ (node_count, computeTotalParentNodeCount (depth_ + 1));
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.depth_begin (); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ EXPECT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+TEST_F (OctreePointCloudSierpinskiTest, BreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::BreadthFirstIterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), computeTotalParentNodeCount (depth_));
+
+ IteratorT it;
+ IteratorT it_end = oct_.breadth_end ();
+
+ // Check the number of leaf and branch nodes
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (it = oct_.breadth_begin (); it != it_end; ++it)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ EXPECT_EQ (leaf_count, pow (4, depth_));
+ EXPECT_EQ (branch_count, computeTotalParentNodeCount (depth_));
+ EXPECT_EQ (node_count, computeTotalParentNodeCount (depth_ + 1));
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.breadth_begin (); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ EXPECT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+TEST_F (OctreePointCloudSierpinskiTest, FixedDepthIterator)
+{
+ // Useful types
+ typedef typename OctreeT::FixedDepthIterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), computeTotalParentNodeCount (depth_));
+
+ IteratorT it;
+ IteratorT it_end = oct_.fixed_depth_end ();
+
+ // Check the number of nodes at each level of the octree
+ unsigned int nb_nodes;
+ for (unsigned int idx_depth = 1; idx_depth <= depth_; ++idx_depth)
+ {
+ nb_nodes = 0;
+ for (it = oct_.fixed_depth_begin (idx_depth); it != it_end; ++it)
+ {
+ ASSERT_EQ (it.getCurrentOctreeDepth (), idx_depth);
+ ++nb_nodes;
+ }
+
+ ASSERT_EQ (nb_nodes, pow (4, idx_depth));
+ }
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.fixed_depth_begin (depth_); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ ASSERT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+TEST_F (OctreePointCloudSierpinskiTest, LeafNodeBreadthFirstIterator)
+{
+ // Useful types
+ typedef typename OctreeT::LeafNodeBreadthFirstIterator IteratorT;
+
+ // Check the number of branch and leaf nodes
+ ASSERT_EQ (oct_.getLeafCount (), pow (4, depth_));
+ ASSERT_EQ (oct_.getBranchCount (), (pow (4, depth_) - 1) / (4 - 1));
+
+ IteratorT it;
+ IteratorT it_end = oct_.leaf_breadth_end ();
+
+ // Check the number of leaf and branch nodes
+ unsigned int node_count = 0;
+ unsigned int branch_count = 0;
+ unsigned int leaf_count = 0;
+
+ for (it = oct_.leaf_breadth_begin (); it != it_end; ++it)
+ {
+ // store node, branch and leaf count
+ const pcl::octree::OctreeNode* node = it.getCurrentOctreeNode ();
+ if (node->getNodeType () == pcl::octree::BRANCH_NODE)
+ {
+ branch_count++;
+ }
+ else if (node->getNodeType () == pcl::octree::LEAF_NODE)
+ {
+ leaf_count++;
+ }
+ node_count++;
+ }
+
+ EXPECT_EQ (leaf_count, pow (4, depth_));
+ EXPECT_EQ (branch_count, 0);
+ EXPECT_EQ (node_count, pow (4, depth_));
+
+ // Check the specific key/child_idx value for this octree
+ for (it = oct_.leaf_breadth_begin (); it != it_end; ++it)
+ {
+ for (unsigned int i = 0; i < depth_; ++i)
+ {
+ // The binary representation child_idx can only contain an even number of 1
+ int child_idx = it.getCurrentOctreeKey ().getChildIdxWithDepthMask (1 << i);
+ EXPECT_TRUE ((child_idx == 0) || (child_idx == 3) || (child_idx == 5) || (child_idx == 6));
+ }
+ }
+}
+
+int
+main (int argc, char** const argv)
+{
+ testing::InitGoogleTest (&argc, argv);
+ return (RUN_ALL_TESTS ());
+}
clusterer.setSceneRf (scene_rf);
clusterer.setModelSceneCorrespondences (model_scene_corrs_);
clusterer.setHoughBinSize (0.03);
- clusterer.setHoughThreshold (25);
+ clusterer.setHoughThreshold (10);
EXPECT_TRUE (clusterer.recognize (rototranslations));
//Assertions
- EXPECT_EQ (rototranslations.size (), 1);
- EXPECT_LT (computeRmsE (model_, scene_, rototranslations[0]), 1E-2);
+ ASSERT_GE (rototranslations.size (), 1);
+
+ // Pick transformation with lowest error
+ double min_rms_e = std::numeric_limits<double>::max ();
+ for (size_t i = 0; i < rototranslations.size (); ++i)
+ min_rms_e = std::min (min_rms_e, computeRmsE (model_, scene_, rototranslations[i]));
+ EXPECT_LT (min_rms_e, 1E-2);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <pcl/registration/ppf_registration.h>
#include <pcl/registration/ndt.h>
#include <pcl/registration/sample_consensus_prerejective.h>
-// We need Histogram<2> to function, so we'll explicitely add kdtree_flann.hpp here
+// We need Histogram<2> to function, so we'll explicitly add kdtree_flann.hpp here
#include <pcl/kdtree/impl/kdtree_flann.hpp>
//(pcl::Histogram<2>)
{
IterativeClosestPoint<PointXYZ, PointXYZ> reg;
PointCloud<PointXYZ>::ConstPtr source (cloud_source.makeShared ());
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- reg.setInputCloud (source); // test for PCL_DEPRECATED
- source = reg.getInputCloud (); // test for PCL_DEPRECATED
-#pragma GCC diagnostic pop
reg.setInputSource (source);
reg.setInputTarget (cloud_target.makeShared ());
reg.setMaximumIterations (50);
reg.setMaxCorrespondenceDistance (0.25); // Making sure the correspondence distance > the max translation
// Add a median distance rejector
pcl::registration::CorrespondenceRejectorMedianDistance::Ptr rej_med (new pcl::registration::CorrespondenceRejectorMedianDistance);
- rej_med->setMedianFactor (4.0);
+ rej_med->setMedianFactor (8.0);
reg.addCorrespondenceRejector (rej_med);
// Also add a SaC rejector
pcl::registration::CorrespondenceRejectorSampleConsensus<PointXYZ>::Ptr rej_samp (new pcl::registration::CorrespondenceRejectorSampleConsensus<PointXYZ>);
// re-do correspondence estimation
boost::shared_ptr<pcl::Correspondences> correspondences (new pcl::Correspondences);
pcl::registration::CorrespondenceEstimation<PointXYZ, PointXYZ> corr_est;
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- corr_est.setInputCloud (source); // test for PCL_DEPRECATED
- source = corr_est.getInputCloud (); // test for PCL_DEPRECATED
-#pragma GCC diagnostic pop
corr_est.setInputSource (source);
corr_est.setInputTarget (target);
corr_est.determineCorrespondences (*correspondences);
boost::shared_ptr<pcl::Correspondences> correspondences_result_rej_sac (new pcl::Correspondences);
pcl::registration::CorrespondenceRejectorSampleConsensus<PointXYZ> corr_rej_sac;
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- corr_rej_sac.setInputCloud (source); // test for PCL_DEPRECATED
- source = corr_rej_sac.getInputCloud (); // test for PCL_DEPRECATED
-#pragma GCC diagnostic pop
corr_rej_sac.setInputSource (source);
corr_rej_sac.setInputTarget (target);
corr_rej_sac.setInlierThreshold (rej_sac_max_dist);
const Eigen::Quaternionf R_LM_1_float (T_LM_float.topLeftCorner <3, 3> ());
const Eigen::Translation3f t_LM_1_float (T_LM_float.topRightCorner <3, 1> ());
- EXPECT_NEAR (R_LM_1_float.x (), R_ref.x (), 1e-4f);
- EXPECT_NEAR (R_LM_1_float.y (), R_ref.y (), 1e-4f);
- EXPECT_NEAR (R_LM_1_float.z (), R_ref.z (), 1e-4f);
- EXPECT_NEAR (R_LM_1_float.w (), R_ref.w (), 1e-4f);
+ EXPECT_NEAR (R_LM_1_float.x (), R_ref.x (), 1e-3f);
+ EXPECT_NEAR (R_LM_1_float.y (), R_ref.y (), 1e-3f);
+ EXPECT_NEAR (R_LM_1_float.z (), R_ref.z (), 1e-3f);
+ EXPECT_NEAR (R_LM_1_float.w (), R_ref.w (), 1e-3f);
EXPECT_NEAR (t_LM_1_float.x (), t_ref.x (), 1e-3f);
EXPECT_NEAR (t_LM_1_float.y (), t_ref.y (), 1e-3f);
cloud.points[idx].z = 0.0;
}
+#if defined(DEBUG) || defined(_DEBUG)
+ boost::posix_time::time_duration delay (0, 0, 15, 0);
+#else
boost::posix_time::time_duration delay (0, 0, 1, 0);
+#endif
+
boost::function<bool ()> sac_function;
SampleConsensusModelSpherePtr model (new SampleConsensusModelSphere<PointXYZ> (cloud.makeShared ()));
#include <pcl/pcl_tests.h>
+#include <pcl/common/common.h>
#include <pcl/sample_consensus/ransac.h>
#include <pcl/sample_consensus/sac_model_line.h>
#include <pcl/sample_consensus/sac_model_parallel_line.h>
cloud.points[15].getVector3fMap () << -1.05f, 5.01f, 5.0f;
// Create a shared line model pointer directly
+ const double eps = 0.1; //angle eps in radians
+ const Eigen::Vector3f axis (0, 0, 1);
SampleConsensusModelParallelLinePtr model (new SampleConsensusModelParallelLine<PointXYZ> (cloud.makeShared ()));
- model->setAxis (Eigen::Vector3f (0, 0, 1));
- model->setEpsAngle (0.1);
+ model->setAxis (axis);
+ model->setEpsAngle (eps);
// Create the RANSAC object
RandomSampleConsensus<PointXYZ> sac (model, 0.1);
Eigen::VectorXf coeff;
sac.getModelCoefficients (coeff);
EXPECT_EQ (6, coeff.size ());
- EXPECT_NEAR (0, coeff[3], 1e-4);
- EXPECT_NEAR (0, coeff[4], 1e-4);
- EXPECT_NEAR (1, coeff[5], 1e-4);
+
+ // Make sure the returned direction respects the angular constraint
+ double angle_diff = getAngle3D (axis, coeff.tail<3> ());
+ angle_diff = std::min (angle_diff, M_PI - angle_diff);
+ EXPECT_GT (eps, angle_diff);
// Projection tests
PointCloud<PointXYZ> proj_points;
vector<int> unorganized_sparse_cloud_query_indices;
vector<int> organized_sparse_query_indices;
-/** \briet test whether the result of a search containes unique point ids or not
+/** \briet test whether the result of a search contains unique point ids or not
* @param indices resulting indices from a search
* @param name name of the search method that returned these distances
* @return true if indices are unique, false otherwise
EXPECT_NEAR (sphere_coefficients->values[2], 1.24558, 1e-2);
EXPECT_NEAR (sphere_coefficients->values[3], 0.0536238, 1e-2);
- EXPECT_NEAR (static_cast<int> (inliers->indices.size ()), 3516, 10);
+ EXPECT_NEAR (static_cast<int> (inliers->indices.size ()), 3516, 15);
}
//* ---[ */
PointCloud<PointXYZ> hull_3d;
concave_hull_3d.reconstruct (hull_3d);
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- EXPECT_EQ (concave_hull_3d.getDim (), 3); // test for PCL_DEPRECATED
-#pragma GCC diagnostic pop
EXPECT_EQ (concave_hull_3d.getDimension (), 3);
ModelCoefficients::Ptr plane_coefficients (new ModelCoefficients ());
//tex_material.tex_Ns = 0.0f;
//tex_material.tex_illum = 2;
- //// set texture material paramaters
+ //// set texture material parameters
//tm.setTextureMaterials(tex_material);
//// set texture files
std::vector<Vertices> vertices;
hoppe.reconstruct (points, vertices);
- EXPECT_NEAR (points.points[points.size()/2].x, -0.042528, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].y, 0.080196, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].z, 0.043159, 1e-3);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 10854);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 10855);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 10856);
+ EXPECT_NEAR (points.points[points.size()/2].x, -0.037143, 1e-3);
+ EXPECT_NEAR (points.points[points.size()/2].y, 0.098213, 1e-3);
+ EXPECT_NEAR (points.points[points.size()/2].z, -0.044911, 1e-3);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 11202);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 11203);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 11204);
MarchingCubesRBF<PointNormal> rbf;
rbf.setOffSurfaceDisplacement (0.02f);
rbf.reconstruct (points, vertices);
- EXPECT_NEAR (points.points[points.size()/2].x, -0.033919, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].y, 0.151683, 1e-3);
- EXPECT_NEAR (points.points[points.size()/2].z, -0.000086, 1e-3);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 4284);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 4285);
- EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 4286);
+ EXPECT_NEAR (points.points[points.size()/2].x, -0.025630, 1e-3);
+ EXPECT_NEAR (points.points[points.size()/2].y, 0.135228, 1e-3);
+ EXPECT_NEAR (points.points[points.size()/2].z, 0.035766, 1e-3);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[0], 4275);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[1], 4276);
+ EXPECT_EQ (vertices[vertices.size ()/2].vertices[2], 4277);
}
// Set parameters
mls.setInputCloud (cloud);
mls.setComputeNormals (true);
- mls.setPolynomialFit (true);
+ mls.setPolynomialOrder (2);
mls.setSearchMethod (tree);
mls.setSearchRadius (0.03);
MovingLeastSquaresOMP<PointXYZ, PointNormal> mls_omp;
mls_omp.setInputCloud (cloud);
mls_omp.setComputeNormals (true);
- mls_omp.setPolynomialFit (true);
+ mls_omp.setPolynomialOrder (2);
mls_omp.setSearchMethod (tree);
mls_omp.setSearchRadius (0.03);
mls_omp.setNumberOfThreads (4);
// Set parameters
mls_upsampling.setInputCloud (cloud);
mls_upsampling.setComputeNormals (true);
- mls_upsampling.setPolynomialFit (true);
+ mls_upsampling.setPolynomialOrder (2);
mls_upsampling.setSearchMethod (tree);
mls_upsampling.setSearchRadius (0.03);
mls_upsampling.setUpsamplingMethod (MovingLeastSquares<PointXYZ, PointNormal>::SAMPLE_LOCAL_PLANE);
// cerr << "reset camera viewer pose:" << endl << viewer_pose << endl;
}
+////////////////////////////////////////////////////////////////////////////////
+TEST (PCL, PCLVisualizer_getPointCloudRenderingProperties)
+{
+ PCLVisualizer visualizer;
-
+ std::string cloud_id = "input_cloud";
+ visualizer.addPointCloud (cloud, cloud_id);
+ ASSERT_TRUE (visualizer.setPointCloudRenderingProperties (PCL_VISUALIZER_COLOR,
+ 1., 0., 0., cloud_id));
+ double r, g, b;
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_POINT_SIZE,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_OPACITY,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_LINE_WIDTH,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_FONT_SIZE,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_REPRESENTATION,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_IMMEDIATE_RENDERING,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_SHADING,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_LUT,
+ r, g, b, cloud_id));
+ EXPECT_FALSE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_LUT_RANGE,
+ r, g, b, cloud_id));
+
+ r = 666.;
+ g = 666.;
+ b = 666.;
+ EXPECT_TRUE (visualizer.getPointCloudRenderingProperties (PCL_VISUALIZER_COLOR,
+ r, g, b, cloud_id));
+
+ EXPECT_EQ (r, 1.);
+ EXPECT_EQ (g, 0.);
+ EXPECT_EQ (b, 0.);
+}
/* ---[ */
int
set (SUBSYS_NAME tools)
set (SUBSYS_DESC "Useful PCL-based command line tools")
set (SUBSYS_DEPS common io filters sample_consensus segmentation search kdtree features surface octree registration recognition geometry keypoints ml)
+set (SUBSYS_OPT_DEPS visualization)
set (DEFAULT ON)
set (REASON "")
PCL_SUBSYS_OPTION (BUILD_tools "${SUBSYS_NAME}" "${SUBSYS_DESC}" ${DEFAULT} "${REASON}")
-PCL_SUBSYS_DEPEND (BUILD_tools "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS})
+PCL_SUBSYS_DEPEND (BUILD_tools "${SUBSYS_NAME}" DEPS ${SUBSYS_DEPS} OPT_DEPS ${SUBSYS_OPT_DEPS})
if (BUILD_tools)
if(BUILD_visualization)
- PCL_SUBSYS_DEPEND(BUILD_tools "${SUBSYS_NAME}" DEPS visualization)
-
PCL_ADD_EXECUTABLE(pcl_obj_rec_ransac_model_opps "${SUBSYS_NAME}" obj_rec_ransac_model_opps.cpp)
target_link_libraries(pcl_obj_rec_ransac_model_opps pcl_common pcl_visualization pcl_recognition)
int
main (int argc, char** argv)
{
- print_info ("Convert a PCD file to PLY format. For more information, use: %s -h\n", argv[0]);
+ print_info ("Convert a PCD file to a de-meaned PCD file. For more information, use: %s -h\n", argv[0]);
if (argc < 3)
{
else if (feature_name == "VFHEstimation")
computeFeatureViaNormals< VFHEstimation<PointXYZ, Normal, VFHSignature308>, PointXYZ, Normal, VFHSignature308>
(cloud, output, argc, argv, false);
+ else
+ {
+ print_error ("Valid feature names are PFHEstimation, FPFHEstimation, VFHEstimation.\n");
+ return (-1);
+ }
// Save into the second file
saveCloud (argv[p_file_indices[1]], output);
double loopDist = 5.0;
pcl::console::parse_argument (argc, argv, "-D", loopDist);
- int loopCount = 20;
+ unsigned int loopCount = 20;
pcl::console::parse_argument (argc, argv, "-c", loopCount);
pcl::registration::LUM<PointType> lum;
{
print_error ("Syntax is: %s input.{ply,obj} output.pcd <options>\n", argv[0]);
print_info (" where options are:\n");
- print_info (" -level X = tesselated sphere level (default: ");
+ print_info (" -level X = tessellated sphere level (default: ");
print_value ("%d", default_tesselated_sphere_level);
print_info (")\n");
print_info (" -resolution X = the sphere resolution in angle increments (default: ");
inline void
randomPointTriangle (float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3,
- Eigen::Vector4f& p)
+ float r1, float r2, Eigen::Vector3f& p)
{
- float r1 = static_cast<float> (uniform_deviate (rand ()));
- float r2 = static_cast<float> (uniform_deviate (rand ()));
float r1sqr = std::sqrt (r1);
float OneMinR1Sqr = (1 - r1sqr);
float OneMinR2 = (1 - r2);
p[0] = c1;
p[1] = c2;
p[2] = c3;
- p[3] = 0;
}
inline void
-randPSurface (vtkPolyData * polydata, std::vector<double> * cumulativeAreas, double totalArea, Eigen::Vector4f& p, bool calcNormal, Eigen::Vector3f& n)
+randPSurface (vtkPolyData * polydata, std::vector<double> * cumulativeAreas, double totalArea, Eigen::Vector3f& p, bool calcNormal, Eigen::Vector3f& n, bool calcColor, Eigen::Vector3f& c)
{
float r = static_cast<float> (uniform_deviate (rand ()) * totalArea);
n = v1.cross (v2);
n.normalize ();
}
+ float r1 = static_cast<float> (uniform_deviate (rand ()));
+ float r2 = static_cast<float> (uniform_deviate (rand ()));
randomPointTriangle (float (A[0]), float (A[1]), float (A[2]),
float (B[0]), float (B[1]), float (B[2]),
- float (C[0]), float (C[1]), float (C[2]), p);
+ float (C[0]), float (C[1]), float (C[2]), r1, r2, p);
+
+ if (calcColor)
+ {
+ vtkUnsignedCharArray *const colors = vtkUnsignedCharArray::SafeDownCast (polydata->GetPointData ()->GetScalars ());
+ if (colors && colors->GetNumberOfComponents () == 3)
+ {
+ double cA[3], cB[3], cC[3];
+ colors->GetTuple (ptIds[0], cA);
+ colors->GetTuple (ptIds[1], cB);
+ colors->GetTuple (ptIds[2], cC);
+
+ randomPointTriangle (float (cA[0]), float (cA[1]), float (cA[2]),
+ float (cB[0]), float (cB[1]), float (cB[2]),
+ float (cC[0]), float (cC[1]), float (cC[2]), r1, r2, c);
+ }
+ else
+ {
+ static bool printed_once = false;
+ if (!printed_once)
+ PCL_WARN ("Mesh has no vertex colors, or vertex colors are not RGB!");
+ printed_once = true;
+ }
+ }
}
void
-uniform_sampling (vtkSmartPointer<vtkPolyData> polydata, size_t n_samples, bool calc_normal, pcl::PointCloud<pcl::PointNormal> & cloud_out)
+uniform_sampling (vtkSmartPointer<vtkPolyData> polydata, size_t n_samples, bool calc_normal, bool calc_color, pcl::PointCloud<pcl::PointXYZRGBNormal> & cloud_out)
{
polydata->BuildCells ();
vtkSmartPointer<vtkCellArray> cells = polydata->GetPolys ();
for (i = 0; i < n_samples; i++)
{
- Eigen::Vector4f p;
+ Eigen::Vector3f p;
Eigen::Vector3f n;
- randPSurface (polydata, &cumulativeAreas, totalArea, p, calc_normal, n);
+ Eigen::Vector3f c;
+ randPSurface (polydata, &cumulativeAreas, totalArea, p, calc_normal, n, calc_color, c);
cloud_out.points[i].x = p[0];
cloud_out.points[i].y = p[1];
cloud_out.points[i].z = p[2];
cloud_out.points[i].normal_y = n[1];
cloud_out.points[i].normal_z = n[2];
}
+ if (calc_color)
+ {
+ cloud_out.points[i].r = static_cast<uint8_t>(c[0]);
+ cloud_out.points[i].g = static_cast<uint8_t>(c[1]);
+ cloud_out.points[i].b = static_cast<uint8_t>(c[2]);
+ }
}
}
print_value ("%f", default_leaf_size);
print_info (" m)\n");
print_info (" -write_normals = flag to write normals to the output pcd\n");
+ print_info (" -write_colors = flag to write colors to the output pcd\n");
print_info (
" -no_vis_result = flag to stop visualizing the generated pcd\n");
}
parse_argument (argc, argv, "-leaf_size", leaf_size);
bool vis_result = ! find_switch (argc, argv, "-no_vis_result");
const bool write_normals = find_switch (argc, argv, "-write_normals");
+ const bool write_colors = find_switch (argc, argv, "-write_colors");
// Parse the command line arguments for .ply and PCD files
std::vector<int> pcd_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
vis.spin ();
}
- pcl::PointCloud<pcl::PointNormal>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointNormal>);
- uniform_sampling (polydata1, SAMPLE_POINTS_, write_normals, *cloud_1);
+ pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointXYZRGBNormal>);
+ uniform_sampling (polydata1, SAMPLE_POINTS_, write_normals, write_colors, *cloud_1);
if (INTER_VIS)
{
visualization::PCLVisualizer vis_sampled;
- vis_sampled.addPointCloud<pcl::PointNormal> (cloud_1);
+ vis_sampled.addPointCloud<pcl::PointXYZRGBNormal> (cloud_1);
if (write_normals)
- vis_sampled.addPointCloudNormals<pcl::PointNormal> (cloud_1, 1, 0.02f, "cloud_normals");
+ vis_sampled.addPointCloudNormals<pcl::PointXYZRGBNormal> (cloud_1, 1, 0.02f, "cloud_normals");
vis_sampled.spin ();
}
// Voxelgrid
- VoxelGrid<PointNormal> grid_;
+ VoxelGrid<PointXYZRGBNormal> grid_;
grid_.setInputCloud (cloud_1);
grid_.setLeafSize (leaf_size, leaf_size, leaf_size);
- pcl::PointCloud<pcl::PointNormal>::Ptr voxel_cloud (new pcl::PointCloud<pcl::PointNormal>);
+ pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr voxel_cloud (new pcl::PointCloud<pcl::PointXYZRGBNormal>);
grid_.filter (*voxel_cloud);
if (vis_result)
{
visualization::PCLVisualizer vis3 ("VOXELIZED SAMPLES CLOUD");
- vis3.addPointCloud<pcl::PointNormal> (voxel_cloud);
+ vis3.addPointCloud<pcl::PointXYZRGBNormal> (voxel_cloud);
if (write_normals)
- vis3.addPointCloudNormals<pcl::PointNormal> (voxel_cloud, 1, 0.02f, "cloud_normals");
+ vis3.addPointCloudNormals<pcl::PointXYZRGBNormal> (voxel_cloud, 1, 0.02f, "cloud_normals");
vis3.spin ();
}
- if (!write_normals)
+ if (write_normals && write_colors)
{
- pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz (new pcl::PointCloud<pcl::PointXYZ>);
+ savePCDFileASCII (argv[pcd_file_indices[0]], *voxel_cloud);
+ }
+ else if (write_normals)
+ {
+ pcl::PointCloud<pcl::PointNormal>::Ptr cloud_xyzn (new pcl::PointCloud<pcl::PointNormal>);
+ // Strip uninitialized colors from cloud:
+ pcl::copyPointCloud (*voxel_cloud, *cloud_xyzn);
+ savePCDFileASCII (argv[pcd_file_indices[0]], *cloud_xyzn);
+ }
+ else if (write_colors)
+ {
+ pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_xyzrgb (new pcl::PointCloud<pcl::PointXYZRGB>);
// Strip uninitialized normals from cloud:
- pcl::copyPointCloud (*voxel_cloud, *cloud_xyz);
- savePCDFileASCII (argv[pcd_file_indices[0]], *cloud_xyz);
+ pcl::copyPointCloud (*voxel_cloud, *cloud_xyzrgb);
+ savePCDFileASCII (argv[pcd_file_indices[0]], *cloud_xyzrgb);
}
- else
+ else // !write_normals && !write_colors
{
- savePCDFileASCII (argv[pcd_file_indices[0]], *voxel_cloud);
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_xyz (new pcl::PointCloud<pcl::PointXYZ>);
+ // Strip uninitialized normals and colors from cloud:
+ pcl::copyPointCloud (*voxel_cloud, *cloud_xyz);
+ savePCDFileASCII (argv[pcd_file_indices[0]], *cloud_xyz);
}
}
using namespace pcl::console;
int default_polynomial_order = 2;
-bool default_use_polynomial_fit = false;
double default_search_radius = 0.0,
default_sqr_gauss_param = 0.0;
print_value ("%f", default_search_radius); print_info (")\n");
print_info (" -sqr_gauss_param X = parameter used for the distance based weighting of neighbors (recommended = search_radius^2) (default: ");
print_value ("%f", default_sqr_gauss_param); print_info (")\n");
- print_info (" -use_polynomial_fit X = decides whether the surface and normal are approximated using a polynomial or only via tangent estimation (default: ");
- print_value ("%d", default_use_polynomial_fit); print_info (")\n");
- print_info (" -polynomial_order X = order of the polynomial to be fit (implicitly, use_polynomial_fit = 1) (default: ");
+ print_info (" -polynomial_order X = order of the polynomial to be fit (polynomial_order > 1, indicates using a polynomial fit) (default: ");
print_value ("%d", default_polynomial_order); print_info (")\n");
}
}
void
-compute (const pcl::PCLPointCloud2::ConstPtr &input, pcl::PCLPointCloud2 &output,
- double search_radius, bool sqr_gauss_param_set, double sqr_gauss_param,
- bool use_polynomial_fit, int polynomial_order)
+compute (const pcl::PCLPointCloud2::ConstPtr &input,
+ pcl::PCLPointCloud2 &output,
+ double search_radius,
+ bool sqr_gauss_param_set,
+ double sqr_gauss_param,
+ int polynomial_order)
{
PointCloud<PointXYZ>::Ptr xyz_cloud_pre (new pcl::PointCloud<PointXYZ> ()),
mls.setInputCloud (xyz_cloud);
mls.setSearchRadius (search_radius);
if (sqr_gauss_param_set) mls.setSqrGaussParam (sqr_gauss_param);
- mls.setPolynomialFit (use_polynomial_fit);
mls.setPolynomialOrder (polynomial_order);
// mls.setUpsamplingMethod (MovingLeastSquares<PointXYZ, PointNormal>::SAMPLE_LOCAL_PLANE);
mls.setSearchMethod (tree);
mls.setComputeNormals (true);
- PCL_INFO ("Computing smoothed surface and normals with search_radius %f , sqr_gaussian_param %f, polynomial fitting %d, polynomial order %d\n",
- mls.getSearchRadius(), mls.getSqrGaussParam(), mls.getPolynomialFit(), mls.getPolynomialOrder());
+ PCL_INFO ("Computing smoothed surface and normals with search_radius %f , sqr_gaussian_param %f, polynomial order %d\n",
+ mls.getSearchRadius(), mls.getSqrGaussParam(), mls.getPolynomialOrder());
TicToc tt;
tt.tic ();
mls.process (*xyz_cloud_smoothed);
double sqr_gauss_param = default_sqr_gauss_param;
bool sqr_gauss_param_set = true;
int polynomial_order = default_polynomial_order;
- bool use_polynomial_fit = default_use_polynomial_fit;
parse_argument (argc, argv, "-radius", search_radius);
+ parse_argument (argc, argv, "-polynomial_order", polynomial_order);
if (parse_argument (argc, argv, "-sqr_gauss_param", sqr_gauss_param) == -1)
sqr_gauss_param_set = false;
- if (parse_argument (argc, argv, "-polynomial_order", polynomial_order) != -1 )
- use_polynomial_fit = true;
- parse_argument (argc, argv, "-use_polynomial_fit", use_polynomial_fit);
// Load the first file
pcl::PCLPointCloud2::Ptr cloud (new pcl::PCLPointCloud2);
// Do the smoothing
pcl::PCLPointCloud2 output;
- compute (cloud, output, search_radius, sqr_gauss_param_set, sqr_gauss_param,
- use_polynomial_fit, polynomial_order);
+ compute (cloud, output, search_radius, sqr_gauss_param_set, sqr_gauss_param, polynomial_order);
// Save into the second file
saveCloud (argv[p_file_indices[1]], output);
* \author Raphael Favier
* */
-#include <pcl/io/pcd_io.h>
+#include <pcl/io/auto_io.h>
#include <pcl/common/time.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/point_cloud_handlers.h>
#include <pcl/visualization/common/common.h>
#include <pcl/octree/octree_pointcloud_voxelcentroid.h>
+#include <pcl/common/centroid.h>
#include <pcl/filters/filter.h>
#include "boost.h"
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkCubeSource.h>
-//=============================
-// Displaying cubes is very long!
-// so we limit their numbers.
- const int MAX_DISPLAYED_CUBES(15000);
-//=============================
+#include <vtkCleanPolyData.h>
class OctreeViewer
{
public:
OctreeViewer (std::string &filename, double resolution) :
- viz ("Octree visualizator"), cloud (new pcl::PointCloud<pcl::PointXYZ>()),
- displayCloud (new pcl::PointCloud<pcl::PointXYZ>()), octree (resolution), displayCubes(false),
- showPointsWithCubes (false), wireframe (true)
+ viz ("Octree visualizator"),
+ cloud (new pcl::PointCloud<pcl::PointXYZ>()),
+ displayCloud (new pcl::PointCloud<pcl::PointXYZ>()),
+ cloudVoxel (new pcl::PointCloud<pcl::PointXYZ>()),
+ octree (resolution),
+ wireframe (true),
+ show_cubes_ (true),
+ show_centroids_ (false),
+ show_original_points_ (false),
+ point_size_ (1.0)
{
//try to load the cloud
viz.registerKeyboardCallback(&OctreeViewer::keyboardEventOccurred, *this, 0);
//key legends
- viz.addText("Keys:", 0, 170, 0.0, 1.0, 0.0, "keys_t");
- viz.addText("a -> Increment displayed depth", 10, 155, 0.0, 1.0, 0.0, "key_a_t");
- viz.addText("z -> Decrement displayed depth", 10, 140, 0.0, 1.0, 0.0, "key_z_t");
- viz.addText("d -> Toggle Point/Cube representation", 10, 125, 0.0, 1.0, 0.0, "key_d_t");
- viz.addText("x -> Show/Hide original cloud", 10, 110, 0.0, 1.0, 0.0, "key_x_t");
- viz.addText("s/w -> Surface/Wireframe representation", 10, 95, 0.0, 1.0, 0.0, "key_sw_t");
+ viz.addText ("Keys:", 0, 170, 0.0, 1.0, 0.0, "keys_t");
+ viz.addText ("a -> Increment displayed depth", 10, 155, 0.0, 1.0, 0.0, "key_a_t");
+ viz.addText ("z -> Decrement displayed depth", 10, 140, 0.0, 1.0, 0.0, "key_z_t");
+ viz.addText ("v -> Toggle octree cubes representation", 10, 125, 0.0, 1.0, 0.0, "key_v_t");
+ viz.addText ("b -> Toggle centroid points representation", 10, 110, 0.0, 1.0, 0.0, "key_b_t");
+ viz.addText ("n -> Toggle original point cloud representation", 10, 95, 0.0, 1.0, 0.0, "key_n_t");
//set current level to half the maximum one
displayedDepth = static_cast<int> (floor (octree.getTreeDepth() / 2.0));
if (displayedDepth == 0)
displayedDepth = 1;
+ // assign point cloud to octree
+ octree.setInputCloud (cloud);
+
+ // add points from cloud to octree
+ octree.addPointsFromInputCloud ();
+
//show octree at default depth
extractPointsAtLevel(displayedDepth);
//========================================================
//visualizer
pcl::PointCloud<pcl::PointXYZ>::Ptr xyz;
- pcl::PointCloud<pcl::PointXYZRGB>::Ptr xyz_rgb;
pcl::visualization::PCLVisualizer viz;
//original cloud
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud;
//displayed_cloud
pcl::PointCloud<pcl::PointXYZ>::Ptr displayCloud;
+ // cloud which contains the voxel center
+ pcl::PointCloud<pcl::PointXYZ>::Ptr cloudVoxel;
//octree
pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ> octree;
//level
int displayedDepth;
- //bool to decide if we display points or cubes
- bool displayCubes, showPointsWithCubes, wireframe;
+ //bool to decide what should be display
+ bool wireframe;
+ bool show_cubes_, show_centroids_, show_original_points_;
+ float point_size_;
//========================================================
/* \brief Callback to interact with the keyboard
*
*/
- void keyboardEventOccurred(const pcl::visualization::KeyboardEvent &event, void *)
+ void keyboardEventOccurred (const pcl::visualization::KeyboardEvent &event, void *)
{
- if (event.getKeySym() == "a" && event.keyDown())
+ if (event.getKeySym () == "a" && event.keyDown ())
+ {
+ IncrementLevel ();
+ }
+ else if (event.getKeySym () == "z" && event.keyDown ())
+ {
+ DecrementLevel ();
+ }
+ else if (event.getKeySym () == "v" && event.keyDown ())
{
- IncrementLevel();
+ show_cubes_ = !show_cubes_;
+ update ();
}
- else if (event.getKeySym() == "z" && event.keyDown())
+ else if (event.getKeySym () == "b" && event.keyDown ())
{
- DecrementLevel();
+ show_centroids_ = !show_centroids_;
+ update ();
}
- else if (event.getKeySym() == "d" && event.keyDown())
+ else if (event.getKeySym () == "n" && event.keyDown ())
{
- displayCubes = !displayCubes;
- update();
+ show_original_points_ = !show_original_points_;
+ update ();
}
- else if (event.getKeySym() == "x" && event.keyDown())
+ else if (event.getKeySym () == "w" && event.keyDown ())
{
- showPointsWithCubes = !showPointsWithCubes;
- update();
+ if (!wireframe)
+ wireframe = true;
+ update ();
}
- else if (event.getKeySym() == "w" && event.keyDown())
+ else if (event.getKeySym () == "s" && event.keyDown ())
{
- if(!wireframe)
- wireframe=true;
- update();
+ if (wireframe)
+ wireframe = false;
+ update ();
}
- else if (event.getKeySym() == "s" && event.keyDown())
+ else if ((event.getKeyCode () == '-') && event.keyDown ())
{
- if(wireframe)
- wireframe=false;
- update();
+ point_size_ = std::max(1.0f, point_size_ * (1 / 2.0f));
+ update ();
+ }
+ else if ((event.getKeyCode () == '+') && event.keyDown ())
+ {
+ point_size_ *= 2.0f;
+ update ();
}
}
{
std::cout << "Loading file " << filename.c_str() << std::endl;
//read cloud
- if (pcl::io::loadPCDFile(filename, *cloud))
+ if (pcl::io::load (filename, *cloud))
{
- std::cerr << "ERROR: Cannot open file " << filename << "! Aborting..." << std::endl;
return false;
}
/* \brief Helper function that draw info for the user on the viewer
*
*/
- void showLegend(bool showCubes)
+ void showLegend ()
{
char dataDisplay[256];
- sprintf(dataDisplay, "Displaying data as %s", (showCubes) ? ("CUBES") : ("POINTS"));
- viz.removeShape("disp_t");
- viz.addText(dataDisplay, 0, 60, 1.0, 0.0, 0.0, "disp_t");
+ sprintf (dataDisplay, "Displaying octree cubes: %s", (show_cubes_) ? ("True") : ("False"));
+ viz.removeShape ("disp_octree_cubes");
+ viz.addText (dataDisplay, 0, 75, 1.0, 0.0, 0.0, "disp_octree_cubes");
+
+ sprintf (dataDisplay, "Displaying centroids voxel: %s", (show_centroids_) ? ("True") : ("False"));
+ viz.removeShape ("disp_centroids_voxel");
+ viz.addText (dataDisplay, 0, 60, 1.0, 0.0, 0.0, "disp_centroids_voxel");
+
+ sprintf (dataDisplay, "Displaying original point cloud: %s", (show_original_points_) ? ("True") : ("False"));
+ viz.removeShape ("disp_original_points");
+ viz.addText (dataDisplay, 0, 45, 1.0, 0.0, 0.0, "disp_original_points");
char level[256];
- sprintf(level, "Displayed depth is %d on %d", displayedDepth, octree.getTreeDepth());
- viz.removeShape("level_t1");
- viz.addText(level, 0, 45, 1.0, 0.0, 0.0, "level_t1");
-
- viz.removeShape("level_t2");
- sprintf(level, "Voxel size: %.4fm [%lu voxels]", sqrt(octree.getVoxelSquaredSideLen(displayedDepth)),
- displayCloud->points.size());
- viz.addText(level, 0, 30, 1.0, 0.0, 0.0, "level_t2");
-
- viz.removeShape("org_t");
- if (showPointsWithCubes)
- viz.addText("Displaying original cloud", 0, 15, 1.0, 0.0, 0.0, "org_t");
+ sprintf (level, "Displayed depth is %d on %d", displayedDepth, octree.getTreeDepth());
+ viz.removeShape ("level_t1");
+ viz.addText (level, 0, 30, 1.0, 0.0, 0.0, "level_t1");
+
+ viz.removeShape ("level_t2");
+ sprintf (level, "Voxel size: %.4fm [%lu voxels]", std::sqrt (octree.getVoxelSquaredSideLen (displayedDepth)),
+ cloudVoxel->points.size ());
+ viz.addText (level, 0, 15, 1.0, 0.0, 0.0, "level_t2");
}
/* \brief Visual update. Create visualizations and add them to the viewer
void update()
{
//remove existing shapes from visualizer
- clearView();
-
- //prevent the display of too many cubes
- bool displayCubeLegend = displayCubes && static_cast<int> (displayCloud->points.size ()) <= MAX_DISPLAYED_CUBES;
+ clearView ();
- showLegend(displayCubeLegend);
+ showLegend ();
- if (displayCubeLegend)
+ if (show_cubes_)
{
//show octree as cubes
- showCubes(sqrt(octree.getVoxelSquaredSideLen(displayedDepth)));
- if (showPointsWithCubes)
- {
- //add original cloud in visualizer
- pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ> color_handler(cloud, "z");
- viz.addPointCloud(cloud, color_handler, "cloud");
- }
+ showCubes (std::sqrt (octree.getVoxelSquaredSideLen (displayedDepth)));
}
- else
+
+ if (show_centroids_)
+ {
+ //show centroid points
+ pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ> color_handler (cloudVoxel, "x");
+ viz.addPointCloud (cloudVoxel, color_handler, "cloud_centroid");
+ viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, point_size_, "cloud_centroid");
+ }
+
+ if (show_original_points_)
{
- //add current cloud in visualizer
- pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ> color_handler(displayCloud,"z");
- viz.addPointCloud(displayCloud, color_handler, "cloud");
+ //show origin point cloud
+ pcl::visualization::PointCloudColorHandlerGenericField<pcl::PointXYZ> color_handler (cloud, "z");
+ viz.addPointCloud (cloud, color_handler, "cloud");
+ viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, point_size_, "cloud");
}
}
void clearView()
{
//remove cubes if any
- vtkRenderer *renderer = viz.getRenderWindow()->GetRenderers()->GetFirstRenderer();
- while (renderer->GetActors()->GetNumberOfItems() > 0)
- renderer->RemoveActor(renderer->GetActors()->GetLastActor());
+ vtkRenderer *renderer = viz.getRenderWindow ()->GetRenderers ()->GetFirstRenderer ();
+ while (renderer->GetActors ()->GetNumberOfItems () > 0)
+ renderer->RemoveActor (renderer->GetActors ()->GetLastActor ());
//remove point clouds if any
- viz.removePointCloud("cloud");
+ viz.removePointCloud ("cloud");
+ viz.removePointCloud ("cloud_centroid");
}
- /* \brief Create a vtkSmartPointer object containing a cube
- *
- */
- vtkSmartPointer<vtkPolyData> GetCuboid(double minX, double maxX, double minY, double maxY, double minZ, double maxZ)
- {
- vtkSmartPointer<vtkCubeSource> cube = vtkSmartPointer<vtkCubeSource>::New();
- cube->SetBounds(minX, maxX, minY, maxY, minZ, maxZ);
- return cube->GetOutput();
- }
/* \brief display octree cubes via vtk-functions
*
*/
void showCubes(double voxelSideLen)
{
- //get the renderer of the visualizer object
- vtkRenderer *renderer = viz.getRenderWindow()->GetRenderers()->GetFirstRenderer();
+ vtkSmartPointer<vtkAppendPolyData> appendFilter = vtkSmartPointer<vtkAppendPolyData>::New ();
- vtkSmartPointer<vtkAppendPolyData> treeWireframe = vtkSmartPointer<vtkAppendPolyData>::New();
- size_t i;
+ // Create every cubes to be displayed
double s = voxelSideLen / 2.0;
- for (i = 0; i < displayCloud->points.size(); i++)
+ for (size_t i = 0; i < cloudVoxel->points.size (); i++)
{
+ double x = cloudVoxel->points[i].x;
+ double y = cloudVoxel->points[i].y;
+ double z = cloudVoxel->points[i].z;
+
+ vtkSmartPointer<vtkCubeSource> wk_cubeSource = vtkSmartPointer<vtkCubeSource>::New ();
- double x = displayCloud->points[i].x;
- double y = displayCloud->points[i].y;
- double z = displayCloud->points[i].z;
+ wk_cubeSource->SetBounds (x - s, x + s, y - s, y + s, z - s, z + s);
+ wk_cubeSource->Update ();
#if VTK_MAJOR_VERSION < 6
- treeWireframe->AddInput(GetCuboid(x - s, x + s, y - s, y + s, z - s, z + s));
+ appendFilter->AddInput (wk_cubeSource->GetOutput ());
#else
- treeWireframe->AddInputData (GetCuboid (x - s, x + s, y - s, y + s, z - s, z + s));
+ appendFilter->AddInputData (wk_cubeSource->GetOutput ());
#endif
}
- vtkSmartPointer<vtkActor> treeActor = vtkSmartPointer<vtkActor>::New();
+ // Remove any duplicate points
+ vtkSmartPointer<vtkCleanPolyData> cleanFilter = vtkSmartPointer<vtkCleanPolyData>::New ();
- vtkSmartPointer<vtkDataSetMapper> mapper = vtkSmartPointer<vtkDataSetMapper>::New();
-#if VTK_MAJOR_VERSION < 6
- mapper->SetInput(treeWireframe->GetOutput());
-#else
- mapper->SetInputData (treeWireframe->GetOutput ());
-#endif
- treeActor->SetMapper(mapper);
+ cleanFilter->SetInputConnection (appendFilter->GetOutputPort ());
+ cleanFilter->Update ();
+
+ //Create a mapper and actor
+ vtkSmartPointer<vtkPolyDataMapper> multiMapper = vtkSmartPointer<vtkPolyDataMapper>::New ();
- treeActor->GetProperty()->SetColor(1.0, 1.0, 1.0);
- treeActor->GetProperty()->SetLineWidth(2);
- if(wireframe)
+ multiMapper->SetInputConnection (cleanFilter->GetOutputPort ());
+
+ vtkSmartPointer<vtkActor> multiActor = vtkSmartPointer<vtkActor>::New ();
+
+ multiActor->SetMapper (multiMapper);
+
+ multiActor->GetProperty ()->SetColor (1.0, 1.0, 1.0);
+ multiActor->GetProperty ()->SetAmbient (1.0);
+ multiActor->GetProperty ()->SetLineWidth (1);
+ multiActor->GetProperty ()->EdgeVisibilityOn ();
+ multiActor->GetProperty ()->SetOpacity (1.0);
+
+ if (wireframe)
{
- treeActor->GetProperty()->SetRepresentationToWireframe();
- treeActor->GetProperty()->SetOpacity(0.35);
+ multiActor->GetProperty ()->SetRepresentationToWireframe ();
}
else
- treeActor->GetProperty()->SetRepresentationToSurface();
+ {
+ multiActor->GetProperty ()->SetRepresentationToSurface ();
+ }
+
+ // Add the actor to the scene
+ viz.getRenderWindow ()->GetRenderers ()->GetFirstRenderer ()->AddActor (multiActor);
- renderer->AddActor(treeActor);
+ // Render and interact
+ viz.getRenderWindow ()->Render ();
}
/* \brief Extracts all the points of depth = level from the octree
void extractPointsAtLevel(int depth)
{
displayCloud->points.clear();
+ cloudVoxel->points.clear();
- pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::Iterator tree_it;
- pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::Iterator tree_it_end = octree.end();
-
- pcl::PointXYZ pt;
+ pcl::PointXYZ pt_voxel_center;
+ pcl::PointXYZ pt_centroid;
std::cout << "===== Extracting data at depth " << depth << "... " << std::flush;
double start = pcl::getTime ();
- for (tree_it = octree.begin(depth); tree_it!=tree_it_end; ++tree_it)
+ for (pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::FixedDepthIterator tree_it = octree.fixed_depth_begin (depth);
+ tree_it != octree.fixed_depth_end ();
+ ++tree_it)
{
+ // Compute the point at the center of the voxel which represents the current OctreeNode
Eigen::Vector3f voxel_min, voxel_max;
- octree.getVoxelBounds(tree_it, voxel_min, voxel_max);
+ octree.getVoxelBounds (tree_it, voxel_min, voxel_max);
+
+ pt_voxel_center.x = (voxel_min.x () + voxel_max.x ()) / 2.0f;
+ pt_voxel_center.y = (voxel_min.y () + voxel_max.y ()) / 2.0f;
+ pt_voxel_center.z = (voxel_min.z () + voxel_max.z ()) / 2.0f;
+ cloudVoxel->points.push_back (pt_voxel_center);
+
+ // If the asked depth is the depth of the octree, retrieve the centroid at this LeafNode
+ if (octree.getTreeDepth () == (unsigned int) depth)
+ {
+ pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::LeafNode* container = static_cast<pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::LeafNode*> (tree_it.getCurrentOctreeNode ());
+
+ container->getContainer ().getCentroid (pt_centroid);
+ }
+ // Else, compute the centroid of the LeafNode under the current BranchNode
+ else
+ {
+ // Retrieve every centroid under the current BranchNode
+ pcl::octree::OctreeKey dummy_key;
+ pcl::PointCloud<pcl::PointXYZ>::VectorType voxelCentroids;
+ octree.getVoxelCentroidsRecursive (static_cast<pcl::octree::OctreePointCloudVoxelCentroid<pcl::PointXYZ>::BranchNode*> (*tree_it), dummy_key, voxelCentroids);
+
+ // Iterate over the leafs to compute the centroid of all of them
+ pcl::CentroidPoint<pcl::PointXYZ> centroid;
+ for (size_t j = 0; j < voxelCentroids.size (); ++j)
+ {
+ centroid.add (voxelCentroids[j]);
+ }
+ centroid.get (pt_centroid);
+ }
- pt.x = (voxel_min.x() + voxel_max.x()) / 2.0f;
- pt.y = (voxel_min.y() + voxel_max.y()) / 2.0f;
- pt.z = (voxel_min.z() + voxel_max.z()) / 2.0f;
- displayCloud->points.push_back(pt);
+ displayCloud->points.push_back (pt_centroid);
}
double end = pcl::getTime ();
tide::TrackEntry::Ptr track(new tide::TrackEntry(1, 1, "pointcloud2"));
track->name("3D video");
track->codec_name("pcl::PCLPointCloud2");
- // Adding each level 1 element (only the first occurance, in the case of
+ // Adding each level 1 element (only the first occurrence, in the case of
// clusters) to the index makes opening the file later much faster.
segment.index.insert(std::make_pair(tracks.id(),
segment.to_segment_offset(stream_.tellp())));
tide::Segment segment;
segment.read(stream);
// The segment's date is stored as the number of nanoseconds since the
- // start of the millenium. Boost::Date_Time is invaluable here.
+ // start of the millennium. Boost::Date_Time is invaluable here.
bpt::ptime basis(boost::gregorian::date(2001, 1, 1));
bpt::time_duration sd(bpt::microseconds(segment.info.date() / 1000));
bpt::ptime seg_start(basis + sd);
}
else
{
- print_error ("Unknow depth unit defined.\n");
+ print_error ("Unknown depth unit defined.\n");
exit (-1);
}
}
*/
#include <pcl/PCLPointCloud2.h>
-#include <pcl/io/pcd_io.h>
+#include <pcl/io/auto_io.h>
#include <pcl/filters/uniform_sampling.h>
#include <pcl/console/print.h>
#include <pcl/console/parse.h>
#include <pcl/console/time.h>
+#include <boost/filesystem.hpp>
+#include <algorithm>
+#include <string>
+#include <pcl/io/vtk_io.h>
using namespace std;
using namespace pcl;
double default_radius = 0.01;
-Eigen::Vector4f translation;
-Eigen::Quaternionf orientation;
-
void
printHelp (int, char **argv)
{
- print_error ("Syntax is: %s input.pcd output.pcd <options>\n", argv[0]);
+ print_error ("Syntax is: %s <input_point_cloud> <output_point_cloud> <options>\n", argv[0]);
+ print_info ("This tool rely on the file extensions to guess the good reader/writer.\n");
+ print_info ("The supported extension for the point cloud are .pcd .ply and .vtk\n");
print_info (" where options are:\n");
print_info (" -radius X = use a leaf size of X,X,X to uniformly select 1 point per leaf (default: ");
print_value ("%f", default_radius); print_info (")\n");
print_highlight ("Loading "); print_value ("%s ", filename.c_str ());
tt.tic ();
- if (loadPCDFile (filename, cloud, translation, orientation) < 0)
+ if (pcl::io::load (filename, cloud)) {
+ print_error ("Cannot found input file name (%s).\n", filename.c_str ());
return (false);
- print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", cloud.width * cloud.height); print_info (" points]\n");
+ }
+ print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : ");
+ print_value ("%d", cloud.width * cloud.height); print_info (" points]\n");
print_info ("Available dimensions: "); print_value ("%s\n", getFieldsList (cloud).c_str ());
return (true);
PointCloud<PointXYZ> output_;
us.filter (output_);
- print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", output_.size()); print_info (" points]\n");
+ print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : ");
+ print_value ("%d", output_.size()); print_info (" points]\n");
// Convert data back
toPCLPointCloud2 (output_, output);
print_highlight ("Saving "); print_value ("%s ", filename.c_str ());
- PCDWriter w;
- w.writeBinaryCompressed (filename, output, translation, orientation);
+ PCDWriter w_pcd;
+ PLYWriter w_ply;
+ std::string output_ext = boost::filesystem::extension (filename);
+ std::transform (output_ext.begin (), output_ext.end (), output_ext.begin (), ::tolower);
+
+ if (output_ext.compare (".pcd") == 0)
+ {
+ w_pcd.writeBinaryCompressed (filename, output);
+ }
+ else if (output_ext.compare (".ply") == 0)
+ {
+ w_ply.writeBinary (filename, output);
+ }
+ else if (output_ext.compare (".vtk") == 0)
+ {
+ w_ply.writeBinary (filename, output);
+ }
print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", output.width * output.height); print_info (" points]\n");
}
// Parse the command line arguments for .pcd files
vector<int> p_file_indices;
- p_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
+ vector<std::string> extension;
+ extension.push_back (".pcd");
+ extension.push_back (".ply");
+ extension.push_back (".vtk");
+ p_file_indices = parse_file_extension_argument (argc, argv, extension);
+
if (p_file_indices.size () != 2)
{
- print_error ("Need one input PCD file and one output PCD file to continue.\n");
+ print_error ("Need one input file and one output file to continue.\n");
return (-1);
}
+ std::string input_filename = argv[p_file_indices[0]];
+ std::string output_filename = argv[p_file_indices[1]];
+
// Command line parsing
double radius = default_radius;
parse_argument (argc, argv, "-radius", radius);
// Load the first file
pcl::PCLPointCloud2::Ptr cloud (new pcl::PCLPointCloud2);
- if (!loadCloud (argv[p_file_indices[0]], *cloud))
+ if (!loadCloud (input_filename, *cloud))
return (-1);
// Perform the keypoint estimation
compute (cloud, output, radius);
// Save into the second file
- saveCloud (argv[p_file_indices[1]], output);
+ saveCloud (output_filename, output);
}
-
" -view_point <x,y,z> : set the camera viewpoint from where the acquisition will take place\n"
" -target_point <x,y,z> : the target point that the camera should look at (default: 0, 0, 0)\n"
" -organized <0|1> : create an organized, grid-like point cloud of width x height (1), or keep it unorganized with height = 1 (0)\n"
- " -noise <0|1> : add gausian noise (1) or keep the model noiseless (0)\n"
+ " -noise <0|1> : add gaussian noise (1) or keep the model noiseless (0)\n"
" -noise_std <x> : use X times the standard deviation\n"
"");
return (-1);
#include <pcl/tracking/kld_adaptive_particle_filter_omp.h>
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename StateT> void
+pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename StateT> void
pcl::tracking::KLDAdaptiveParticleFilterOMPTracker<PointInT, StateT>::weight ()
{
#include <pcl/tracking/particle_filter_omp.h>
+//////////////////////////////////////////////////////////////////////////////////////////////
+template <typename PointInT, typename StateT> void
+pcl::tracking::ParticleFilterOMPTracker<PointInT, StateT>::setNumberOfThreads (unsigned int nr_threads)
+{
+ if (nr_threads == 0)
+#ifdef _OPENMP
+ threads_ = omp_get_num_procs();
+#else
+ threads_ = 1;
+#endif
+ else
+ threads_ = nr_threads;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointInT, typename StateT> void
pcl::tracking::ParticleFilterOMPTracker<PointInT, StateT>::weight ()
{
iprev_point[0] = floor (prev_pt[0]);
iprev_point[1] = floor (prev_pt[1]);
- if (iprev_point[0] < -track_width_ || iprev_point[0] >= grad_x.width ||
- iprev_point[1] < -track_height_ || iprev_point[1] >= grad_y.height)
+ if (iprev_point[0] < -track_width_ || (uint32_t) iprev_point[0] >= grad_x.width ||
+ iprev_point[1] < -track_height_ || (uint32_t) iprev_point[1] >= grad_y.height)
{
if (level == 0)
status [ptidx] = -1;
next_pt -= half_win;
Eigen::Array2f prev_delta;
- for (int j = 0; j < max_iterations_; j++)
+ for (unsigned int j = 0; j < max_iterations_; j++)
{
inext_pt[0] = floor (next_pt[0]);
inext_pt[1] = floor (next_pt[1]);
- if (inext_pt[0] < -track_width_ || inext_pt[0] >= next.width ||
- inext_pt[1] < -track_height_ || inext_pt[1] >= next.height)
+ if (inext_pt[0] < -track_width_ || (uint32_t) inext_pt[0] >= next.width ||
+ inext_pt[1] < -track_height_ || (uint32_t) inext_pt[1] >= next.height)
{
if (level == 0)
status[ptidx] = -1;
inext_point[0] = floor (next_point[0]);
inext_point[1] = floor (next_point[1]);
- if (inext_point[0] < -track_width_ || inext_point[0] >= next.width ||
- inext_point[1] < -track_height_ || inext_point[1] >= next.height)
+ if (inext_point[0] < -track_width_ || (uint32_t) inext_point[0] >= next.width ||
+ inext_point[1] < -track_height_ || (uint32_t) inext_point[1] >= next.height)
{
status[ptidx] = -1;
continue;
x += static_cast<float> (sampleNormal (mean[0], cov[0]));
y += static_cast<float> (sampleNormal (mean[1], cov[1]));
z += static_cast<float> (sampleNormal (mean[2], cov[2]));
- roll += static_cast<float> (sampleNormal (mean[3], cov[3]));
- pitch += static_cast<float> (sampleNormal (mean[4], cov[4]));
- yaw += static_cast<float> (sampleNormal (mean[5], cov[5]));
+
+ // The roll, pitch, yaw space is not Euclidean, so if we sample roll,
+ // pitch, and yaw independently, we bias our sampling in a complicated
+ // way that depends on where in the space we are sampling.
+ //
+ // A solution is to always sample around mean = 0 and project our
+ // samples onto the space of rotations, SO(3). We rely on the fact
+ // that we are sampling with small variance, so we do not bias
+ // ourselves too much with this projection. We then rotate our
+ // samples by the user's requested mean. The benefit of this approach
+ // is that our distribution's properties are consistent over the space
+ // of rotations.
+ Eigen::Matrix3f current_rotation;
+ current_rotation = getTransformation (x, y, z, roll, pitch, yaw).rotation ();
+ Eigen::Quaternionf q_current_rotation (current_rotation);
+
+ Eigen::Matrix3f mean_rotation;
+ mean_rotation = getTransformation (mean[0], mean[1], mean[2],
+ mean[3], mean[4], mean[5]).rotation ();
+ Eigen::Quaternionf q_mean_rotation (mean_rotation);
+
+ // Scales 1.0 radians of variance in RPY sampling into equivalent units for quaternion sampling.
+ const float scale_factor = 0.2862;
+
+ float a = sampleNormal (0, scale_factor*cov[3]);
+ float b = sampleNormal (0, scale_factor*cov[4]);
+ float c = sampleNormal (0, scale_factor*cov[5]);
+
+ Eigen::Vector4f vec_sample_mean_0 (a, b, c, 1);
+ Eigen::Quaternionf q_sample_mean_0 (vec_sample_mean_0);
+ q_sample_mean_0.normalize ();
+
+ Eigen::Quaternionf q_sample_user_mean = q_sample_mean_0 * q_mean_rotation * q_current_rotation;
+
+ Eigen::Affine3f affine_R (q_sample_user_mean.toRotationMatrix ());
+ pcl::getEulerAngles (affine_R, roll, pitch, yaw);
}
void
if (u == 0.)
return (0.5);
y = u / 2.0;
- if (y < -6.)
+ if (y < -3.)
return (0.0);
- if (y > 6.)
+ if (y > 3.)
return (1.0);
if (y < 0.0)
y = - y;
using KLDAdaptiveParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
typedef Tracker<PointInT, StateT> BaseClass;
-
+
typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
typedef typename PointCloudIn::Ptr PointCloudInPtr;
typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
+ */
KLDAdaptiveParticleFilterOMPTracker (unsigned int nr_threads = 0)
: KLDAdaptiveParticleFilterTracker<PointInT, StateT> ()
- , threads_ (nr_threads)
{
tracker_name_ = "KLDAdaptiveParticleFilterOMPTracker";
+
+ setNumberOfThreads(nr_threads);
}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
protected:
/** \brief The number of threads the scheduler should use. */
namespace tracking
{
/** \brief @b ParticleFilterOMPTracker tracks the PointCloud which is given by
- setReferenceCloud within the measured PointCloud using particle filter method
+ setReferenceCloud within the measured PointCloud using particle filter method
in parallel, using the OpenMP standard.
* \author Ryohei Ueda
* \ingroup tracking
using ParticleFilterTracker<PointInT, StateT>::calcBoundingBox;
typedef Tracker<PointInT, StateT> BaseClass;
-
+
typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
typedef typename PointCloudIn::Ptr PointCloudInPtr;
typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
- */
+ */
ParticleFilterOMPTracker (unsigned int nr_threads = 0)
: ParticleFilterTracker<PointInT, StateT> ()
- , threads_ (nr_threads)
{
tracker_name_ = "ParticleFilterOMPTracker";
+
+ setNumberOfThreads(nr_threads);
}
/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
*/
- inline void
- setNumberOfThreads (unsigned int nr_threads = 0) { threads_ = nr_threads; }
-
+ void
+ setNumberOfThreads (unsigned int nr_threads = 0);
+
protected:
/** \brief The number of threads the scheduler should use. */
unsigned int threads_;
namespace tracking
{
/** Pyramidal Kanade Lucas Tomasi tracker.
- * This is an implemntation of the Pyramidal Kanade Lucas Tomasi tracker that operates on
+ * This is an implementation of the Pyramidal Kanade Lucas Tomasi tracker that operates on
* organized 3D keypoints with color/intensity information (this is the default behaviour but you
* can alterate it by providing another operator as second template argument). It is an affine
* tracker that iteratively computes the optical flow to find the best guess for a point p at t
inline void
setPointsToTrack (const pcl::PointCloud<pcl::PointUV>::ConstPtr& points);
- /// \brief \return a pointer to the points succesfully tracked.
+ /// \brief \return a pointer to the points successfully tracked.
inline pcl::PointCloud<pcl::PointUV>::ConstPtr
getTrackedPoints () const { return (keypoints_); };
/** \brief \return the status of points to track.
- * Status == 0 --> points succesfully tracked;
+ * Status == 0 --> points successfully tracked;
* Status < 0 --> point is lost;
* Status == -1 --> point is out of bond;
* Status == -2 --> optical flow can not be computed for this point.
inline pcl::PointIndicesConstPtr
getPointsToTrackStatus () const { return (keypoints_status_); }
- /** \brief Return the computed transfromation from tracked points. */
+ /** \brief Return the computed transformation from tracked points. */
Eigen::Affine3f
getResult () const { return (motion_); }
* \param[out] win patch with interpolated intensity values
* \param[out] grad_x_win patch with interpolated gradient along X values
* \param[out] grad_y_win patch with interpolated gradient along Y values
- * \param[out] covariance covariance matrix coefficents
+ * \param[out] covariance covariance matrix coefficients
*/
virtual void
spatialGradient (const FloatImage& img,
set(SUBSYS_NAME visualization)
set(SUBSYS_DESC "Point cloud visualization library")
-set(SUBSYS_DEPS common io kdtree geometry search)
+set(SUBSYS_DEPS common io kdtree geometry search octree)
if(NOT VTK_FOUND)
set(DEFAULT FALSE)
set(LIB_NAME "pcl_${SUBSYS_NAME}")
PCL_ADD_LIBRARY("${LIB_NAME}" "${SUBSYS_NAME}" ${srcs} ${incs} ${common_incs} ${impl_incs} ${common_impl_incs} ${vtk_incs})
+ target_include_directories("${LIB_NAME}" PUBLIC ${VTK_INCLUDE_DIRS})
+
# apple workaround (continued)
if(APPLE)
target_link_libraries("${LIB_NAME}" "-framework Cocoa")
#define PCL_CLOUD_VIEWER_H_
#include <pcl/visualization/pcl_visualizer.h> //pcl vis
+#include <boost/scoped_ptr.hpp> // scoped_ptr for pre-C++11
#include <string>
private:
/** \brief Private implementation. */
struct CloudViewer_impl;
- std::auto_ptr<CloudViewer_impl> impl_;
+ boost::scoped_ptr<CloudViewer_impl> impl_;
boost::signals2::connection
registerMouseCallback (boost::function<void (const pcl::visualization::MouseEvent&)>);
PCL_VISUALIZER_LUT_HSV_INVERSE, /**< Inverse HSV colormap */
PCL_VISUALIZER_LUT_GREY, /**< Grey colormap (black to white) */
PCL_VISUALIZER_LUT_BLUE2RED, /**< Blue to red colormap (blue to white to red) */
- PCL_VISUALIZER_LUT_RANGE_AUTO /**< Set LUT range to min and max values of the data */
+ PCL_VISUALIZER_LUT_RANGE_AUTO, /**< Set LUT range to min and max values of the data */
+ PCL_VISUALIZER_LUT_VIRIDIS /**< Viridis colormap */
};
/** \brief Generate a lookup table for a colormap.
float *data = (static_cast<vtkFloatArray*> (points->GetData ()))->GetPointer (0);
// Set the points
+ vtkIdType ptr = 0;
if (cloud->is_dense)
{
- for (vtkIdType i = 0; i < nr_points; ++i)
- memcpy (&data[i * 3], &cloud->points[i].x, 12); // sizeof (float) * 3
+ for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
}
else
{
!pcl_isfinite (cloud->points[i].z))
continue;
- memcpy (&data[j * 3], &cloud->points[i].x, 12); // sizeof (float) * 3
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
j++;
}
nr_points = j;
actor->GetProperty ()->SetRepresentationToSurface ();
actor->GetProperty ()->SetInterpolationToFlat ();
actor->GetProperty ()->SetColor (r, g, b);
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
actor->GetMapper ()->ImmediateModeRenderingOn ();
+#endif
actor->GetMapper ()->StaticOn ();
actor->GetMapper ()->ScalarVisibilityOff ();
actor->GetMapper ()->Update ();
else
tid = id;
- if (contains (tid))
+ if (viewport < 0)
+ return false;
+
+ // If there is no custom viewport and the viewport number is not 0, exit
+ if (rens_->GetNumberOfItems () <= viewport)
{
- PCL_WARN ("[addText3D] The id <%s> already exists! Please choose a different id and retry.\n", id.c_str ());
- return (false);
+ PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+ viewport,
+ tid.c_str ());
+ return false;
+ }
+
+ // check all or an individual viewport for a similar id
+ rens_->InitTraversal ();
+ for (size_t i = viewport; rens_->GetNextItem () != NULL; ++i)
+ {
+ const std::string uid = tid + std::string (i, '*');
+ if (contains (uid))
+ {
+ PCL_ERROR ( "[addText3D] The id <%s> already exists in viewport [%d]! "
+ "Please choose a different id and retry.\n",
+ tid.c_str (),
+ i);
+ return false;
+ }
+
+ if (viewport > 0)
+ break;
}
vtkSmartPointer<vtkVectorText> textSource = vtkSmartPointer<vtkVectorText>::New ();
// Since each follower may follow a different camera, we need different followers
rens_->InitTraversal ();
- vtkRenderer* renderer = NULL;
+ vtkRenderer* renderer;
int i = 0;
while ((renderer = rens_->GetNextItem ()) != NULL)
{
// Save the pointer/ID pair to the global actor map. If we are saving multiple vtkFollowers
// for multiple viewport
- std::string alternate_tid = tid;
- alternate_tid.append(i, '*');
-
- (*shape_actor_map_)[(viewport == 0) ? tid : alternate_tid] = textActor;
+ const std::string uid = tid + std::string (i, '*');
+ (*shape_actor_map_)[uid] = textActor;
}
++i;
return (true);
}
+//////////////////////////////////////////////////
+template <typename PointT> bool
+pcl::visualization::PCLVisualizer::addText3D (
+ const std::string &text,
+ const PointT& position,
+ double orientation[3],
+ double textScale,
+ double r,
+ double g,
+ double b,
+ const std::string &id,
+ int viewport)
+{
+ std::string tid;
+ if (id.empty ())
+ tid = text;
+ else
+ tid = id;
+
+ if (viewport < 0)
+ return false;
+
+ // If there is no custom viewport and the viewport number is not 0, exit
+ if (rens_->GetNumberOfItems () <= viewport)
+ {
+ PCL_ERROR ("[addText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+ viewport,
+ tid.c_str ());
+ return false;
+ }
+
+ // check all or an individual viewport for a similar id
+ rens_->InitTraversal ();
+ for (size_t i = viewport; rens_->GetNextItem () != NULL; ++i)
+ {
+ const std::string uid = tid + std::string (i, '*');
+ if (contains (uid))
+ {
+ PCL_ERROR ( "[addText3D] The id <%s> already exists in viewport [%d]! "
+ "Please choose a different id and retry.\n",
+ tid.c_str (),
+ i);
+ return false;
+ }
+
+ if (viewport > 0)
+ break;
+ }
+
+ vtkSmartPointer<vtkVectorText> textSource = vtkSmartPointer<vtkVectorText>::New ();
+ textSource->SetText (text.c_str());
+ textSource->Update ();
+
+ vtkSmartPointer<vtkPolyDataMapper> textMapper = vtkSmartPointer<vtkPolyDataMapper>::New ();
+ textMapper->SetInputConnection (textSource->GetOutputPort ());
+
+ vtkSmartPointer<vtkActor> textActor = vtkSmartPointer<vtkActor>::New ();
+ textActor->SetMapper (textMapper);
+ textActor->SetPosition (position.x, position.y, position.z);
+ textActor->SetScale (textScale);
+ textActor->GetProperty ()->SetColor (r, g, b);
+ textActor->SetOrientation (orientation);
+
+ // Save the pointer/ID pair to the global actor map. If we are saving multiple vtkFollowers
+ rens_->InitTraversal ();
+ int i = 0;
+ for ( vtkRenderer* renderer = rens_->GetNextItem ();
+ renderer != NULL;
+ renderer = rens_->GetNextItem (), ++i)
+ {
+ if (viewport == 0 || viewport == i)
+ {
+ renderer->AddActor (textActor);
+ const std::string uid = tid + std::string (i, '*');
+ (*shape_actor_map_)[uid] = textActor;
+ }
+ }
+
+ return (true);
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////
template <typename PointNT> bool
pcl::visualization::PCLVisualizer::addPointCloudNormals (
PCL_ERROR ("[addPointCloudNormals] The number of points differs from the number of normals!\n");
return (false);
}
+
+ if (normals->empty ())
+ {
+ PCL_WARN ("[addPointCloudNormals] An empty normal cloud is given! Nothing to display.\n");
+ return (false);
+ }
+
if (contains (id))
{
PCL_WARN ("[addPointCloudNormals] The id <%s> already exists! Please choose a different id and retry.\n", id.c_str ());
double minmax[2];
minmax[0] = std::numeric_limits<double>::min ();
minmax[1] = std::numeric_limits<double>::max ();
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
am_it->second.actor->GetMapper ()->ImmediateModeRenderingOff ();
+#endif
am_it->second.actor->GetMapper ()->SetScalarRange (minmax);
// Update the mapper
double minmax[2];
minmax[0] = std::numeric_limits<double>::min ();
minmax[1] = std::numeric_limits<double>::max ();
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
am_it->second.actor->GetMapper ()->ImmediateModeRenderingOff ();
+#endif
am_it->second.actor->GetMapper ()->SetScalarRange (minmax);
// Update the mapper
// Get a pointer to the beginning of the data array
float *data = (static_cast<vtkFloatArray*> (points->GetData ()))->GetPointer (0);
- int pts = 0;
+ vtkIdType pts = 0;
// If the dataset is dense (no NaNs)
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, pts += 3)
- memcpy (&data[pts], &cloud->points[i].x, 12); // sizeof (float) * 3
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[pts]);
}
else
{
// Check if the point is invalid
if (!isFinite (cloud->points[i]))
continue;
-
- memcpy (&data[pts], &cloud->points[i].x, 12); // sizeof (float) * 3
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[pts]);
pts += 3;
j++;
}
vertices->SetCells (nr_points, cells);
// Get the colors from the handler
- vtkSmartPointer<vtkDataArray> scalars;
- color_handler.getColor (scalars);
+ bool has_colors = false;
double minmax[2];
- scalars->GetRange (minmax);
- // Update the data
- polydata->GetPointData ()->SetScalars (scalars);
+ vtkSmartPointer<vtkDataArray> scalars;
+ if (color_handler.getColor (scalars))
+ {
+ // Update the data
+ polydata->GetPointData ()->SetScalars (scalars);
+ scalars->GetRange (minmax);
+ has_colors = true;
+ }
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
am_it->second.actor->GetMapper ()->ImmediateModeRenderingOff ();
- am_it->second.actor->GetMapper ()->SetScalarRange (minmax);
+#endif
+
+ if (has_colors)
+ am_it->second.actor->GetMapper ()->SetScalarRange (minmax);
// Update the mapper
#if VTK_MAJOR_VERSION < 6
colors = vtkSmartPointer<vtkUnsignedCharArray>::New ();
colors->SetNumberOfComponents (3);
colors->SetName ("Colors");
-
- pcl::RGB rgb_data;
+ uint32_t offset = fields[rgb_idx].offset;
for (size_t i = 0; i < cloud->size (); ++i)
{
if (!isFinite (cloud->points[i]))
continue;
- memcpy (&rgb_data, reinterpret_cast<const char*> (&cloud->points[i]) + fields[rgb_idx].offset, sizeof (pcl::RGB));
+ const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&cloud->points[i]) + offset);
unsigned char color[3];
- color[0] = rgb_data.r;
- color[1] = rgb_data.g;
- color[2] = rgb_data.b;
+ color[0] = rgb_data->r;
+ color[1] = rgb_data->g;
+ color[2] = rgb_data->b;
colors->InsertNextTupleValue (color);
}
}
// Get a pointer to the beginning of the data array
float *data = static_cast<vtkFloatArray*> (points->GetData ())->GetPointer (0);
- int ptr = 0;
+ vtkIdType ptr = 0;
std::vector<int> lookup;
// If the dataset is dense (no NaNs)
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
}
else
{
continue;
lookup[i] = static_cast<int> (j);
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
}
else
{
continue;
lookup [i] = static_cast<int> (j);
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
rgb_idx = pcl::getFieldIndex (*cloud, "rgba", fields);
if (rgb_idx != -1 && colors)
{
- pcl::RGB rgb_data;
int j = 0;
+ uint32_t offset = fields[rgb_idx].offset;
for (size_t i = 0; i < cloud->size (); ++i)
{
if (!isFinite (cloud->points[i]))
continue;
- memcpy (&rgb_data,
- reinterpret_cast<const char*> (&cloud->points[i]) + fields[rgb_idx].offset,
- sizeof (pcl::RGB));
+ const pcl::RGB* const rgb_data = reinterpret_cast<const pcl::RGB*>(reinterpret_cast<const char*> (&cloud->points[i]) + offset);
unsigned char color[3];
- color[0] = rgb_data.r;
- color[1] = rgb_data.g;
- color[2] = rgb_data.b;
+ color[0] = rgb_data->r;
+ color[1] = rgb_data->g;
+ color[2] = rgb_data->b;
colors->SetTupleValue (j++, color);
}
}
*/
class PCL_EXPORTS PCLVisualizerInteractorStyle : public vtkInteractorStyleRubberBandPick
{
- typedef boost::shared_ptr<CloudActorMap> CloudActorMapPtr;
-
public:
+ typedef boost::shared_ptr<CloudActorMap> CloudActorMapPtr;
+
static PCLVisualizerInteractorStyle *New ();
/** \brief Empty constructor. */
/** \brief Get camera parameters from a string vector.
* \param[in] camera A string vector:
* Clipping Range, Focal Point, Position, ViewUp, Distance, Field of View Y, Window Size, Window Pos.
- * Values in each string are seperated by a ','
+ * Values in each string are separated by a ','
*/
bool
getCameraParameters (const std::vector<std::string> &camera);
std::vector<char> const &color = std::vector<char>());
/** \brief Adds a plot based on a space/tab delimited table provided in a file
- * \param[in] filename name of the file containing the table. 1st column represents the values of X-Axis. Rest of the columns represent the corresponding values in Y-Axes. First row of the file is concidered for naming/labling of the plot. The plot-names should not contain any space in between.
+ * \param[in] filename name of the file containing the table. 1st column represents the values of X-Axis. Rest of the columns represent the corresponding values in Y-Axes. First row of the file is considered for naming/labeling of the plot. The plot-names should not contain any space in between.
* \param[in] type type of the graph plotted. vtkChart::LINE for line plot, vtkChart::BAR for bar plot, and vtkChart::POINTS for a scattered point plot
*/
void
*/
PCLVisualizer (const std::string &name = "", const bool create_interactor = true);
- /** \brief PCL Visualizer constructor.
+ /** \brief PCL Visualizer constructor. It looks through the passed argv arguments to find the "-cam *.cam" argument.
+ * If the search failed, the name for cam file is calculated with boost uuid. If there is no such file, camera is not initilalized.
* \param[in] argc
* \param[in] argv
* \param[in] name the window name (empty by default)
*/
PCLVisualizer (int &argc, char **argv, const std::string &name = "",
PCLVisualizerInteractorStyle* style = PCLVisualizerInteractorStyle::New (), const bool create_interactor = true);
+
+ /** \brief PCL Visualizer constructor.
+ * \param[in] custom vtk renderer
+ * \param[in] custom vtk render window
+ * \param[in] create_interactor if true (default), create an interactor, false otherwise
+ */
+ PCLVisualizer (vtkSmartPointer<vtkRenderer> ren, vtkSmartPointer<vtkRenderWindow> wind, const std::string &name = "", const bool create_interactor = true);
+
+ /** \brief PCL Visualizer constructor.
+ * \param[in] argc
+ * \param[in] argv
+ * \param[in] custom vtk renderer
+ * \param[in] custom vtk render window
+ * \param[in] style interactor style (defaults to PCLVisualizerInteractorStyle)
+ * \param[in] create_interactor if true (default), create an interactor, false otherwise
+ */
+ PCLVisualizer (int &argc, char **argv, vtkSmartPointer<vtkRenderer> ren, vtkSmartPointer<vtkRenderWindow> wind, const std::string &name = "",
+ PCLVisualizerInteractorStyle* style = PCLVisualizerInteractorStyle::New (),
+ const bool create_interactor = true);
+
/** \brief PCL Visualizer destructor. */
virtual ~PCLVisualizer ();
/** \brief Disables the Orientatation Marker Widget so it is removed from the renderer */
void
removeOrientationMarkerWidgetAxes ();
-
- /** \brief Adds 3D axes describing a coordinate system to screen at 0,0,0.
- * \param[in] scale the scale of the axes (default: 1)
- * \param[in] viewport the view port where the 3D axes should be added (default: all)
- */
- PCL_DEPRECATED (
- "addCoordinateSystem (scale, viewport) is deprecated, please use function "
- "addCoordinateSystem (scale, id, viewport) with id a unique string identifier.")
- void
- addCoordinateSystem (double scale, int viewport);
/** \brief Adds 3D axes describing a coordinate system to screen at 0,0,0.
* \param[in] scale the scale of the axes (default: 1)
void
addCoordinateSystem (double scale = 1.0, const std::string& id = "reference", int viewport = 0);
- /** \brief Adds 3D axes describing a coordinate system to screen at x, y, z
- * \param[in] scale the scale of the axes (default: 1)
- * \param[in] x the X position of the axes
- * \param[in] y the Y position of the axes
- * \param[in] z the Z position of the axes
- * \param[in] viewport the view port where the 3D axes should be added (default: all)
- */
- PCL_DEPRECATED (
- "addCoordinateSystem (scale, x, y, z, viewport) is deprecated, please use function "
- "addCoordinateSystem (scale, x, y, z, id, viewport) with id a unique string identifier.")
- void
- addCoordinateSystem (double scale, float x, float y, float z, int viewport);
-
/** \brief Adds 3D axes describing a coordinate system to screen at x, y, z
* \param[in] scale the scale of the axes (default: 1)
* \param[in] x the X position of the axes
void
addCoordinateSystem (double scale, float x, float y, float z, const std::string &id = "reference", int viewport = 0);
- /** \brief Adds 3D axes describing a coordinate system to screen at x, y, z, Roll,Pitch,Yaw
- *
- * \param[in] scale the scale of the axes (default: 1)
- * \param[in] t transformation matrix
- * \param[in] viewport the view port where the 3D axes should be added (default: all)
- */
- PCL_DEPRECATED (
- "addCoordinateSystem (scale, t, viewport) is deprecated, please use function "
- "addCoordinateSystem (scale, t, id, viewport) with id a unique string identifier.")
- void
- addCoordinateSystem (double scale, const Eigen::Affine3f& t, int viewport);
-
/** \brief Adds 3D axes describing a coordinate system to screen at x, y, z, Roll,Pitch,Yaw
*
* \param[in] scale the scale of the axes (default: 1)
void
addCoordinateSystem (double scale, const Eigen::Affine3f& t, const std::string &id = "reference", int viewport = 0);
- /** \brief Removes a previously added 3D axes (coordinate system)
- * \param[in] viewport view port where the 3D axes should be removed from (default: all)
- */
- PCL_DEPRECATED (
- "removeCoordinateSystem (viewport) is deprecated, please use function "
- "addCoordinateSystem (id, viewport) with id a unique string identifier.")
- bool
- removeCoordinateSystem (int viewport);
-
/** \brief Removes a previously added 3D axes (coordinate system)
* \param[in] id the coordinate system object id (default: reference)
* \param[in] viewport view port where the 3D axes should be removed from (default: all)
double r = 1.0, double g = 1.0, double b = 1.0,
const std::string &id = "", int viewport = 0);
+ /** \brief Add a 3d text to the scene
+ * \param[in] text the text to add
+ * \param[in] position the world position where the text should be added
+ * \param[in] orientation the angles of rotation of the text around X, Y and Z axis,
+ in this order. The way the rotations are effectively done is the
+ Z-X-Y intrinsic rotations:
+ https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations
+ * \param[in] textScale the scale of the text to render
+ * \param[in] r the red color value
+ * \param[in] g the green color value
+ * \param[in] b the blue color value
+ * \param[in] id the text object id (default: equal to the "text" parameter)
+ * \param[in] viewport the view port (default: all)
+ */
+ template <typename PointT> bool
+ addText3D (const std::string &text,
+ const PointT &position,
+ double orientation[3],
+ double textScale = 1.0,
+ double r = 1.0, double g = 1.0, double b = 1.0,
+ const std::string &id = "", int viewport = 0);
+
/** \brief Check if the cloud, shape, or coordinate with the given id was already added to this visualizer.
* \param[in] id the id of the cloud, shape, or coordinate to check
* \return true if a cloud, shape, or coordinate with the specified id was found
getPointCloudRenderingProperties (int property, double &value,
const std::string &id = "cloud");
+ /** \brief Get the rendering properties of a PointCloud
+ * \param[in] property the property type
+ * \param[out] val1 the resultant property value
+ * \param[out] val2 the resultant property value
+ * \param[out] val3 the resultant property value
+ * \param[in] id the point cloud object id (default: cloud)
+ * \return True if the property is effectively retrieved.
+ * \note The list of properties can be found in \ref pcl::visualization::LookUpTableRepresentationProperties.
+ */
+ bool
+ getPointCloudRenderingProperties (RenderingProperties property, double &val1, double &val2, double &val3,
+ const std::string &id = "cloud");
+
/** \brief Set whether the point cloud is selected or not
* \param[in] selected whether the cloud is selected or not (true = selected)
* \param[in] id the point cloud object id (default: cloud)
setShapeRenderingProperties (int property, double value,
const std::string &id, int viewport = 0);
- /** \brief Set the rendering properties of a shape (3x values - e.g., RGB)
+ /** \brief Set the rendering properties of a shape (2x values - e.g., LUT minmax values)
+ * \param[in] property the property type
+ * \param[in] val1 the first value to be set
+ * \param[in] val2 the second value to be set
+ * \param[in] id the shape object id
+ * \param[in] viewport the view port where the shape's properties should be modified (default: all)
+ * \note When using \ref addPolygonMesh you you should use \ref setPointCloudRenderingProperties
+ */
+ bool
+ setShapeRenderingProperties (int property, double val1, double val2,
+ const std::string &id, int viewport = 0);
+
+ /** \brief Set the rendering properties of a shape (3x values - e.g., RGB)
* \param[in] property the property type
* \param[in] val1 the first value to be set
* \param[in] val2 the second value to be set
const std::string &id = "line",
int viewport = 0);
+ /** \brief Add a line from a set of given model coefficients
+ * \param[in] coefficients the model coefficients (point_on_line, direction)
+ * \param[in] id the line id/name (default: "line")
+ * \param[in] viewport (optional) the id of the new viewport (default: 0)
+ *
+ * \code
+ * // The following are given (or computed using sample consensus techniques)
+ * // See SampleConsensusModelLine for more information
+ * // Eigen::Vector3f point_on_line, line_direction;
+ *
+ * pcl::ModelCoefficients line_coeff;
+ * line_coeff.values.resize (6); // We need 6 values
+ * line_coeff.values[0] = point_on_line.x ();
+ * line_coeff.values[1] = point_on_line.y ();
+ * line_coeff.values[2] = point_on_line.z ();
+ *
+ * line_coeff.values[3] = line_direction.x ();
+ * line_coeff.values[4] = line_direction.y ();
+ * line_coeff.values[5] = line_direction.z ();
+ *
+ * addLine (line_coeff);
+ * \endcode
+ */
+ bool
+ addLine (const pcl::ModelCoefficients &coefficients,
+ const char *id = "line",
+ int viewport = 0)
+ {
+ return addLine (coefficients, std::string (id), viewport);
+ }
+
/** \brief Add a plane from a set of given model coefficients
* \param[in] coefficients the model coefficients (a, b, c, d with ax+by+cz+d=0)
* \param[in] id the plane id/name (default: "plane")
void
setShowFPS (bool show_fps);
+ /** Get the current rendering framerate.
+ * \see setShowFPS */
+ float
+ getFPS () const;
+
/** \brief Renders a virtual scene as seen from the camera viewpoint and returns the rendered point cloud.
* ATT: This method will only render the scene if only on viewport exists. Otherwise, returns an empty
* point cloud and exits immediately.
vtkSmartPointer<vtkRenderWindowInteractor> interactor_;
#endif
private:
- struct ExitMainLoopTimerCallback : public vtkCommand
+ /** \brief Internal function for renderer setup
+ * \param[in] vtk renderer
+ */
+ void setupRenderer (vtkSmartPointer<vtkRenderer> ren);
+
+ /** \brief Internal function for setting up FPS callback
+ * \param[in] vtk renderer
+ */
+ void setupFPSCallback (const vtkSmartPointer<vtkRenderer>& ren);
+
+ /** \brief Internal function for setting up render window
+ * \param[in] name the window name
+ */
+ void setupRenderWindow (const std::string& name);
+
+ /** \brief Internal function for setting up interactor style
+ */
+ void setupStyle ();
+
+ /** \brief Internal function for setting the default render window size and position on screen
+ */
+ void setDefaultWindowSizeAndPos ();
+
+ /** \brief Internal function for setting up camera parameters
+ * \param[in] argc
+ * \param[in] argv
+ */
+ void setupCamera (int &argc, char **argv);
+
+ struct PCL_EXPORTS ExitMainLoopTimerCallback : public vtkCommand
{
static ExitMainLoopTimerCallback* New ()
{
PCLVisualizer* pcl_visualizer;
};
- struct ExitCallback : public vtkCommand
+ struct PCL_EXPORTS ExitCallback : public vtkCommand
{
static ExitCallback* New ()
{
};
//////////////////////////////////////////////////////////////////////////////////////////////
- struct FPSCallback : public vtkCommand
+ struct PCL_EXPORTS FPSCallback : public vtkCommand
{
static FPSCallback *New () { return (new FPSCallback); }
- FPSCallback () : actor (), pcl_visualizer (), decimated () {}
- FPSCallback (const FPSCallback& src) : vtkCommand (), actor (src.actor), pcl_visualizer (src.pcl_visualizer), decimated (src.decimated) {}
- FPSCallback& operator = (const FPSCallback& src) { actor = src.actor; pcl_visualizer = src.pcl_visualizer; decimated = src.decimated; return (*this); }
+ FPSCallback () : actor (), pcl_visualizer (), decimated (), last_fps(0.0f) {}
+ FPSCallback (const FPSCallback& src) : vtkCommand (), actor (src.actor), pcl_visualizer (src.pcl_visualizer), decimated (src.decimated), last_fps (src.last_fps) {}
+ FPSCallback& operator = (const FPSCallback& src) { actor = src.actor; pcl_visualizer = src.pcl_visualizer; decimated = src.decimated; last_fps = src.last_fps; return (*this); }
virtual void
Execute (vtkObject*, unsigned long event_id, void*);
-
+
vtkTextActor *actor;
PCLVisualizer* pcl_visualizer;
bool decimated;
+ float last_fps;
};
/** \brief The FPSCallback object for the current visualizer. */
vtkTexture* vtk_tex) const;
/** \brief Get camera file for camera parameter saving/restoring from command line.
- * Camera filename is calculated using sha1 value of all pathes of input .pcd files
+ * Camera filename is calculated using sha1 value of all paths of input .pcd files
* \return empty string if failed.
*/
std::string
virtual std::string
getName () const { return ("PointCloudColorHandlerRGBAField"); }
+ private:
// Members derived from the base class
using PointCloudColorHandler<PointT>::cloud_;
using PointCloudColorHandler<PointT>::capable_;
//////////////////////////////////////////////////////////////////////////////////////
/** \brief Surface normal handler class for PointCloud geometry. Given an input
* dataset, all data present in fields "normal_x", "normal_y", and "normal_z" is
- * extracted and dislayed on screen as XYZ data.
+ * extracted and displayed on screen as XYZ data.
* \author Radu B. Rusu
* \ingroup visualization
*/
const std::string &x_field_name,
const std::string &y_field_name,
const std::string &z_field_name)
+ : pcl::visualization::PointCloudGeometryHandler<PointT>::PointCloudGeometryHandler (cloud)
{
field_x_idx_ = pcl::getFieldIndex (*cloud, x_field_name, fields_);
if (field_x_idx_ == -1)
//////////////////////////////////////////////////////////////////////////////////////
/** \brief Surface normal handler class for PointCloud geometry. Given an input
* dataset, all data present in fields "normal_x", "normal_y", and "normal_z" is
- * extracted and dislayed on screen as XYZ data.
+ * extracted and displayed on screen as XYZ data.
* \author Radu B. Rusu
* \ingroup visualization
*/
/** \brief For situations when multiple points are selected in a sequence, return the point coordinates.
* \param[out] x1 the x coordinate of the first point that got selected by the user
* \param[out] y1 the y coordinate of the first point that got selected by the user
- * \param[out] z1 the z coordinate of the firts point that got selected by the user
+ * \param[out] z1 the z coordinate of the first point that got selected by the user
* \param[out] x2 the x coordinate of the second point that got selected by the user
* \param[out] y2 the y coordinate of the second point that got selected by the user
* \param[out] z2 the z coordinate of the second point that got selected by the user
// load values into cloud
updateValuesToDisplay();
- // check if we need to automatically handle the backgroud color
+ // check if we need to automatically handle the background color
if(control_background_color_)
{
if(values_.back() < lowest_threshold_)
}
private:
- /** \brief initialize ther buffer that stores the values to zero.
+ /** \brief initialize the buffer that stores the values to zero.
* \note The size is set by private member nb_values_ which is in the range [2-308].
*/
void
*/
int nb_values_;
- /** \brief boolean used to know if we need to change the backgroud color in case of low values. */
+ /** \brief boolean used to know if we need to change the background color in case of low values. */
bool control_background_color_;
/** \brief threshold to turn the background orange if latest value is lower. */
float lowest_threshold_;
- /** \brief boolean used to know if we need to change the backgroud color in case of low values. True means we do it ourselves. */
+ /** \brief boolean used to know if we need to change the background color in case of low values. True means we do it ourselves. */
bool handle_y_scale_;
/** \brief float tracking the minimal and maximal values ever observed. */
{
[self breakEventLoop];
- // The NSWindow is closing, so prevent anyone from accidently using it
+ // The NSWindow is closing, so prevent anyone from accidentally using it
renWin->SetRootWindow(NULL);
}
}
// - StreamRead specified once by R, queried a few times by A
// - StreamCopy specified once by R, used a few times S
// - StaticDraw specified once by A, used many times S
- // - StaticRead specificed once by R, queried many times by A
+ // - StaticRead specified once by R, queried many times by A
// - StaticCopy specified once by R, used many times S
// - DynamicDraw respecified repeatedly by A, used many times S
// - DynamicRead respecified repeatedly by R, queried many times by A
#include <pcl/point_types.h>
#include <pcl/visualization/common/common.h>
#include <pcl/console/print.h>
+#include <pcl/common/colors.h>
#include <stdlib.h>
//////////////////////////////////////////////////////////////////////////////////////////////
red[2] * weight + white[2] * (1 - weight) );
}
break;
- }
+ }
+
+ case PCL_VISUALIZER_LUT_VIRIDIS:
+ {
+ table->SetSaturationRange (1, 1);
+ table->SetAlphaRange (1, 1);
+ table->SetNumberOfTableValues (pcl::ViridisLUT::size ());
+ for (size_t i = 0; i < pcl::ViridisLUT::size (); i++)
+ {
+ pcl::RGB c = pcl::ViridisLUT::at (i);
+ table->SetTableValue (i, static_cast<double> (c.r) / 255.0,
+ static_cast<double> (c.g) / 255.0,
+ static_cast<double> (c.b) / 255.0);
+ }
+ break;
+ }
+
default:
PCL_WARN ("[pcl::visualization::getColormapLUT] Requested colormap type does not exist!\n");
return false;
window_size[0] = 2 * static_cast<int> (intrinsics (0, 2));
window_size[1] = 2 * static_cast<int> (intrinsics (1, 2));
- // Compute the vertical field of view based on the focal length and image heigh
+ // Compute the vertical field of view based on the focal length and image height
double fovy = 2 * atan (window_size[1] / (2. * intrinsics (1, 1))) * 180.0 / M_PI;
//draw every figures
for (size_t i = 0; i < figures_.size (); i++)
{
- figures_[i]->draw (painter); //thats it ;-)
+ figures_[i]->draw (painter); //that's it ;-)
}
return true;
if (pcl_isfinite (data[i]))
{
unsigned int index = (unsigned int) (floor ((data[i] - min) / size));
- if (index == nbins) index = nbins - 1; //including right boundary
+ if (index == (unsigned int) nbins) index = nbins - 1; //including right boundary
histogram[index].second++;
}
}
#include <vtkLookupTable.h>
#include <vtkTextureUnitManager.h>
+#if VTK_MAJOR_VERSION > 7
+#include <vtkTexture.h>
+#endif
+
#include <pcl/visualization/common/shapes.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/common/common.h>
#include <pcl/common/time.h>
+#if (BOOST_VERSION >= 106600)
+#include <boost/uuid/detail/sha1.hpp>
+#else
#include <boost/uuid/sha1.hpp>
+#endif
#include <boost/filesystem.hpp>
#include <pcl/console/parse.h>
, exit_main_loop_timer_callback_ ()
, exit_callback_ ()
, rens_ (vtkSmartPointer<vtkRendererCollection>::New ())
- , win_ ()
+ , win_ (vtkSmartPointer<vtkRenderWindow>::New ())
, style_ (vtkSmartPointer<pcl::visualization::PCLVisualizerInteractorStyle>::New ())
, cloud_actor_map_ (new CloudActorMap)
, shape_actor_map_ (new ShapeActorMap)
, camera_set_ ()
, camera_file_loaded_ (false)
{
- // Create a Renderer
vtkSmartPointer<vtkRenderer> ren = vtkSmartPointer<vtkRenderer>::New ();
- ren->AddObserver (vtkCommand::EndEvent, update_fps_);
- // Add it to the list of renderers
- rens_->AddItem (ren);
-
- // FPS callback
- vtkSmartPointer<vtkTextActor> txt = vtkSmartPointer<vtkTextActor>::New ();
- update_fps_->actor = txt;
- update_fps_->pcl_visualizer = this;
- update_fps_->decimated = false;
- ren->AddActor (txt);
- txt->SetInput("0 FPS");
-
- // Create a RendererWindow
- win_ = vtkSmartPointer<vtkRenderWindow>::New ();
- win_->SetWindowName (name.c_str ());
-
- // Get screen size
- int scr_size_x = win_->GetScreenSize ()[0];
- int scr_size_y = win_->GetScreenSize ()[1];
- // Set the window size as 1/2 of the screen size
- win_->SetSize (scr_size_x / 2, scr_size_y / 2);
-
- // By default, don't use vertex buffer objects
- use_vbos_ = false;
-
- // Add all renderers to the window
- rens_->InitTraversal ();
- vtkRenderer* renderer = NULL;
- while ((renderer = rens_->GetNextItem ()) != NULL)
- win_->AddRenderer (renderer);
-
- // Set renderer window in case no interactor is created
- style_->setRenderWindow (win_);
-
- // Create the interactor style
- style_->Initialize ();
- style_->setRendererCollection (rens_);
- style_->setCloudActorMap (cloud_actor_map_);
- style_->setShapeActorMap (shape_actor_map_);
- style_->UseTimersOn ();
- style_->setUseVbos(use_vbos_);
+ setupRenderer (ren);
+ setupFPSCallback (ren);
+ setupRenderWindow (name);
+ setDefaultWindowSizeAndPos ();
+ setupStyle ();
if (create_interactor)
createInteractor ();
-
- win_->SetWindowName (name.c_str ());
}
/////////////////////////////////////////////////////////////////////////////////////////////
, exit_main_loop_timer_callback_ ()
, exit_callback_ ()
, rens_ (vtkSmartPointer<vtkRendererCollection>::New ())
- , win_ ()
+ , win_ (vtkSmartPointer<vtkRenderWindow>::New ())
, style_ (style)
, cloud_actor_map_ (new CloudActorMap)
, shape_actor_map_ (new ShapeActorMap)
, camera_set_ ()
, camera_file_loaded_ (false)
{
- // Create a Renderer
vtkSmartPointer<vtkRenderer> ren = vtkSmartPointer<vtkRenderer>::New ();
- ren->AddObserver (vtkCommand::EndEvent, update_fps_);
- // Add it to the list of renderers
- rens_->AddItem (ren);
+ setupRenderer (ren);
+ setupFPSCallback (ren);
+ setupRenderWindow (name);
+ setupStyle ();
+ setupCamera (argc, argv);
- // FPS callback
- vtkSmartPointer<vtkTextActor> txt = vtkSmartPointer<vtkTextActor>::New ();
- update_fps_->actor = txt;
- update_fps_->pcl_visualizer = this;
- update_fps_->decimated = false;
- ren->AddActor (txt);
- txt->SetInput("0 FPS");
+ if(!camera_set_ && !camera_file_loaded_)
+ setDefaultWindowSizeAndPos ();
- // Create a RendererWindow
- win_ = vtkSmartPointer<vtkRenderWindow>::New ();
- win_->SetWindowName (name.c_str ());
-
- // By default, don't use vertex buffer objects
- use_vbos_ = false;
-
- // Add all renderers to the window
- rens_->InitTraversal ();
- vtkRenderer* renderer = NULL;
- while ((renderer = rens_->GetNextItem ()) != NULL)
- win_->AddRenderer (renderer);
-
- // Set renderer window in case no interactor is created
- style_->setRenderWindow (win_);
+ if (create_interactor)
+ createInteractor ();
- // Create the interactor style
- style_->Initialize ();
- style_->setRendererCollection (rens_);
- style_->setCloudActorMap (cloud_actor_map_);
- style_->setShapeActorMap (shape_actor_map_);
- style_->UseTimersOn ();
+ //window name should be reset due to its reset somewhere in camera initialization
+ win_->SetWindowName (name.c_str ());
+}
- // Get screen size
- int scr_size_x = win_->GetScreenSize ()[0];
- int scr_size_y = win_->GetScreenSize ()[1];
+/////////////////////////////////////////////////////////////////////////////////////////////
+pcl::visualization::PCLVisualizer::PCLVisualizer (vtkSmartPointer<vtkRenderer> ren, vtkSmartPointer<vtkRenderWindow> wind,
+ const std::string &name, const bool create_interactor)
+ : interactor_ ()
+ , update_fps_ (vtkSmartPointer<FPSCallback>::New ())
+#if !((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
+ , stopped_ ()
+ , timer_id_ ()
+#endif
+ , exit_main_loop_timer_callback_ ()
+ , exit_callback_ ()
+ , rens_ (vtkSmartPointer<vtkRendererCollection>::New ())
+ , win_ (wind)
+ , style_ (vtkSmartPointer<pcl::visualization::PCLVisualizerInteractorStyle>::New ())
+ , cloud_actor_map_ (new CloudActorMap)
+ , shape_actor_map_ (new ShapeActorMap)
+ , coordinate_actor_map_ (new CoordinateActorMap)
+ , camera_set_ ()
+ , camera_file_loaded_ (false)
+{
+ setupRenderer (ren);
+ setupFPSCallback (ren);
+ setupRenderWindow (name);
+ setDefaultWindowSizeAndPos ();
+ setupStyle ();
- // Set default camera parameters
- initCameraParameters ();
+ if (create_interactor)
+ createInteractor ();
+}
- // Parse the camera settings and update the internal camera
- camera_set_ = getCameraParameters (argc, argv);
- // Calculate unique camera filename for camera parameter saving/restoring
- if (!camera_set_)
- {
- std::string camera_file = getUniqueCameraFile (argc, argv);
- if (!camera_file.empty ())
- {
- if (boost::filesystem::exists (camera_file) && style_->loadCameraParameters (camera_file))
- {
- camera_file_loaded_ = true;
- }
- else
- {
- style_->setCameraFile (camera_file);
- }
- }
- }
- // Set the window size as 1/2 of the screen size or the user given parameter
+/////////////////////////////////////////////////////////////////////////////////////////////
+pcl::visualization::PCLVisualizer::PCLVisualizer (int &argc, char **argv, vtkSmartPointer<vtkRenderer> ren, vtkSmartPointer<vtkRenderWindow> wind,
+ const std::string &name, PCLVisualizerInteractorStyle* style, const bool create_interactor)
+ : interactor_ ()
+ , update_fps_ (vtkSmartPointer<FPSCallback>::New ())
+#if !((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
+ , stopped_ ()
+ , timer_id_ ()
+#endif
+ , exit_main_loop_timer_callback_ ()
+ , exit_callback_ ()
+ , rens_ (vtkSmartPointer<vtkRendererCollection>::New ())
+ , win_ (wind)
+ , style_ (style)
+ , cloud_actor_map_ (new CloudActorMap)
+ , shape_actor_map_ (new ShapeActorMap)
+ , coordinate_actor_map_ (new CoordinateActorMap)
+ , camera_set_ ()
+ , camera_file_loaded_ (false)
+{
+ setupRenderer (ren);
+ setupFPSCallback (ren);
+ setupRenderWindow (name);
+ setupStyle ();
+ setupCamera (argc, argv);
if (!camera_set_ && !camera_file_loaded_)
- {
- win_->SetSize (scr_size_x/2, scr_size_y/2);
- win_->SetPosition (0, 0);
- }
-
+ setDefaultWindowSizeAndPos ();
if (create_interactor)
createInteractor ();
+ //window name should be reset due to its reset somewhere in camera initialization
win_->SetWindowName (name.c_str ());
}
// iren->SetPicker (pp);
}
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setupRenderer (vtkSmartPointer<vtkRenderer> ren)
+{
+ if (!ren)
+ PCL_ERROR ("Passed pointer to renderer is null");
+
+ ren->AddObserver (vtkCommand::EndEvent, update_fps_);
+ // Add it to the list of renderers
+ rens_->AddItem (ren);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setupFPSCallback (const vtkSmartPointer<vtkRenderer>& ren)
+{
+ if (!ren)
+ PCL_ERROR ("Passed pointer to renderer is null");
+ // FPS callback
+ vtkSmartPointer<vtkTextActor> txt = vtkSmartPointer<vtkTextActor>::New ();
+ update_fps_->actor = txt;
+ update_fps_->pcl_visualizer = this;
+ update_fps_->decimated = false;
+ ren->AddActor (txt);
+ txt->SetInput ("0 FPS");
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setupRenderWindow (const std::string& name)
+{
+ if (!win_)
+ PCL_ERROR ("Pointer to render window is null");
+
+ win_->SetWindowName (name.c_str ());
+
+ // By default, don't use vertex buffer objects
+ use_vbos_ = false;
+
+ // Add all renderers to the window
+ rens_->InitTraversal ();
+ vtkRenderer* renderer = NULL;
+ while ((renderer = rens_->GetNextItem ()) != NULL)
+ win_->AddRenderer (renderer);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setupStyle ()
+{
+ if (!style_)
+ PCL_ERROR ("Pointer to style is null");
+
+ // Set rend erer window in case no interactor is created
+ style_->setRenderWindow (win_);
+
+ // Create the interactor style
+ style_->Initialize ();
+ style_->setRendererCollection (rens_);
+ style_->setCloudActorMap (cloud_actor_map_);
+ style_->setShapeActorMap (shape_actor_map_);
+ style_->UseTimersOn ();
+ style_->setUseVbos (use_vbos_);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setDefaultWindowSizeAndPos ()
+{
+ if (!win_)
+ PCL_ERROR ("Pointer to render window is null");
+ int scr_size_x = win_->GetScreenSize ()[0];
+ int scr_size_y = win_->GetScreenSize ()[1];
+ win_->SetSize (scr_size_x / 2, scr_size_y / 2);
+ win_->SetPosition (0, 0);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void pcl::visualization::PCLVisualizer::setupCamera (int &argc, char **argv)
+{
+ initCameraParameters ();
+
+ // Parse the camera settings and update the internal camera
+ camera_set_ = getCameraParameters (argc, argv);
+ // Calculate unique camera filename for camera parameter saving/restoring
+ if (!camera_set_)
+ {
+ std::string camera_file = getUniqueCameraFile (argc, argv);
+ if (!camera_file.empty ())
+ {
+ if (boost::filesystem::exists (camera_file) && style_->loadCameraParameters (camera_file))
+ {
+ camera_file_loaded_ = true;
+ }
+ else
+ {
+ style_->setCameraFile (camera_file);
+ }
+ }
+ }
+}
+
/////////////////////////////////////////////////////////////////////////////////////////////
pcl::visualization::PCLVisualizer::~PCLVisualizer ()
{
resetStoppedFlag ();
// Render the window before we start the interactor
win_->Render ();
- interactor_->Start ();
+ if (interactor_)
+ interactor_->Start ();
}
/////////////////////////////////////////////////////////////////////////////////////////////
}
#endif
+ if (!interactor_)
+ return;
+
if (time <= 0)
time = 1;
}
}
-/////////////////////////////////////////////////////////////////////////////////////////////
-void
-pcl::visualization::PCLVisualizer::addCoordinateSystem (double scale, int viewport)
-{
- addCoordinateSystem (scale, "reference", viewport);
-}
-
-void
-pcl::visualization::PCLVisualizer::addCoordinateSystem (double scale, float x, float y, float z, int viewport)
-{
- addCoordinateSystem (scale, x, y, z, "reference", viewport);
-}
-
-void
-pcl::visualization::PCLVisualizer::addCoordinateSystem (double scale, const Eigen::Affine3f& t, int viewport)
-{
- addCoordinateSystem (scale, t, "reference", viewport);
-}
-
-bool
-pcl::visualization::PCLVisualizer::removeCoordinateSystem (int viewport)
-{
- return (removeCoordinateSystem ("reference", viewport));
-}
-
/////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::addCoordinateSystem (double scale, const std::string &id, int viewport)
bool
pcl::visualization::PCLVisualizer::removeText3D (const std::string &id, int viewport)
{
- // Check to see if the given ID entry exists
- ShapeActorMap::iterator am_it = shape_actor_map_->find (id);
+ if (viewport < 0)
+ return false;
- if (am_it == shape_actor_map_->end ())
+ bool success = true;
+
+ // If there is no custom viewport and the viewport number is not 0, exit
+ if (rens_->GetNumberOfItems () <= viewport)
{
- //pcl::console::print_warn (stderr, "[removeSape] Could not find any shape with id <%s>!\n", id.c_str ());
- return (false);
+ PCL_ERROR ("[removeText3D] The viewport [%d] doesn't exist (id <%s>)! ",
+ viewport,
+ id.c_str ());
+ return false;
}
- // Remove it from all renderers
- if (removeActorFromRenderer (am_it->second, viewport))
+ // check all or an individual viewport for a similar id
+ rens_->InitTraversal ();
+ for (size_t i = viewport; rens_->GetNextItem () != NULL; ++i)
{
- // Remove the pointer/ID pair to the global actor map
- shape_actor_map_->erase (am_it);
- return (true);
+ const std::string uid = id + std::string (i, '*');
+ ShapeActorMap::iterator am_it = shape_actor_map_->find (uid);
+
+ // was it found
+ if (am_it == shape_actor_map_->end ())
+ {
+ if (viewport > 0)
+ return (false);
+
+ continue;
+ }
+
+ // Remove it from all renderers
+ if (removeActorFromRenderer (am_it->second, i))
+ {
+ // Remove the pointer/ID pair to the global actor map
+ shape_actor_map_->erase (am_it);
+ if (viewport > 0)
+ return (true);
+
+ success &= true;
+ }
+ else
+ success = false;
}
- return (false);
+
+ return success;
}
/////////////////////////////////////////////////////////////////////////////////////////////
mapper->ScalarVisibilityOn ();
}
}
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
mapper->ImmediateModeRenderingOff ();
+#endif
actor->SetNumberOfCloudPoints (int (std::max<vtkIdType> (1, data->GetNumberOfPoints () / 10)));
actor->GetProperty ()->SetInterpolationToFlat ();
mapper->ScalarVisibilityOn ();
}
}
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
mapper->ImmediateModeRenderingOff ();
+#endif
//actor->SetNumberOfCloudPoints (int (std::max<vtkIdType> (1, data->GetNumberOfPoints () / 10)));
actor->GetProperty ()->SetInterpolationToFlat ();
}
return (true);
}
+/////////////////////////////////////////////////////////////////////////////////////////////
+bool
+pcl::visualization::PCLVisualizer::getPointCloudRenderingProperties (RenderingProperties property,
+ double &val1,
+ double &val2,
+ double &val3,
+ const std::string &id)
+{
+ // Check to see if this ID entry already exists (has it been already added to the visualizer?)
+ CloudActorMap::iterator am_it = cloud_actor_map_->find (id);
+
+ if (am_it == cloud_actor_map_->end ())
+ return (false);
+ // Get the actor pointer
+ vtkLODActor* actor = vtkLODActor::SafeDownCast (am_it->second.actor);
+ if (!actor)
+ return (false);
+
+ switch (property)
+ {
+ case PCL_VISUALIZER_COLOR:
+ {
+ double rgb[3];
+ actor->GetProperty ()->GetColor (rgb);
+ val1 = rgb[0];
+ val2 = rgb[1];
+ val3 = rgb[2];
+ break;
+ }
+ default:
+ {
+ pcl::console::print_error ("[getPointCloudRenderingProperties] "
+ "Property (%d) is either unknown or it requires a different "
+ "number of variables to retrieve its contents.\n",
+ property);
+ return (false);
+ }
+ }
+ return (true);
+}
/////////////////////////////////////////////////////////////////////////////////////////////
bool
// using immediate more rendering.
case PCL_VISUALIZER_IMMEDIATE_RENDERING:
{
+#if VTK_RENDERING_BACKEND_OPENGL_VERSION < 2
actor->GetMapper ()->SetImmediateModeRendering (int (value));
+#endif
actor->Modified ();
break;
}
if (selected)
{
actor->GetProperty ()->EdgeVisibilityOn ();
- actor->GetProperty ()->SetEdgeColor (1.0,0.0,0.0);
+ actor->GetProperty ()->SetEdgeColor (1.0, 0.0, 0.0);
actor->Modified ();
}
else
return (true);
}
+/////////////////////////////////////////////////////////////////////////////////////////////
+bool
+pcl::visualization::PCLVisualizer::setShapeRenderingProperties (
+ int property, double val1, double val2, const std::string &id, int)
+{
+ // Check to see if this ID entry already exists (has it been already added to the visualizer?)
+ ShapeActorMap::iterator am_it = shape_actor_map_->find (id);
+
+ if (am_it == shape_actor_map_->end ())
+ {
+ pcl::console::print_error ("[setShapeRenderingProperties] Could not find any shape with id <%s>!\n", id.c_str ());
+ return (false);
+ }
+ // Get the actor pointer
+ vtkActor* actor = vtkActor::SafeDownCast (am_it->second);
+ if (!actor)
+ return (false);
+
+ switch (property)
+ {
+ case PCL_VISUALIZER_LUT_RANGE:
+ {
+ // Check if the mapper has scalars
+ if (!actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ())
+ break;
+
+ // Check that scalars are not unisgned char (i.e. check if a LUT is used to colormap scalars assuming vtk ColorMode is Default)
+ if (actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ()->IsA ("vtkUnsignedCharArray"))
+ break;
+
+ // Check that range values are correct
+ if (val1 >= val2)
+ {
+ PCL_WARN ("[setShapeRenderingProperties] Range max must be greater than range min!\n");
+ return (false);
+ }
+
+ // Update LUT
+ actor->GetMapper ()->GetLookupTable ()->SetRange (val1, val2);
+ actor->GetMapper()->UseLookupTableScalarRangeOn ();
+ style_->updateLookUpTableDisplay (false);
+ break;
+ }
+ default:
+ {
+ pcl::console::print_error ("[setShapeRenderingProperties] Unknown property (%d) specified!\n", property);
+ return (false);
+ }
+ }
+ return (true);
+}
+
/////////////////////////////////////////////////////////////////////////////////////////////
bool
pcl::visualization::PCLVisualizer::setShapeRenderingProperties (
if (!actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ())
break;
- double range[2];
- actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ()->GetRange (range);
+ // Check that scalars are not unisgned char (i.e. check if a LUT is used to colormap scalars assuming vtk ColorMode is Default)
+ if (actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ()->IsA ("vtkUnsignedCharArray"))
+ break;
+
+ // Get range limits from existing LUT
+ double *range;
+ range = actor->GetMapper ()->GetLookupTable ()->GetRange ();
+
actor->GetMapper ()->ScalarVisibilityOn ();
actor->GetMapper ()->SetScalarRange (range[0], range[1]);
- vtkSmartPointer<vtkLookupTable> table = vtkSmartPointer<vtkLookupTable>::New ();
- getColormapLUT (static_cast<LookUpTableRepresentationProperties>(static_cast<int>(value)), table);
+ vtkSmartPointer<vtkLookupTable> table;
+ if (!pcl::visualization::getColormapLUT (static_cast<LookUpTableRepresentationProperties>(static_cast<int>(value)), table))
+ break;
table->SetRange (range[0], range[1]);
actor->GetMapper ()->SetLookupTable (table);
style_->updateLookUpTableDisplay (false);
- break;
}
+ case PCL_VISUALIZER_LUT_RANGE:
+ {
+ // Check if the mapper has scalars
+ if (!actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ())
+ break;
+
+ // Check that scalars are not unisgned char (i.e. check if a LUT is used to colormap scalars assuming vtk ColorMode is Default)
+ if (actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ()->IsA ("vtkUnsignedCharArray"))
+ break;
+
+ switch (int(value))
+ {
+ case PCL_VISUALIZER_LUT_RANGE_AUTO:
+ double range[2];
+ actor->GetMapper ()->GetInput ()->GetPointData ()->GetScalars ()->GetRange (range);
+ actor->GetMapper ()->GetLookupTable ()->SetRange (range[0], range[1]);
+ actor->GetMapper ()->UseLookupTableScalarRangeOn ();
+ style_->updateLookUpTableDisplay (false);
+ break;
+ }
+ break;
+ }
default:
{
pcl::console::print_error ("[setShapeRenderingProperties] Unknown property (%d) specified!\n", property);
vtkRenderer* renderer = NULL;
while ((renderer = rens_->GetNextItem ()) != NULL)
{
- cameras.push_back(Camera());
+ cameras.push_back (Camera ());
cameras.back ().pos[0] = renderer->GetActiveCamera ()->GetPosition ()[0];
cameras.back ().pos[1] = renderer->GetActiveCamera ()->GetPosition ()[1];
cameras.back ().pos[2] = renderer->GetActiveCamera ()->GetPosition ()[2];
vtkSmartPointer<vtkLODActor> actor;
createActorFromVTKDataSet (data, actor);
actor->GetProperty ()->SetRepresentationToSurface ();
- actor->GetProperty ()->SetColor (r,g,b);
+ actor->GetProperty ()->SetColor (r, g, b);
addActorToRenderer (actor, viewport);
// Save the pointer/ID pair to the global actor map
#else
trans_filter->SetInputData (polydata);
#endif
- trans_filter->Update();
+ trans_filter->Update ();
// Create an Actor
vtkSmartPointer <vtkLODActor> actor;
return (false);
}
- int color_handler_size = int (am_it->second.color_handlers.size ());
- if (index >= color_handler_size)
+ size_t color_handler_size = am_it->second.color_handlers.size ();
+ if (!(size_t (index) < color_handler_size))
{
- pcl::console::print_warn (stderr, "[updateColorHandlerIndex] Invalid index <%d> given! Maximum range is: 0-%lu.\n", index, static_cast<unsigned long> (am_it->second.color_handlers.size ()));
+ pcl::console::print_warn (stderr, "[updateColorHandlerIndex] Invalid index <%d> given! Index must be less than %d.\n", index, int (color_handler_size));
return (false);
}
// Get the handler
pcl::fromPCLPointCloud2 (poly_mesh.cloud, *point_cloud);
poly_points->SetNumberOfPoints (point_cloud->points.size ());
- size_t i;
- for (i = 0; i < point_cloud->points.size (); ++i)
- poly_points->InsertPoint (i, point_cloud->points[i].x, point_cloud->points[i].y, point_cloud->points[i].z);
+ for (size_t i = 0; i < point_cloud->points.size (); ++i)
+ {
+ const pcl::PointXYZ& p = point_cloud->points[i];
+ poly_points->InsertPoint (i, p.x, p.y, p.z);
+ }
bool has_color = false;
vtkSmartPointer<vtkUnsignedCharArray> colors = vtkSmartPointer<vtkUnsignedCharArray>::New ();
colors->SetNumberOfComponents (3);
colors->SetName ("Colors");
pcl::PointCloud<pcl::PointXYZRGB> cloud;
- pcl::fromPCLPointCloud2(poly_mesh.cloud, cloud);
- for (i = 0; i < cloud.points.size (); ++i)
+ pcl::fromPCLPointCloud2 (poly_mesh.cloud, cloud);
+ for (size_t i = 0; i < cloud.points.size (); ++i)
{
- const unsigned char color[3] = {cloud.points[i].r, cloud.points[i].g, cloud.points[i].b};
- colors->InsertNextTupleValue(color);
+ const unsigned char color[3] = { cloud.points[i].r, cloud.points[i].g, cloud.points[i].b };
+ colors->InsertNextTupleValue (color);
}
}
- if (pcl::getFieldIndex(poly_mesh.cloud, "rgba") != -1)
+ if (pcl::getFieldIndex (poly_mesh.cloud, "rgba") != -1)
{
has_color = true;
colors->SetNumberOfComponents (3);
colors->SetName ("Colors");
pcl::PointCloud<pcl::PointXYZRGBA> cloud;
- pcl::fromPCLPointCloud2(poly_mesh.cloud, cloud);
- for (i = 0; i < cloud.points.size (); ++i)
+ pcl::fromPCLPointCloud2 (poly_mesh.cloud, cloud);
+ for (size_t i = 0; i < cloud.points.size (); ++i)
{
- const unsigned char color[3] = {cloud.points[i].r, cloud.points[i].g, cloud.points[i].b};
- colors->InsertNextTupleValue(color);
+ const unsigned char color[3] = { cloud.points[i].r, cloud.points[i].g, cloud.points[i].b };
+ colors->InsertNextTupleValue (color);
}
}
vtkSmartPointer<vtkLODActor> actor;
- if (poly_mesh.polygons.size() > 1)
+ if (poly_mesh.polygons.size () > 1)
{
//create polys from polyMesh.polygons
vtkSmartPointer<vtkCellArray> cell_array = vtkSmartPointer<vtkCellArray>::New ();
- for (i = 0; i < poly_mesh.polygons.size (); i++)
+ for (size_t i = 0; i < poly_mesh.polygons.size (); i++)
{
size_t n_points (poly_mesh.polygons[i].vertices.size ());
cell_array->InsertNextCell (int (n_points));
cell_array->InsertCellPoint (poly_mesh.polygons[i].vertices[j]);
}
- vtkSmartPointer<vtkPolyData> polydata = vtkSmartPointer<vtkPolyData>::New();
-// polydata->SetStrips (cell_array);
+ vtkSmartPointer<vtkPolyData> polydata = vtkSmartPointer<vtkPolyData>::New ();
+ // polydata->SetStrips (cell_array);
polydata->SetPolys (cell_array);
polydata->SetPoints (poly_points);
if (has_color)
- polydata->GetPointData()->SetScalars(colors);
+ polydata->GetPointData ()->SetScalars (colors);
createActorFromVTKDataSet (polydata, actor);
}
- else if (poly_mesh.polygons.size() == 1)
+ else if (poly_mesh.polygons.size () == 1)
{
vtkSmartPointer<vtkPolygon> polygon = vtkSmartPointer<vtkPolygon>::New ();
size_t n_points = poly_mesh.polygons[0].vertices.size ();
polygon->GetPointIds ()->SetNumberOfIds (n_points - 1);
- for (size_t j=0; j < (n_points - 1); j++)
+ for (size_t j = 0; j < (n_points - 1); j++)
polygon->GetPointIds ()->SetId (j, poly_mesh.polygons[0].vertices[j]);
vtkSmartPointer<vtkUnstructuredGrid> poly_grid = vtkSmartPointer<vtkUnstructuredGrid>::New ();
}
else
{
- PCL_ERROR("PCLVisualizer::addPolygonMesh: No polygons\n");
+ PCL_ERROR ("PCLVisualizer::addPolygonMesh: No polygons\n");
return false;
}
const std::string &id)
{
- if (poly_mesh.polygons.empty())
+ if (poly_mesh.polygons.empty ())
{
pcl::console::print_error ("[updatePolygonMesh] No vertices given!\n");
return (false);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ> ());
pcl::fromPCLPointCloud2 (poly_mesh.cloud, *cloud);
- std::vector<pcl::Vertices> verts(poly_mesh.polygons); // copy vector
+ std::vector<pcl::Vertices> verts (poly_mesh.polygons); // copy vector
// Get the current poly data
vtkSmartPointer<vtkPolyData> polydata = static_cast<vtkPolyDataMapper*>(am_it->second.actor->GetMapper ())->GetInput ();
vtkSmartPointer<vtkCellArray> cells = polydata->GetStrips ();
if (!cells)
return (false);
- vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
+ vtkSmartPointer<vtkPoints> points = polydata->GetPoints ();
// Copy the new point array in
vtkIdType nr_points = cloud->points.size ();
points->SetNumberOfPoints (nr_points);
if (cloud->is_dense)
{
for (vtkIdType i = 0; i < nr_points; ++i, ptr += 3)
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
}
else
{
if (!isFinite (cloud->points[i]))
continue;
- lookup [i] = static_cast<int> (j);
- memcpy (&data[ptr], &cloud->points[i].x, sizeof (float) * 3);
+ lookup[i] = static_cast<int> (j);
+ std::copy (&cloud->points[i].x, &cloud->points[i].x + 3, &data[ptr]);
j++;
ptr += 3;
}
polyLine->GetPointIds()->SetNumberOfIds(polymesh.polygons[i].vertices.size());
for(unsigned int k = 0; k < polymesh.polygons[i].vertices.size(); k++)
{
- polyLine->GetPointIds()->SetId(k,polymesh.polygons[i].vertices[k]);
+ polyLine->GetPointIds ()->SetId (k, polymesh.polygons[i].vertices[k]);
}
cells->InsertNextCell (polyLine);
// total number of vertices
std::size_t nb_vertices = 0;
for (std::size_t i = 0; i < mesh.tex_polygons.size (); ++i)
- nb_vertices+= mesh.tex_polygons[i].size ();
+ nb_vertices += mesh.tex_polygons[i].size ();
// no vertices --> exit
if (nb_vertices == 0)
{
- PCL_ERROR("[PCLVisualizer::addTextureMesh] No vertices found!\n");
+ PCL_ERROR ("[PCLVisualizer::addTextureMesh] No vertices found!\n");
return (false);
}
// total number of coordinates
std::size_t nb_coordinates = 0;
for (std::size_t i = 0; i < mesh.tex_coordinates.size (); ++i)
- nb_coordinates+= mesh.tex_coordinates[i].size ();
+ nb_coordinates += mesh.tex_coordinates[i].size ();
// no texture coordinates --> exit
if (nb_coordinates == 0)
{
- PCL_ERROR("[PCLVisualizer::addTextureMesh] No textures coordinates found!\n");
+ PCL_ERROR ("[PCLVisualizer::addTextureMesh] No textures coordinates found!\n");
return (false);
}
(pcl::getFieldIndex(mesh.cloud, "rgb") != -1))
{
pcl::PointCloud<pcl::PointXYZRGB> cloud;
- pcl::fromPCLPointCloud2(mesh.cloud, cloud);
+ pcl::fromPCLPointCloud2 (mesh.cloud, cloud);
if (cloud.points.size () == 0)
{
- PCL_ERROR("[PCLVisualizer::addTextureMesh] Cloud is empty!\n");
+ PCL_ERROR ("[PCLVisualizer::addTextureMesh] Cloud is empty!\n");
return (false);
}
convertToVtkMatrix (cloud.sensor_origin_, cloud.sensor_orientation_, transformation);
{
const pcl::PointXYZRGB &p = cloud.points[i];
poly_points->InsertPoint (i, p.x, p.y, p.z);
- const unsigned char color[3] = {p.r, p.g, p.b};
- colors->InsertNextTupleValue(color);
+ const unsigned char color[3] = { p.r, p.g, p.b };
+ colors->InsertNextTupleValue (color);
}
}
else
// no points --> exit
if (cloud->points.size () == 0)
{
- PCL_ERROR("[PCLVisualizer::addTextureMesh] Cloud is empty!\n");
+ PCL_ERROR ("[PCLVisualizer::addTextureMesh] Cloud is empty!\n");
return (false);
}
convertToVtkMatrix (cloud->sensor_origin_, cloud->sensor_orientation_, transformation);
}
}
- vtkSmartPointer<vtkPolyData> polydata = vtkSmartPointer<vtkPolyData>::New();
+ vtkSmartPointer<vtkPolyData> polydata = vtkSmartPointer<vtkPolyData>::New ();
polydata->SetPolys (polys);
polydata->SetPoints (poly_points);
if (has_color)
- polydata->GetPointData()->SetScalars(colors);
+ polydata->GetPointData ()->SetScalars (colors);
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New ();
#if VTK_MAJOR_VERSION < 6
vtkTextureUnitManager* tex_manager = vtkOpenGLRenderWindow::SafeDownCast (win_)->GetTextureUnitManager ();
if (!tex_manager)
return (false);
- // Check if hardware support multi texture
+ // hardware always supports multitexturing of some degree
int texture_units = tex_manager->GetNumberOfTextureUnits ();
- if ((mesh.tex_materials.size () > 1) && (texture_units > 1))
- {
- if (texture_units < mesh.tex_materials.size ())
- PCL_WARN ("[PCLVisualizer::addTextureMesh] GPU texture units %d < mesh textures %d!\n",
- texture_units, mesh.tex_materials.size ());
- // Load textures
- std::size_t last_tex_id = std::min (static_cast<int> (mesh.tex_materials.size ()), texture_units);
- int tu = vtkProperty::VTK_TEXTURE_UNIT_0;
- std::size_t tex_id = 0;
- while (tex_id < last_tex_id)
- {
- vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New ();
- if (textureFromTexMaterial (mesh.tex_materials[tex_id], texture))
- {
- PCL_WARN ("[PCLVisualizer::addTextureMesh] Failed to load texture %s, skipping!\n",
- mesh.tex_materials[tex_id].tex_name.c_str ());
- continue;
- }
- // the first texture is in REPLACE mode others are in ADD mode
- if (tex_id == 0)
- texture->SetBlendingMode(vtkTexture::VTK_TEXTURE_BLENDING_MODE_REPLACE);
- else
- texture->SetBlendingMode(vtkTexture::VTK_TEXTURE_BLENDING_MODE_ADD);
- // add a texture coordinates array per texture
- vtkSmartPointer<vtkFloatArray> coordinates = vtkSmartPointer<vtkFloatArray>::New ();
- coordinates->SetNumberOfComponents (2);
- std::stringstream ss; ss << "TCoords" << tex_id;
- std::string this_coordinates_name = ss.str ();
- coordinates->SetName (this_coordinates_name.c_str ());
-
- for (std::size_t t = 0 ; t < mesh.tex_coordinates.size (); ++t)
- if (t == tex_id)
- for (std::size_t tc = 0; tc < mesh.tex_coordinates[t].size (); ++tc)
- coordinates->InsertNextTuple2 (mesh.tex_coordinates[t][tc][0],
- mesh.tex_coordinates[t][tc][1]);
- else
- for (std::size_t tc = 0; tc < mesh.tex_coordinates[t].size (); ++tc)
- coordinates->InsertNextTuple2 (-1.0, -1.0);
-
- mapper->MapDataArrayToMultiTextureAttribute(tu,
- this_coordinates_name.c_str (),
- vtkDataObject::FIELD_ASSOCIATION_POINTS);
- polydata->GetPointData ()->AddArray (coordinates);
- actor->GetProperty ()->SetTexture(tu, texture);
- ++tex_id;
- ++tu;
- }
- } // end of multi texturing
- else
- {
- if ((mesh.tex_materials.size () > 1) && (texture_units < 2))
- PCL_WARN ("[PCLVisualizer::addTextureMesh] Your GPU doesn't support multi texturing. "
- "Will use first one only!\n");
+ if ((size_t) texture_units < mesh.tex_materials.size ())
+ PCL_WARN ("[PCLVisualizer::addTextureMesh] GPU texture units %d < mesh textures %d!\n",
+ texture_units, mesh.tex_materials.size ());
+ // Load textures
+ std::size_t last_tex_id = std::min (static_cast<int> (mesh.tex_materials.size ()), texture_units);
+ std::size_t tex_id = 0;
+ while (tex_id < last_tex_id)
+ {
+#if VTK_MAJOR_VERSION < 9
+ int tu = vtkProperty::VTK_TEXTURE_UNIT_0 + tex_id;
+#else
+ const char *tu = mesh.tex_materials[tex_id].tex_name.c_str ();
+#endif
vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New ();
- // fill vtkTexture from pcl::TexMaterial structure
- if (textureFromTexMaterial (mesh.tex_materials[0], texture))
- PCL_WARN ("[PCLVisualizer::addTextureMesh] Failed to create vtkTexture from %s!\n",
- mesh.tex_materials[0].tex_name.c_str ());
-
- // set texture coordinates
- vtkSmartPointer<vtkFloatArray> coordinates = vtkSmartPointer<vtkFloatArray>::New ();
- coordinates->SetNumberOfComponents (2);
- coordinates->SetNumberOfTuples (mesh.tex_coordinates[0].size ());
- for (std::size_t tc = 0; tc < mesh.tex_coordinates[0].size (); ++tc)
+ if (textureFromTexMaterial (mesh.tex_materials[tex_id], texture))
{
- const Eigen::Vector2f &uv = mesh.tex_coordinates[0][tc];
- coordinates->SetTuple2 (tc, uv[0], uv[1]);
+ PCL_WARN ("[PCLVisualizer::addTextureMesh] Failed to load texture %s, skipping!\n",
+ mesh.tex_materials[tex_id].tex_name.c_str ());
+ continue;
}
- coordinates->SetName ("TCoords");
- polydata->GetPointData ()->SetTCoords(coordinates);
- // apply texture
- actor->SetTexture (texture);
- } // end of one texture
+ // the first texture is in REPLACE mode others are in ADD mode
+ if (tex_id == 0)
+ texture->SetBlendingMode(vtkTexture::VTK_TEXTURE_BLENDING_MODE_REPLACE);
+ else
+ texture->SetBlendingMode(vtkTexture::VTK_TEXTURE_BLENDING_MODE_ADD);
+ // add a texture coordinates array per texture
+ vtkSmartPointer<vtkFloatArray> coordinates = vtkSmartPointer<vtkFloatArray>::New ();
+ coordinates->SetNumberOfComponents (2);
+ std::stringstream ss; ss << "TCoords" << tex_id;
+ std::string this_coordinates_name = ss.str ();
+ coordinates->SetName (this_coordinates_name.c_str ());
+
+ for (std::size_t t = 0; t < mesh.tex_coordinates.size (); ++t)
+ if (t == tex_id)
+ for (std::size_t tc = 0; tc < mesh.tex_coordinates[t].size (); ++tc)
+ coordinates->InsertNextTuple2 (mesh.tex_coordinates[t][tc][0],
+ mesh.tex_coordinates[t][tc][1]);
+ else
+ for (std::size_t tc = 0; tc < mesh.tex_coordinates[t].size (); ++tc)
+ coordinates->InsertNextTuple2 (-1.0, -1.0);
+
+ mapper->MapDataArrayToMultiTextureAttribute(tu,
+ this_coordinates_name.c_str (),
+ vtkDataObject::FIELD_ASSOCIATION_POINTS);
+ polydata->GetPointData ()->AddArray (coordinates);
+ actor->GetProperty ()->SetTexture (tu, texture);
+ ++tex_id;
+ }
// set mapper
actor->SetMapper (mapper);
while ((actor = actors->GetNextActor ()) != NULL)
{
actor->GetProperty ()->SetRepresentationToSurface ();
- actor->GetProperty ()->SetLighting(true);
+ actor->GetProperty ()->SetLighting (true);
}
}
}
while ((actor = actors->GetNextActor ()) != NULL)
{
actor->GetProperty ()->SetRepresentationToWireframe ();
- actor->GetProperty ()->SetLighting(false);
+ actor->GetProperty ()->SetLighting (false);
}
}
}
}
+///////////////////////////////////////////////////////////////////////////////////
+float
+pcl::visualization::PCLVisualizer::getFPS () const
+{
+ return (update_fps_->last_fps);
+}
+
+
///////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::renderViewTesselatedSphere (
ico->SetSolidTypeToIcosahedron ();
ico->Update ();
- //tesselate cells from icosahedron
+ //tessellate cells from icosahedron
vtkSmartPointer<vtkLoopSubdivisionFilter> subdivide = vtkSmartPointer<vtkLoopSubdivisionFilter>::New ();
subdivide->SetNumberOfSubdivisions (tesselation_level);
subdivide->SetInputConnection (ico->GetOutputPort ());
vtkSmartPointer<vtkCellArray> cells_sphere = sphere->GetPolys ();
cam_positions.resize (sphere->GetNumberOfPolys ());
- size_t i=0;
+ size_t i = 0;
for (cells_sphere->InitTraversal (); cells_sphere->GetNextCell (npts_com, ptIds_com);)
{
sphere->GetPoint (ptIds_com[0], p1_com);
cam->SetViewAngle (view_angle);
cam->Modified ();
- //For each camera position, traposesnsform the object and render view
+ //For each camera position, transform the object and render view
for (size_t i = 0; i < cam_positions.size (); i++)
{
cam_pos[0] = cam_positions[i][0];
#else
//THIS CAN BE USED WHEN VTK >= 5.4 IS REQUIRED... vtkVisibleCellSelector is deprecated from VTK5.4
vtkSmartPointer<vtkHardwareSelector> hardware_selector = vtkSmartPointer<vtkHardwareSelector>::New ();
- hardware_selector->ClearBuffers();
- vtkSmartPointer<vtkSelection> hdw_selection = vtkSmartPointer<vtkSelection>::New ();
- hardware_selector->SetRenderer (renderer);
- hardware_selector->SetArea (0, 0, xres - 1, yres - 1);
- hardware_selector->SetFieldAssociation(vtkDataObject::FIELD_ASSOCIATION_CELLS);
- hdw_selection = hardware_selector->Select ();
- if (!hdw_selection || !hdw_selection->GetNode (0) || !hdw_selection->GetNode (0)->GetSelectionList ())
- {
- PCL_WARN ("[renderViewTesselatedSphere] Invalid selection, skipping!\n");
- continue;
- }
-
- vtkSmartPointer<vtkIdTypeArray> ids;
- ids = vtkIdTypeArray::SafeDownCast(hdw_selection->GetNode(0)->GetSelectionList());
- if (!ids)
- return;
- double visible_area = 0;
- for (int sel_id = 0; sel_id < (ids->GetNumberOfTuples ()); sel_id++)
- {
- int id_mesh = static_cast<int> (ids->GetValue (sel_id));
- vtkCell * cell = polydata->GetCell (id_mesh);
- vtkTriangle* triangle = dynamic_cast<vtkTriangle*> (cell);
- if (!triangle)
- {
- PCL_WARN ("[renderViewTesselatedSphere] Invalid triangle %d, skipping!\n", id_mesh);
- continue;
- }
-
- double p0[3];
- double p1[3];
- double p2[3];
- triangle->GetPoints ()->GetPoint (0, p0);
- triangle->GetPoints ()->GetPoint (1, p1);
- triangle->GetPoints ()->GetPoint (2, p2);
- area = vtkTriangle::TriangleArea (p0, p1, p2);
- visible_area += area;
- }
+ hardware_selector->ClearBuffers ();
+ vtkSmartPointer<vtkSelection> hdw_selection = vtkSmartPointer<vtkSelection>::New ();
+ hardware_selector->SetRenderer (renderer);
+ hardware_selector->SetArea (0, 0, xres - 1, yres - 1);
+ hardware_selector->SetFieldAssociation (vtkDataObject::FIELD_ASSOCIATION_CELLS);
+ hdw_selection = hardware_selector->Select ();
+ if (!hdw_selection || !hdw_selection->GetNode (0) || !hdw_selection->GetNode (0)->GetSelectionList ())
+ {
+ PCL_WARN ("[renderViewTesselatedSphere] Invalid selection, skipping!\n");
+ continue;
+ }
+
+ vtkSmartPointer<vtkIdTypeArray> ids;
+ ids = vtkIdTypeArray::SafeDownCast (hdw_selection->GetNode (0)->GetSelectionList ());
+ if (!ids)
+ return;
+ double visible_area = 0;
+ for (int sel_id = 0; sel_id < (ids->GetNumberOfTuples ()); sel_id++)
+ {
+ int id_mesh = static_cast<int> (ids->GetValue (sel_id));
+ vtkCell * cell = polydata->GetCell (id_mesh);
+ vtkTriangle* triangle = dynamic_cast<vtkTriangle*> (cell);
+ if (!triangle)
+ {
+ PCL_WARN ("[renderViewTesselatedSphere] Invalid triangle %d, skipping!\n", id_mesh);
+ continue;
+ }
+
+ double p0[3];
+ double p1[3];
+ double p2[3];
+ triangle->GetPoints ()->GetPoint (0, p0);
+ triangle->GetPoints ()->GetPoint (1, p1);
+ triangle->GetPoints ()->GetPoint (2, p2);
+ area = vtkTriangle::TriangleArea (p0, p1, p2);
+ visible_area += area;
+ }
#endif
enthropies.push_back (static_cast<float> (visible_area / totalArea));
{
if (rens_->GetNumberOfItems () > 1)
{
- PCL_WARN("[renderView] Method will render only the first viewport\n");
+ PCL_WARN ("[renderView] Method will render only the first viewport\n");
return;
}
for (vtkIdType i = 0; i < nr_points; ++i, cell += 2)
{
*cell = 1;
- *(cell+1) = i;
+ *(cell + 1) = i;
}
// Save the results in initcells
Eigen::Matrix4f &transformation)
{
transformation.setIdentity ();
- transformation.block<3,3>(0,0) = orientation.toRotationMatrix ();
- transformation.block<3,1>(0,3) = origin.head (3);
+ transformation.block<3, 3> (0, 0) = orientation.toRotationMatrix ();
+ transformation.block<3, 1> (0, 3) = origin.head (3);
}
//////////////////////////////////////////////////////////////////////////////////////////////
{
for (int i = 0; i < 4; i++)
for (int k = 0; k < 4; k++)
- m (i,k) = static_cast<float> (vtk_matrix->GetElement (i, k));
+ m (i, k) = static_cast<float> (vtk_matrix->GetElement (i, k));
}
//////////////////////////////////////////////////////////////////////////////////////////////
if (win_)
win_->SetBorders (mode);
}
-
+
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::setPosition (int x, int y)
win_->Render ();
}
}
-
+
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::setSize (int xw, int yw)
pcl::visualization::PCLVisualizer::close ()
{
#if ((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
- interactor_->stopped = true;
- // This tends to close the window...
- interactor_->stopLoop ();
+ if (interactor_)
+ {
+ interactor_->stopped = true;
+ // This tends to close the window...
+ interactor_->stopLoop ();
+ }
#else
stopped_ = true;
// This tends to close the window...
win_->Finalize ();
- interactor_->TerminateApp ();
+ if (interactor_)
+ interactor_->TerminateApp ();
#endif
}
bool
pcl::visualization::PCLVisualizer::wasStopped () const
{
- if (interactor_ != NULL)
+ if (interactor_ != NULL)
#if ((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
return (interactor_->stopped);
#else
- return (stopped_);
+ return (stopped_);
#endif
- else
+ else
return (true);
}
void
pcl::visualization::PCLVisualizer::resetStoppedFlag ()
{
- if (interactor_ != NULL)
+ if (interactor_ != NULL)
#if ((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
interactor_->stopped = false;
#else
- stopped_ = false;
+ stopped_ = false;
#endif
}
{
if (event_id != vtkCommand::TimerEvent)
return;
- int timer_id = * static_cast<int*> (call_data);
+ int timer_id = *static_cast<int*> (call_data);
//PCL_WARN ("[pcl::visualization::PCLVisualizer::ExitMainLoopTimerCallback] Timer %d called.\n", timer_id);
if (timer_id != right_timer_id)
return;
pcl_visualizer->interactor_->TerminateApp ();
#endif
}
-
+
//////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::visualization::PCLVisualizer::ExitCallback::Execute (
if (event_id != vtkCommand::ExitEvent)
return;
#if ((VTK_MAJOR_VERSION == 5) && (VTK_MINOR_VERSION <= 4))
- pcl_visualizer->interactor_->stopped = true;
- // This tends to close the window...
- pcl_visualizer->interactor_->stopLoop ();
+ if (pcl_visualizer->interactor_)
+ {
+ pcl_visualizer->interactor_->stopped = true;
+ // This tends to close the window...
+ pcl_visualizer->interactor_->stopLoop ();
+ }
#else
pcl_visualizer->stopped_ = true;
// This tends to close the window...
- pcl_visualizer->interactor_->TerminateApp ();
+ if (pcl_visualizer->interactor_)
+ pcl_visualizer->interactor_->TerminateApp ();
#endif
}
vtkObject* caller, unsigned long, void*)
{
vtkRenderer *ren = reinterpret_cast<vtkRenderer *> (caller);
- float fps = 1.0f / static_cast<float> (ren->GetLastRenderTimeInSeconds ());
+ last_fps = 1.0f / static_cast<float> (ren->GetLastRenderTimeInSeconds ());
char buf[128];
- sprintf (buf, "%.1f FPS", fps);
+ sprintf (buf, "%.1f FPS", last_fps);
actor->SetInput (buf);
}
#target_link_libraries(test_geometry pcl_common pcl_features pcl_filters pcl_io pcl_kdtree pcl_visualization)
#add_test(vis_test_geometry test_geometry)
-#add_executable(demo_shapes test_shapes.cpp)
-#target_link_libraries(demo_shapes pcl_common pcl_io pcl_kdtree pcl_visualization)
+add_executable(demo_shapes test_shapes.cpp)
+target_link_libraries(demo_shapes pcl_common pcl_io pcl_kdtree pcl_visualization)
+add_executable(demo_shapes_multiport test_shapes_multiport.cpp)
+target_link_libraries(demo_shapes_multiport pcl_common pcl_io pcl_kdtree pcl_visualization)
+
+add_executable(demo_text_simple text_simple.cpp)
+target_link_libraries(demo_text_simple pcl_common pcl_visualization)
+
+add_executable(demo_text_simple_multiport text_simple_multiport.cpp)
+target_link_libraries(demo_text_simple_multiport pcl_common pcl_visualization)
--- /dev/null
+#include <pcl/visualization/pcl_visualizer.h>
+#include <pcl/io/pcd_io.h>
+
+using pcl::PointCloud;
+using pcl::PointXYZ;
+
+int
+main (int , char **)
+{
+ srand (unsigned (time (0)));
+
+ PointCloud<PointXYZ>::Ptr cloud (new PointCloud<PointXYZ>);
+
+ cloud->points.resize (5);
+ for (size_t i = 0; i < cloud->points.size (); ++i)
+ {
+ cloud->points[i].x = float (i);
+ cloud->points[i].y = float (i / 2);
+ cloud->points[i].z = 0.0f;
+ }
+
+ // Start the visualizer
+ pcl::visualization::PCLVisualizer p ("test_shapes");
+ int leftPort(0);
+ int rightPort(0);
+ p.createViewPort(0, 0, 0.5, 1, leftPort);
+ p.createViewPort(0.5, 0, 1, 1, rightPort);
+ p.setBackgroundColor (1, 1, 1);
+ p.addCoordinateSystem (1.0, "first");
+
+ //p.addPolygon (cloud, "polygon");
+ p.addPolygon<PointXYZ> (cloud, 1.0, 0.0, 0.0, "polygon", leftPort);
+ p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 10, "polygon", leftPort);
+
+ p.addLine<PointXYZ, PointXYZ> (cloud->points[0], cloud->points[1], 0.0, 1.0, 0.0, "line", leftPort);
+ p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 50, "line", leftPort);
+
+ p.addSphere<PointXYZ> (cloud->points[0], 1, 0.0, 1.0, 0.0, "sphere", leftPort);
+ p.setShapeRenderingProperties (pcl::visualization::PCL_VISUALIZER_LINE_WIDTH, 5, "sphere", leftPort);
+// p.removePolygon ("poly");
+
+ p.addText ("text", 200, 200, 1.0, 0, 0, "text", leftPort);
+
+ p.addText3D ("text3D", cloud->points[0], 1.0, 1.0, 0.0, 0.0, "", rightPort);
+ p.spin ();
+ p.removeCoordinateSystem ("first", 0);
+ p.spin ();
+ p.addCoordinateSystem (1.0, 5, 3, 1, "second");
+ p.spin ();
+ p.removeCoordinateSystem ("second", 0);
+ p.spin ();
+ p.addText3D ("text3D_to_remove", cloud->points[1], 1.0, 0.0, 1.0, 0.0, "", rightPort);
+ p.spin ();
+ p.removeText3D ("text3D_to_remove", rightPort);
+ p.spin ();
+}
--- /dev/null
+#include <pcl/point_types.h>
+
+#include <pcl/visualization/pcl_visualizer.h>
+
+int
+main (int argc, char** argv)
+{
+ pcl::visualization::PCLVisualizer viz ("Visualizator");
+ viz.addCoordinateSystem (1.0);
+
+ viz.addText3D ("Following text", pcl::PointXYZ(0.0, 0.0, 0.0),
+ 1.0, 1.0, 0.0, 0.0, "id_following");
+ viz.spin ();
+ double orientation[3] = {0., 0., 0.};
+ viz.addText3D ("Fixed text", pcl::PointXYZ(0.0, 0.0, 0.0), orientation,
+ 1.0, 0.0, 1.0, 0.0, "id_fixed");
+ viz.spin ();
+ viz.removeText3D ("id_following");
+ viz.spin ();
+ viz.removeText3D ("id_fixed");
+ viz.spin ();
+
+ return (0);
+}
--- /dev/null
+#include <pcl/point_types.h>
+
+#include <pcl/visualization/pcl_visualizer.h>
+
+int
+main (int argc, char** argv)
+{
+ pcl::visualization::PCLVisualizer viz ("Visualizator");
+ int leftPort (0);
+ int rightPort (0);
+
+ viz.createViewPort (0, 0, 0.5, 1, leftPort);
+ viz.createViewPort (0.5, 0, 1, 1, rightPort);
+
+ viz.addCoordinateSystem (1.0);
+
+ viz.addText3D ("Following text", pcl::PointXYZ(0.0, 0.0, 0.0),
+ 1.0, 1.0, 0.0, 0.0, "id_following", leftPort);
+ viz.spin ();
+ double orientation[3] = {0., 0., 0.};
+ viz.addText3D ("Fixed text", pcl::PointXYZ(0.0, 0.0, 0.0), orientation,
+ 1.0, 0.0, 1.0, 0.0, "id_fixed", rightPort);
+ viz.spin ();
+ viz.removeText3D ("id_following", leftPort);
+ viz.spin ();
+ viz.removeText3D ("id_fixed", rightPort);
+ viz.spin ();
+
+ return (0);
+}
{
{
boost::mutex::scoped_lock io_lock (io_mutex);
- print_info ("Writing remaing %ld clouds in the buffer to disk...\n", buf_.getSize ());
+ print_info ("Writing remaining %ld clouds in the buffer to disk...\n", buf_.getSize ());
}
while (!buf_.isEmpty ())
{
{
int rgb_idx = 0;
int label_idx = 0;
+ int invalid_fields_count = 0;
for (size_t f = 0; f < cloud->fields.size (); ++f)
{
+ if (!isValidFieldName (cloud->fields[f].name))
+ {
+ ++invalid_fields_count;
+ continue;
+ }
if (cloud->fields[f].name == "rgb" || cloud->fields[f].name == "rgba")
{
- rgb_idx = f + 1;
+ rgb_idx = f - invalid_fields_count + 1 /* first is ColorHandlerRandom */;
color_handler.reset (new pcl::visualization::PointCloudColorHandlerRGBField<pcl::PCLPointCloud2> (cloud));
}
else if (cloud->fields[f].name == "label")
{
- label_idx = f + 1;
+ label_idx = f - invalid_fields_count + 1;
color_handler.reset (new pcl::visualization::PointCloudColorHandlerLabelField<pcl::PCLPointCloud2> (cloud, !use_optimal_l_colors));
}
else
{
- if (!isValidFieldName (cloud->fields[f].name))
- continue;
color_handler.reset (new pcl::visualization::PointCloudColorHandlerGenericField<pcl::PCLPointCloud2> (cloud, cloud->fields[f].name));
}
// Add the cloud to the renderer